Compare commits
1 Commits
Author | SHA1 | Date | |
---|---|---|---|
![]() |
9f3bd0dc6d |
3
.gitignore
vendored
3
.gitignore
vendored
@@ -153,6 +153,3 @@ $RECYCLE.BIN/
|
|||||||
*.lnk
|
*.lnk
|
||||||
|
|
||||||
# End of https://www.toptal.com/developers/gitignore/api/go,linux,intellij+all,windows
|
# End of https://www.toptal.com/developers/gitignore/api/go,linux,intellij+all,windows
|
||||||
|
|
||||||
/ALHP.go
|
|
||||||
/config.yaml
|
|
@@ -1,94 +0,0 @@
|
|||||||
linters-settings:
|
|
||||||
dupl:
|
|
||||||
threshold: 100
|
|
||||||
goconst:
|
|
||||||
min-len: 3
|
|
||||||
min-occurrences: 4
|
|
||||||
gocritic:
|
|
||||||
enabled-tags:
|
|
||||||
- diagnostic
|
|
||||||
- experimental
|
|
||||||
- opinionated
|
|
||||||
- performance
|
|
||||||
- style
|
|
||||||
disabled-checks:
|
|
||||||
- whyNoLint
|
|
||||||
- filepathJoin
|
|
||||||
mnd:
|
|
||||||
checks:
|
|
||||||
- argument
|
|
||||||
- case
|
|
||||||
- condition
|
|
||||||
- return
|
|
||||||
ignored-numbers:
|
|
||||||
- '0'
|
|
||||||
- '1'
|
|
||||||
- '2'
|
|
||||||
- '3'
|
|
||||||
- '4'
|
|
||||||
- '5'
|
|
||||||
- '6'
|
|
||||||
- '7'
|
|
||||||
- '8'
|
|
||||||
- '9'
|
|
||||||
- '10'
|
|
||||||
- '100'
|
|
||||||
- '1000'
|
|
||||||
ignored-functions:
|
|
||||||
- strings.SplitN
|
|
||||||
- os.OpenFile
|
|
||||||
- os.MkdirAll
|
|
||||||
- os.WriteFile
|
|
||||||
govet:
|
|
||||||
check-shadowing: false
|
|
||||||
lll:
|
|
||||||
line-length: 140
|
|
||||||
misspell:
|
|
||||||
locale: US
|
|
||||||
nolintlint:
|
|
||||||
allow-unused: false # report any unused nolint directives
|
|
||||||
require-explanation: false # don't require an explanation for nolint directives
|
|
||||||
require-specific: false # don't require nolint directives to be specific about which linter is being skipped
|
|
||||||
tagliatelle:
|
|
||||||
case:
|
|
||||||
use-field-name: true
|
|
||||||
rules:
|
|
||||||
# Any struct tag type can be used.
|
|
||||||
# Support string case: `camel`, `pascal`, `kebab`, `snake`, `upperSnake`, `goCamel`, `goPascal`, `goKebab`, `goSnake`, `upper`, `lower`, `header`.
|
|
||||||
json: snake
|
|
||||||
yaml: snake
|
|
||||||
xml: camel
|
|
||||||
|
|
||||||
linters:
|
|
||||||
enable-all: true
|
|
||||||
disable:
|
|
||||||
- gochecknoglobals
|
|
||||||
- depguard
|
|
||||||
- gci
|
|
||||||
- gofumpt
|
|
||||||
- goimports
|
|
||||||
- varnamelen
|
|
||||||
- funlen
|
|
||||||
- cyclop
|
|
||||||
- wsl
|
|
||||||
- nosnakecase
|
|
||||||
- nlreturn
|
|
||||||
- godot
|
|
||||||
- nestif
|
|
||||||
- wrapcheck
|
|
||||||
- gocognit
|
|
||||||
- gocyclo
|
|
||||||
- maintidx
|
|
||||||
- nonamedreturns
|
|
||||||
- exhaustivestruct
|
|
||||||
- exhaustruct
|
|
||||||
- forcetypeassert
|
|
||||||
- godox
|
|
||||||
- nakedret
|
|
||||||
- tagalign
|
|
||||||
- maligned
|
|
||||||
# remove for new projects
|
|
||||||
- errname
|
|
||||||
- goerr113
|
|
||||||
- depguard
|
|
||||||
- noctx
|
|
200
README.md
200
README.md
@@ -1,45 +1,11 @@
|
|||||||
# ALHP
|
# alhp
|
||||||
|
|
||||||
[](https://status.alhp.dev)
|
Build script for archlinux instructionset enabled repos.
|
||||||
[](https://goreportcard.com/report/somegit.dev/ALHP/ALHP.GO)
|
All packages are build with -march=<cpu-set> and -O3. Some packages will not build with -O3, they will just be provided from the official repos as usual.
|
||||||
[](https://pkg.go.dev/somegit.dev/ALHP/ALHP.GO)
|
|
||||||
[](https://somegit.dev/anonfunc/ALHP.GO/src/branch/master/LICENSE)
|
|
||||||
[](https://liberapay.com/anonfunc/)
|
|
||||||
|
|
||||||
Buildbot for Archlinux based repos with different
|
## Check your system for support
|
||||||
[x86-64 feature levels](https://www.phoronix.com/scan.php?page=news_item&px=GCC-11-x86-64-Feature-Levels), `-O3` and
|
|
||||||
[LTO](https://en.wikipedia.org/wiki/Interprocedural_optimization).
|
|
||||||
|
|
||||||
> [!WARNING]
|
**Important**: Before you enable any of these repos, check if your system supports x86-64-v3. You can do that with `/lib/ld-linux-x86-64.so.2 --help`. If you don't check beforehand you might be unable to boot your system anymore and need to downgrade any package that you may have upgraded.
|
||||||
> NVIDIA graphics users using the **proprietary driver** are strongly encouraged to read the
|
|
||||||
> [FAQ about Linux kernel modules](#directly-linked-kernel-modules) before enabling any repos.
|
|
||||||
|
|
||||||
---
|
|
||||||
<!-- TOC -->
|
|
||||||
* [Quick Start](#quick-start)
|
|
||||||
* [FAQ](#faq)
|
|
||||||
* [Matrix](#matrix)
|
|
||||||
* [Donations](#donations)
|
|
||||||
* [License and Legal](#license-and-legal)
|
|
||||||
<!-- TOC -->
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Quick Start
|
|
||||||
|
|
||||||
### 1. Check your system for support
|
|
||||||
|
|
||||||
> [!CAUTION]
|
|
||||||
> Before enabling any of these repos, make sure that your system supports the level of functionality you want to
|
|
||||||
> enable (e.g. `x86-64-v3`).
|
|
||||||
> **If you don't check first, you may not be able to boot your system and will have to downgrade any packages you may
|
|
||||||
have upgraded.**
|
|
||||||
|
|
||||||
Check which feature levels your CPU supports with
|
|
||||||
|
|
||||||
```bash
|
|
||||||
/lib/ld-linux-x86-64.so.2 --help
|
|
||||||
```
|
|
||||||
|
|
||||||
Example output snippet for a system supporting up to `x86-64-v3`:
|
Example output snippet for a system supporting up to `x86-64-v3`:
|
||||||
|
|
||||||
@@ -50,157 +16,59 @@ Subdirectories of glibc-hwcaps directories, in priority order:
|
|||||||
x86-64-v2 (supported, searched)
|
x86-64-v2 (supported, searched)
|
||||||
```
|
```
|
||||||
|
|
||||||
> [!NOTE]
|
## Enable Repos
|
||||||
> ALHP repos for `x86-64-v2`, `x86-64-v3` and `x86-64-v4` are currently available. You can see all available
|
|
||||||
> repositories [here](https://alhp.dev/).
|
|
||||||
|
|
||||||
### 2. Install keyring & mirrorlist
|
To enable these complement repos you need to add them above the regular repos in `/etc/pacman.conf`
|
||||||
|
|
||||||
Install [alhp-keyring](https://aur.archlinux.org/packages/alhp-keyring/)
|
### Example pacman.conf
|
||||||
and [alhp-mirrorlist](https://aur.archlinux.org/packages/alhp-mirrorlist/) from the **AUR**.
|
|
||||||
|
|
||||||
Example with `yay`:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
yay -S alhp-keyring alhp-mirrorlist
|
|
||||||
```
|
|
||||||
|
|
||||||
`alhp-keyring` provides the current signing keys used by ALHP, `alhp-mirrorlist` a selection of mirrors.
|
|
||||||
|
|
||||||
### 3. Choose a mirror (optional)
|
|
||||||
|
|
||||||
Edit `/etc/pacman.d/alhp-mirrorlist` and comment in/out the mirrors you want to enable/disable.
|
|
||||||
By default, a CDN mirror provided by ALHP is selected.
|
|
||||||
> [!NOTE]
|
|
||||||
> `cdn.alhp.dev` and `alhp.dev` are provided directly by ALHP. If you have problems with a mirror,
|
|
||||||
> open an issue at [the mirrorlist repo](https://somegit.dev/ALHP/alhp-mirrorlist).
|
|
||||||
|
|
||||||
### 4. Modify pacman.conf
|
|
||||||
|
|
||||||
Add the ALHP repos to your `/etc/pacman.conf`. Make sure the appropriate ALHP repository is **above** the Archlinux
|
|
||||||
repo.
|
|
||||||
|
|
||||||
Example for `x86-64-v3`:
|
|
||||||
|
|
||||||
```editorconfig
|
```editorconfig
|
||||||
[core-x86-64-v3]
|
[core-x86-64-v3]
|
||||||
Include = /etc/pacman.d/alhp-mirrorlist
|
Server = https://alhp.harting.dev/$repo/os/$arch/
|
||||||
|
|
||||||
|
[extra-x86-64-v3]
|
||||||
|
Server = https://alhp.harting.dev/$repo/os/$arch/
|
||||||
|
|
||||||
|
[community-x86-64-v3]
|
||||||
|
Server = https://alhp.harting.dev/$repo/os/$arch/
|
||||||
|
|
||||||
[core]
|
[core]
|
||||||
Include = /etc/pacman.d/mirrorlist
|
Include = /etc/pacman.d/mirrorlist
|
||||||
|
|
||||||
[extra-x86-64-v3]
|
|
||||||
Include = /etc/pacman.d/alhp-mirrorlist
|
|
||||||
|
|
||||||
[extra]
|
[extra]
|
||||||
Include = /etc/pacman.d/mirrorlist
|
Include = /etc/pacman.d/mirrorlist
|
||||||
|
|
||||||
# if you need [multilib] support
|
[community]
|
||||||
[multilib-x86-64-v3]
|
|
||||||
Include = /etc/pacman.d/alhp-mirrorlist
|
|
||||||
|
|
||||||
[multilib]
|
|
||||||
Include = /etc/pacman.d/mirrorlist
|
Include = /etc/pacman.d/mirrorlist
|
||||||
```
|
```
|
||||||
|
|
||||||
Replace `x86-64-v3` with the x86-64 feature level you want to enable.
|
Replace `x86-64-v3` with your cpu-set. More information about all available options on [this gcc page](https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html).
|
||||||
|
Currently, alhp.harting.dev only builds for `x86-64-v3` (list is subject to change).
|
||||||
|
You can see all available repositories [here](https://alhp.harting.dev/).
|
||||||
|
|
||||||
> [!TIP]
|
After finished adding the repos to `pacman.conf` you need to import and sign the used pgp key:
|
||||||
> Multiple layers can be stacked as described in https://somegit.dev/ALHP/ALHP.GO/issues/255#issuecomment-3335.
|
|
||||||
|
|
||||||
### 5. Update package database and upgrade
|
|
||||||
|
|
||||||
|
Import:
|
||||||
```
|
```
|
||||||
pacman -Suy
|
pacman-key --keyserver keyserver.ubuntu.com --recv-keys 0D4D2FDAF45468F3DDF59BEDE3D0D2CD3952E298
|
||||||
```
|
```
|
||||||
|
|
||||||
## FAQ
|
Local sign:
|
||||||
|
|
||||||
### Remove ALHP packages
|
|
||||||
|
|
||||||
To disable ALHP, remove all *x86-64-vX* entries in `/etc/pacman.conf` and remove `alhp-keyring` and `alhp-mirrorlist`.
|
|
||||||
|
|
||||||
After that, you can update pacman's databases and downgrade all packages, like
|
|
||||||
|
|
||||||
```
|
```
|
||||||
pacman -Suuy
|
pacman-key --lsign-key 0D4D2FDAF45468F3DDF59BEDE3D0D2CD3952E298
|
||||||
```
|
```
|
||||||
|
|
||||||
### LTO
|
Update package database:
|
||||||
|
```
|
||||||
Enabled for all packages built after 04 Nov 2021 12:07:00
|
pacman -Sy
|
||||||
UTC. [More details.](https://somegit.dev/ALHP/ALHP.GO/issues/52)
|
|
||||||
LTO status is visible per package on the package status page.
|
|
||||||
|
|
||||||
### Linux Kernel packages
|
|
||||||
|
|
||||||
`KCFLAGS`/`KCPPFLAGS` are used to build the kernel packages with our additional flags.
|
|
||||||
|
|
||||||
### Directly linked kernel modules
|
|
||||||
|
|
||||||
Due to our increase in pkgrel, building the kernel packages **will break any directly linked modules** such as `nvidia`
|
|
||||||
(not `nvidia-dkms`) or `virtualbox-host-modules-arch` (not `virtualbox-host-dkms`). **Their respective `dkms`-variant is
|
|
||||||
not affected**. This issue is being tracked in #68, a solution is being worked on.
|
|
||||||
|
|
||||||
### Mirrors
|
|
||||||
|
|
||||||
You want to mirror ALHP? You are welcome to do
|
|
||||||
so, [see alhp-mirrorlist for how to become one](https://somegit.dev/ALHP/alhp-mirrorlist#how-to-become-a-mirror).
|
|
||||||
|
|
||||||
### What packages are built
|
|
||||||
|
|
||||||
Packages [excluded](https://www.reddit.com/r/archlinux/comments/oflged/alhp_archlinux_recompiled_for_x8664v3_experimental/h4fkinu?utm_source=share&utm_medium=web2x&context=3)
|
|
||||||
from building (besides all `any` architecture packages) are being listed in issue #16.
|
|
||||||
See also [package status page](https://status.alhp.dev) (search for `blacklisted`).
|
|
||||||
|
|
||||||
### Why is package X not up-to-date
|
|
||||||
|
|
||||||
Also relevant for: **I can't find package X / Application X fails to start because it links to an old/newer lib**
|
|
||||||
|
|
||||||
ALHP builds packages **after** they are released in the official Archlinux repos (excluding `[*-testing]`).
|
|
||||||
This will cause packages to be delayed if the current batch contains many packages, or packages that take a while to
|
|
||||||
build (e.g. `chromium`).
|
|
||||||
|
|
||||||
You can always check on the progress of the current build cycle on the [package status page](https://status.alhp.dev).
|
|
||||||
Please refrain from opening issues caused by packages currently in queue/not yet build/not yet moved to the repo.
|
|
||||||
Please keep in mind that large rebuilds such as `openssl` or `python` can take days to complete on our current build
|
|
||||||
hardware.
|
|
||||||
|
|
||||||
### Debug symbols
|
|
||||||
|
|
||||||
ALHP provides a debuginfod instance under `debuginfod.alhp.dev`.
|
|
||||||
|
|
||||||
To use it, have `debuginfod` installed on your system and add it to your `DEBUGINFOD_URLS` with:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
echo "https://debuginfod.alhp.dev" > /etc/debuginfod/alhp.urls
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### Switch between levels
|
## Replace packages
|
||||||
|
Following command reinstalls all packages found in the repo **extra-x86-64-v3** that are already installed.
|
||||||
|
Replace `extra-x86-64-v3` with whatever repo you want to install.
|
||||||
|
|
||||||
If you want to switch between levels, e.g. from `x86-64-v3` to `x86-64-v4`, you need to revert to official packages
|
```shell script
|
||||||
first, and then enable your desired repos again.
|
pacman -S $(pacman -Sl extra-x86-64-v3 | grep installed | cut -f 2 -d " " | perl -pe 's/\R/ /g;')
|
||||||
|
```
|
||||||
|
|
||||||
1. Comment out or remove the ALHP repo entries in `/etc/pacman.conf`.
|
This is only needed once, new updates are coming from this new repo then, as usual.
|
||||||
2. Downgrade packages with `pacman -Suuy`.
|
|
||||||
3. Clear pacman's package cache with `pacman -Scc`.
|
|
||||||
4. Uncomment/add your desired repos to `/etc/pacman.conf` and update with `pacman -Suy`.
|
|
||||||
|
|
||||||
## Matrix
|
|
||||||
|
|
||||||
For any non-issue questions, or if you just want to chat, ALHP has a Matrix
|
|
||||||
room [here](https://matrix.to/#/#alhp:ofsg.eu) (`#alhp@ofsg.eu`). You can also find me (@idlegandalf)
|
|
||||||
in `#archlinux:archlinux.org`.
|
|
||||||
|
|
||||||
## Donations
|
|
||||||
|
|
||||||
I appreciate any money you want to throw my way, but donations are strictly optional. Donations are primarily used to
|
|
||||||
pay for server costs. Also consider [donating to the **Archlinux Team**](https://archlinux.org/donate/), without their
|
|
||||||
work ALHP would not be possible.
|
|
||||||
|
|
||||||
[](https://liberapay.com/anonfunc/)
|
|
||||||
|
|
||||||
## License and Legal
|
|
||||||
|
|
||||||
This project and all of its source code is released under the terms of the GNU General Public License, version 2
|
|
||||||
or any later version. See [LICENSE](https://somegit.dev/ALHP/ALHP.GO/src/branch/master/LICENSE) for details.
|
|
@@ -1,5 +1,5 @@
|
|||||||
[Unit]
|
[Unit]
|
||||||
Description=Go based Archlinux instruction-set enabled repo build manager.
|
Description=Go based Archlinux instructionset enabled repo build manager.
|
||||||
After=network.target
|
After=network.target
|
||||||
|
|
||||||
[Service]
|
[Service]
|
||||||
@@ -12,10 +12,6 @@ TimeoutStopSec=5min
|
|||||||
MemoryHigh=30G
|
MemoryHigh=30G
|
||||||
CPUQuota=700%
|
CPUQuota=700%
|
||||||
Nice=15
|
Nice=15
|
||||||
CPUSchedulingPolicy=batch
|
|
||||||
IOSchedulingClass=best-effort
|
|
||||||
IOSchedulingPriority=7
|
|
||||||
IOWeight=100
|
|
||||||
|
|
||||||
[Install]
|
[Install]
|
||||||
WantedBy=multi-user.target
|
WantedBy=multi-user.target
|
480
buildmanager.go
480
buildmanager.go
@@ -1,480 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"github.com/c2h5oh/datasize"
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
|
||||||
"github.com/sethvargo/go-retry"
|
|
||||||
log "github.com/sirupsen/logrus"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const MaxUnknownBuilder = 2
|
|
||||||
|
|
||||||
type BuildManager struct {
|
|
||||||
repoPurge map[string]chan []*ProtoPackage
|
|
||||||
repoAdd map[string]chan []*ProtoPackage
|
|
||||||
repoWG *sync.WaitGroup
|
|
||||||
alpmMutex *sync.RWMutex
|
|
||||||
building []*ProtoPackage
|
|
||||||
buildingLock *sync.RWMutex
|
|
||||||
queueSignal chan struct{}
|
|
||||||
metrics struct {
|
|
||||||
queueSize *prometheus.GaugeVec
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *BuildManager) buildQueue(ctx context.Context, queue []*ProtoPackage) error {
|
|
||||||
var (
|
|
||||||
doneQ []*ProtoPackage
|
|
||||||
doneQLock = new(sync.RWMutex)
|
|
||||||
unknownBuilds bool
|
|
||||||
queueNoMatch bool
|
|
||||||
)
|
|
||||||
|
|
||||||
for len(doneQ) != len(queue) {
|
|
||||||
up := 0
|
|
||||||
b.buildingLock.RLock()
|
|
||||||
if (pkgList2MaxMem(b.building) < conf.Build.MemoryLimit &&
|
|
||||||
!unknownBuilds && !queueNoMatch) ||
|
|
||||||
(unknownBuilds && len(b.building) < MaxUnknownBuilder) {
|
|
||||||
queueNoMatch = true
|
|
||||||
b.buildingLock.RUnlock()
|
|
||||||
for _, pkg := range queue {
|
|
||||||
// check if package is already build
|
|
||||||
doneQLock.RLock()
|
|
||||||
if ContainsPkg(doneQ, pkg, true) {
|
|
||||||
doneQLock.RUnlock()
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
doneQLock.RUnlock()
|
|
||||||
|
|
||||||
// check if package is already building (we do not build packages from different marchs simultaneously)
|
|
||||||
b.buildingLock.RLock()
|
|
||||||
if ContainsPkg(b.building, pkg, false) {
|
|
||||||
log.Debugf("[Q] skipped already building package %s->%s", pkg.FullRepo, pkg.Pkgbase)
|
|
||||||
b.buildingLock.RUnlock()
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
b.buildingLock.RUnlock()
|
|
||||||
|
|
||||||
// only check for memory on known-memory-builds
|
|
||||||
// otherwise build them one-at-a-time
|
|
||||||
// TODO: add initial compile mode for new repos
|
|
||||||
if !unknownBuilds {
|
|
||||||
// check if package has unknown memory usage
|
|
||||||
if pkg.DBPackage.MaxRss == nil {
|
|
||||||
log.Debugf("[Q] skipped unknown package %s->%s", pkg.FullRepo, pkg.Pkgbase)
|
|
||||||
up++
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// check if package can be built with current memory limit
|
|
||||||
if datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB > conf.Build.MemoryLimit { //nolint:gosec
|
|
||||||
log.Warningf("[Q] %s->%s exeeds memory limit: %s->%s", pkg.FullRepo, pkg.Pkgbase,
|
|
||||||
datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB, conf.Build.MemoryLimit) //nolint:gosec
|
|
||||||
doneQLock.Lock()
|
|
||||||
doneQ = append(doneQ, pkg)
|
|
||||||
doneQLock.Unlock()
|
|
||||||
b.metrics.queueSize.WithLabelValues(pkg.FullRepo, "queued").Dec()
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
b.buildingLock.RLock()
|
|
||||||
currentMemLoad := pkgList2MaxMem(b.building)
|
|
||||||
b.buildingLock.RUnlock()
|
|
||||||
|
|
||||||
// check if package can be build right now
|
|
||||||
if currentMemLoad+(datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB) > conf.Build.MemoryLimit { //nolint:gosec
|
|
||||||
log.Debugf("[Q] skipped package with max_rss %s while load %s: %s->%s",
|
|
||||||
datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB, currentMemLoad, pkg.Pkgbase, pkg.March) //nolint:gosec
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
b.buildingLock.RLock()
|
|
||||||
if len(b.building) >= MaxUnknownBuilder {
|
|
||||||
b.buildingLock.RUnlock()
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
b.buildingLock.RUnlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
b.buildingLock.Lock()
|
|
||||||
b.building = append(b.building, pkg)
|
|
||||||
b.buildingLock.Unlock()
|
|
||||||
queueNoMatch = false
|
|
||||||
b.metrics.queueSize.WithLabelValues(pkg.FullRepo, "queued").Dec()
|
|
||||||
b.metrics.queueSize.WithLabelValues(pkg.FullRepo, "building").Inc()
|
|
||||||
|
|
||||||
go func(pkg *ProtoPackage) {
|
|
||||||
dur, err := pkg.build(ctx)
|
|
||||||
b.metrics.queueSize.WithLabelValues(pkg.FullRepo, "building").Dec()
|
|
||||||
if err != nil && !errors.Is(err, ErrorNotEligible) {
|
|
||||||
log.Warningf("[Q] error building package %s->%s in %s: %s", pkg.FullRepo, pkg.Pkgbase, dur, err)
|
|
||||||
b.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
|
|
||||||
} else if err == nil {
|
|
||||||
log.Infof("[Q] build successful: %s->%s (%s)", pkg.FullRepo, pkg.Pkgbase, dur)
|
|
||||||
b.metrics.queueSize.WithLabelValues(pkg.FullRepo, "built").Inc()
|
|
||||||
}
|
|
||||||
doneQLock.Lock()
|
|
||||||
b.buildingLock.Lock()
|
|
||||||
doneQ = append(doneQ, pkg)
|
|
||||||
|
|
||||||
for i := 0; i < len(b.building); i++ {
|
|
||||||
if b.building[i].PkgbaseEquals(pkg, true) {
|
|
||||||
b.building = append(b.building[:i], b.building[i+1:]...)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
doneQLock.Unlock()
|
|
||||||
b.buildingLock.Unlock()
|
|
||||||
b.queueSignal <- struct{}{}
|
|
||||||
}(pkg)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
log.Debugf("[Q] memory/build limit reached, waiting for package to finish...")
|
|
||||||
b.buildingLock.RUnlock()
|
|
||||||
<-b.queueSignal
|
|
||||||
queueNoMatch = false
|
|
||||||
}
|
|
||||||
|
|
||||||
// if only unknown packages are left, enable unknown buildmode
|
|
||||||
b.buildingLock.RLock()
|
|
||||||
if up == len(queue)-(len(doneQ)+len(b.building)) {
|
|
||||||
unknownBuilds = true
|
|
||||||
}
|
|
||||||
b.buildingLock.RUnlock()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *BuildManager) repoWorker(ctx context.Context, repo string) {
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case pkgL := <-b.repoAdd[repo]:
|
|
||||||
b.repoWG.Add(1)
|
|
||||||
toAdd := make([]string, 0)
|
|
||||||
for _, pkg := range pkgL {
|
|
||||||
toAdd = append(toAdd, pkg.PkgFiles...)
|
|
||||||
}
|
|
||||||
|
|
||||||
args := []string{"-s", "-v", "-p", "-n", filepath.Join(conf.Basedir.Repo, repo, "os", conf.Arch, repo) + ".db.tar.xz"}
|
|
||||||
args = append(args, toAdd...)
|
|
||||||
cmd := exec.CommandContext(ctx, "repo-add", args...)
|
|
||||||
res, err := cmd.CombinedOutput()
|
|
||||||
log.Debug(string(res))
|
|
||||||
if err != nil && cmd.ProcessState.ExitCode() != 1 {
|
|
||||||
log.Panicf("%s while repo-add: %v", string(res), err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, pkg := range pkgL {
|
|
||||||
err = pkg.toDBPackage(ctx, true)
|
|
||||||
if err != nil {
|
|
||||||
log.Warningf("error getting db entry for %s: %v", pkg.Pkgbase, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
pkgUpd := pkg.DBPackage.Update().
|
|
||||||
SetStatus(dbpackage.StatusLatest).
|
|
||||||
ClearSkipReason().
|
|
||||||
SetRepoVersion(pkg.Version).
|
|
||||||
SetTagRev(pkg.State.TagRev)
|
|
||||||
|
|
||||||
if _, err := os.Stat(filepath.Join(conf.Basedir.Debug, pkg.March,
|
|
||||||
pkg.DBPackage.Packages[0]+"-debug-"+pkg.Version+"-"+conf.Arch+".pkg.tar.zst")); err == nil {
|
|
||||||
pkgUpd = pkgUpd.SetDebugSymbols(dbpackage.DebugSymbolsAvailable)
|
|
||||||
} else {
|
|
||||||
pkgUpd = pkgUpd.SetDebugSymbols(dbpackage.DebugSymbolsNotAvailable)
|
|
||||||
}
|
|
||||||
if pkg.DBPackage, err = pkgUpd.Save(ctx); err != nil {
|
|
||||||
log.Error(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
cmd = exec.CommandContext(ctx, "paccache", "-rc", filepath.Join(conf.Basedir.Repo, repo, "os", conf.Arch), "-k", "1") //nolint:gosec
|
|
||||||
res, err = cmd.CombinedOutput()
|
|
||||||
log.Debug(string(res))
|
|
||||||
if err != nil {
|
|
||||||
log.Warningf("error running paccache: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = updateLastUpdated()
|
|
||||||
if err != nil {
|
|
||||||
log.Warningf("error updating lastupdate: %v", err)
|
|
||||||
}
|
|
||||||
b.repoWG.Done()
|
|
||||||
case pkgL := <-b.repoPurge[repo]:
|
|
||||||
for _, pkg := range pkgL {
|
|
||||||
if _, err := os.Stat(filepath.Join(conf.Basedir.Repo, pkg.FullRepo, "os", conf.Arch, pkg.FullRepo) + ".db.tar.xz"); err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if len(pkg.PkgFiles) == 0 {
|
|
||||||
if err := pkg.findPkgFiles(); err != nil {
|
|
||||||
log.Warningf("[%s/%s] unable to find files: %v", pkg.FullRepo, pkg.Pkgbase, err)
|
|
||||||
continue
|
|
||||||
} else if len(pkg.PkgFiles) == 0 {
|
|
||||||
if pkg.DBPackage != nil {
|
|
||||||
err = pkg.DBPackage.Update().ClearRepoVersion().ClearTagRev().Exec(ctx)
|
|
||||||
if err != nil {
|
|
||||||
log.Error(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var realPkgs []string
|
|
||||||
for _, filePath := range pkg.PkgFiles {
|
|
||||||
if _, err := os.Stat(filePath); err == nil {
|
|
||||||
realPkgs = append(realPkgs, Package(filePath).Name())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(realPkgs) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
b.repoWG.Add(1)
|
|
||||||
args := []string{"-s", "-v", filepath.Join(conf.Basedir.Repo, pkg.FullRepo, "os", conf.Arch, pkg.FullRepo) + ".db.tar.xz"}
|
|
||||||
args = append(args, realPkgs...)
|
|
||||||
cmd := exec.CommandContext(ctx, "repo-remove", args...)
|
|
||||||
res, err := cmd.CombinedOutput()
|
|
||||||
log.Debug(string(res))
|
|
||||||
if err != nil && cmd.ProcessState.ExitCode() == 1 {
|
|
||||||
log.Warningf("error while deleting package %s: %s", pkg.Pkgbase, string(res))
|
|
||||||
}
|
|
||||||
|
|
||||||
if pkg.DBPackage != nil {
|
|
||||||
err = pkg.DBPackage.Update().ClearRepoVersion().ClearTagRev().Exec(ctx)
|
|
||||||
if err != nil {
|
|
||||||
log.Error(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, file := range pkg.PkgFiles {
|
|
||||||
_ = os.Remove(file)
|
|
||||||
_ = os.Remove(file + ".sig")
|
|
||||||
}
|
|
||||||
err = updateLastUpdated()
|
|
||||||
if err != nil {
|
|
||||||
log.Warningf("error updating lastupdate: %v", err)
|
|
||||||
}
|
|
||||||
b.repoWG.Done()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *BuildManager) syncWorker(ctx context.Context) error {
|
|
||||||
err := os.MkdirAll(conf.Basedir.Work, 0o755)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("error creating work dir %s: %v", conf.Basedir.Work, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
gitPath := filepath.Join(conf.Basedir.Work, stateDir)
|
|
||||||
for {
|
|
||||||
if _, err := os.Stat(gitPath); os.IsNotExist(err) {
|
|
||||||
cmd := exec.CommandContext(ctx, "git", "clone", "--depth=1", conf.StateRepo, gitPath) //nolint:gosec
|
|
||||||
res, err := cmd.CombinedOutput()
|
|
||||||
log.Debug(string(res))
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("error cloning state repo: %v", err)
|
|
||||||
}
|
|
||||||
} else if err == nil {
|
|
||||||
cmd := exec.CommandContext(ctx, "git", "reset", "--hard")
|
|
||||||
cmd.Dir = gitPath
|
|
||||||
res, err := cmd.CombinedOutput()
|
|
||||||
log.Debug(string(res))
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("error reseting state repo: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
cmd = exec.CommandContext(ctx, "git", "pull")
|
|
||||||
cmd.Dir = gitPath
|
|
||||||
res, err = cmd.CombinedOutput()
|
|
||||||
log.Debug(string(res))
|
|
||||||
if err != nil {
|
|
||||||
log.Warningf("failed to update state repo: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// housekeeping
|
|
||||||
wg := new(sync.WaitGroup)
|
|
||||||
for _, repo := range repos {
|
|
||||||
wg.Add(1)
|
|
||||||
splitRepo := strings.Split(repo, "-")
|
|
||||||
go func() {
|
|
||||||
err := housekeeping(ctx, splitRepo[0], strings.Join(splitRepo[1:], "-"), wg)
|
|
||||||
if err != nil {
|
|
||||||
log.Warningf("[%s] housekeeping failed: %v", repo, err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
wg.Wait()
|
|
||||||
|
|
||||||
err := logHK(ctx)
|
|
||||||
if err != nil {
|
|
||||||
log.Warningf("log-housekeeping failed: %v", err)
|
|
||||||
}
|
|
||||||
debugHK()
|
|
||||||
|
|
||||||
// fetch updates between sync runs
|
|
||||||
b.alpmMutex.Lock()
|
|
||||||
err = alpmHandle.Release()
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("error releasing ALPM handle: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := retry.Fibonacci(ctx, 1*time.Second, func(_ context.Context) error {
|
|
||||||
if err := setupChroot(ctx); err != nil {
|
|
||||||
log.Warningf("unable to upgrade chroot, trying again later")
|
|
||||||
return retry.RetryableError(err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
alpmHandle, err = initALPM(filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot),
|
|
||||||
filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot, "/var/lib/pacman"))
|
|
||||||
if err != nil {
|
|
||||||
log.Warningf("error while alpm-init: %v", err)
|
|
||||||
}
|
|
||||||
b.alpmMutex.Unlock()
|
|
||||||
|
|
||||||
queue, err := b.genQueue(ctx)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("error building queue: %v", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Debugf("build-queue with %d items", len(queue))
|
|
||||||
err = b.buildQueue(ctx, queue)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if ctx.Err() == nil {
|
|
||||||
for _, repo := range repos {
|
|
||||||
err = movePackagesLive(ctx, repo)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("[%s] error moving packages live: %v", repo, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return ctx.Err()
|
|
||||||
}
|
|
||||||
|
|
||||||
b.metrics.queueSize.Reset()
|
|
||||||
log.Debugf("build-cycle finished")
|
|
||||||
time.Sleep(time.Duration(*checkInterval) * time.Minute)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *BuildManager) genQueue(ctx context.Context) ([]*ProtoPackage, error) {
|
|
||||||
stateFiles, err := Glob(filepath.Join(conf.Basedir.Work, stateDir, "**/*"))
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("error scanning for state-files: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var pkgbuilds []*ProtoPackage
|
|
||||||
for _, stateFile := range stateFiles {
|
|
||||||
stat, err := os.Stat(stateFile)
|
|
||||||
if err != nil || stat.IsDir() || strings.Contains(stateFile, ".git") || strings.Contains(stateFile, "README.md") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
repo, subRepo, arch, err := stateFileMeta(stateFile)
|
|
||||||
if err != nil {
|
|
||||||
log.Warningf("[QG] error generating statefile metadata %s: %v", stateFile, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if !Contains(conf.Repos, repo) || (subRepo != nil && Contains(conf.Blacklist.Repo, *subRepo)) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
rawState, err := os.ReadFile(stateFile)
|
|
||||||
if err != nil {
|
|
||||||
log.Warningf("[QG] cannot read statefile %s: %v", stateFile, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
state, err := parseState(string(rawState))
|
|
||||||
if err != nil {
|
|
||||||
log.Warningf("[QG] cannot parse statefile %s: %v", stateFile, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, march := range conf.March {
|
|
||||||
pkg := &ProtoPackage{
|
|
||||||
Pkgbase: state.Pkgbase,
|
|
||||||
Repo: dbpackage.Repository(repo),
|
|
||||||
March: march,
|
|
||||||
FullRepo: repo + "-" + march,
|
|
||||||
State: state,
|
|
||||||
Version: state.PkgVer,
|
|
||||||
Arch: arch,
|
|
||||||
}
|
|
||||||
|
|
||||||
err = pkg.toDBPackage(ctx, false)
|
|
||||||
if err != nil {
|
|
||||||
log.Warningf("[QG] error getting/creating dbpackage %s: %v", state.Pkgbase, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if !pkg.isAvailable(ctx, alpmHandle) {
|
|
||||||
log.Debugf("[QG] %s->%s not available on mirror, skipping build", pkg.FullRepo, pkg.Pkgbase)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
aBuild, err := pkg.IsBuilt()
|
|
||||||
if err != nil {
|
|
||||||
log.Warningf("[QG] %s->%s error determining built packages: %v", pkg.FullRepo, pkg.Pkgbase, err)
|
|
||||||
}
|
|
||||||
if aBuild {
|
|
||||||
log.Infof("[QG] %s->%s already built, skipping build", pkg.FullRepo, pkg.Pkgbase)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if pkg.DBPackage == nil {
|
|
||||||
err = pkg.toDBPackage(ctx, true)
|
|
||||||
if err != nil {
|
|
||||||
log.Warningf("[QG] error getting/creating dbpackage %s: %v", state.Pkgbase, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if pkg.DBPackage.TagRev != nil && *pkg.DBPackage.TagRev == state.TagRev {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// try download .SRCINFO from repo
|
|
||||||
srcInfo, err := downloadSRCINFO(pkg.DBPackage.Pkgbase, state.TagRev)
|
|
||||||
if err == nil {
|
|
||||||
pkg.Srcinfo = srcInfo
|
|
||||||
}
|
|
||||||
|
|
||||||
if !pkg.isEligible(ctx) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
pkg.DBPackage, err = pkg.DBPackage.Update().SetStatus(dbpackage.StatusQueued).Save(ctx)
|
|
||||||
if err != nil {
|
|
||||||
log.Warningf("[QG] error updating dbpackage %s: %v", state.Pkgbase, err)
|
|
||||||
}
|
|
||||||
pkgbuilds = append(pkgbuilds, pkg)
|
|
||||||
b.metrics.queueSize.WithLabelValues(pkg.FullRepo, "queued").Inc()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return pkgbuilds, nil
|
|
||||||
}
|
|
31
config.yaml
Normal file
31
config.yaml
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
arch: x86_64
|
||||||
|
repos:
|
||||||
|
- core
|
||||||
|
- extra
|
||||||
|
- community
|
||||||
|
|
||||||
|
svn2git:
|
||||||
|
upstream-core-extra: "https://github.com/archlinux/svntogit-packages.git"
|
||||||
|
upstream-community: "https://github.com/archlinux/svntogit-community.git"
|
||||||
|
|
||||||
|
basedir:
|
||||||
|
repo: /var/lib/alhp/repo/
|
||||||
|
chroot: /var/lib/alhp/chroot/
|
||||||
|
makepkg: /var/lib/alhp/makepkg/
|
||||||
|
upstream: /var/lib/alhp/upstream/
|
||||||
|
|
||||||
|
march:
|
||||||
|
- x86-64-v3
|
||||||
|
|
||||||
|
blacklist:
|
||||||
|
- pacman
|
||||||
|
- tensorflow
|
||||||
|
- tensorflow-cuda
|
||||||
|
- gcc
|
||||||
|
|
||||||
|
build:
|
||||||
|
worker: 4
|
||||||
|
makej: 8
|
||||||
|
|
||||||
|
logging:
|
||||||
|
level: DEBUG
|
@@ -1,50 +0,0 @@
|
|||||||
arch: x86_64
|
|
||||||
repos:
|
|
||||||
- core
|
|
||||||
- extra
|
|
||||||
|
|
||||||
state_repo: "https://gitlab.archlinux.org/archlinux/packaging/state.git"
|
|
||||||
|
|
||||||
max_clone_retries: 100
|
|
||||||
|
|
||||||
db:
|
|
||||||
driver: pgx
|
|
||||||
connect_to: "postgres://username:password@localhost:5432/database_name"
|
|
||||||
|
|
||||||
basedir:
|
|
||||||
repo: /var/lib/alhp/repo/
|
|
||||||
work: /var/lib/alhp/workspace/
|
|
||||||
debug: /var/lib/alhp/debug/
|
|
||||||
|
|
||||||
march:
|
|
||||||
- x86-64-v3
|
|
||||||
|
|
||||||
blacklist:
|
|
||||||
packages:
|
|
||||||
- tensorflow
|
|
||||||
- tensorflow-cuda
|
|
||||||
- gcc
|
|
||||||
repo:
|
|
||||||
- testing
|
|
||||||
- i686
|
|
||||||
- staging
|
|
||||||
- unstable
|
|
||||||
lto:
|
|
||||||
- llvm
|
|
||||||
- rust
|
|
||||||
|
|
||||||
build:
|
|
||||||
# number of workers total
|
|
||||||
worker: 4
|
|
||||||
makej: 8
|
|
||||||
checks: true
|
|
||||||
# how much memory ALHP should use
|
|
||||||
# this will also decide how many builds will run concurrently,
|
|
||||||
# since ALHP will try to optimise the queue for speed while not going over this limit
|
|
||||||
memory_limit: "16gb"
|
|
||||||
|
|
||||||
logging:
|
|
||||||
level: INFO
|
|
||||||
|
|
||||||
metrics:
|
|
||||||
port: 9568
|
|
340
ent/client.go
340
ent/client.go
@@ -1,340 +0,0 @@
|
|||||||
// Code generated by ent, DO NOT EDIT.
|
|
||||||
|
|
||||||
package ent
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"reflect"
|
|
||||||
|
|
||||||
"somegit.dev/ALHP/ALHP.GO/ent/migrate"
|
|
||||||
|
|
||||||
"entgo.io/ent"
|
|
||||||
"entgo.io/ent/dialect"
|
|
||||||
"entgo.io/ent/dialect/sql"
|
|
||||||
"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Client is the client that holds all ent builders.
|
|
||||||
type Client struct {
|
|
||||||
config
|
|
||||||
// Schema is the client for creating, migrating and dropping schema.
|
|
||||||
Schema *migrate.Schema
|
|
||||||
// DBPackage is the client for interacting with the DBPackage builders.
|
|
||||||
DBPackage *DBPackageClient
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewClient creates a new client configured with the given options.
|
|
||||||
func NewClient(opts ...Option) *Client {
|
|
||||||
client := &Client{config: newConfig(opts...)}
|
|
||||||
client.init()
|
|
||||||
return client
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Client) init() {
|
|
||||||
c.Schema = migrate.NewSchema(c.driver)
|
|
||||||
c.DBPackage = NewDBPackageClient(c.config)
|
|
||||||
}
|
|
||||||
|
|
||||||
type (
|
|
||||||
// config is the configuration for the client and its builder.
|
|
||||||
config struct {
|
|
||||||
// driver used for executing database requests.
|
|
||||||
driver dialect.Driver
|
|
||||||
// debug enable a debug logging.
|
|
||||||
debug bool
|
|
||||||
// log used for logging on debug mode.
|
|
||||||
log func(...any)
|
|
||||||
// hooks to execute on mutations.
|
|
||||||
hooks *hooks
|
|
||||||
// interceptors to execute on queries.
|
|
||||||
inters *inters
|
|
||||||
}
|
|
||||||
// Option function to configure the client.
|
|
||||||
Option func(*config)
|
|
||||||
)
|
|
||||||
|
|
||||||
// newConfig creates a new config for the client.
|
|
||||||
func newConfig(opts ...Option) config {
|
|
||||||
cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}}
|
|
||||||
cfg.options(opts...)
|
|
||||||
return cfg
|
|
||||||
}
|
|
||||||
|
|
||||||
// options applies the options on the config object.
|
|
||||||
func (c *config) options(opts ...Option) {
|
|
||||||
for _, opt := range opts {
|
|
||||||
opt(c)
|
|
||||||
}
|
|
||||||
if c.debug {
|
|
||||||
c.driver = dialect.Debug(c.driver, c.log)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Debug enables debug logging on the ent.Driver.
|
|
||||||
func Debug() Option {
|
|
||||||
return func(c *config) {
|
|
||||||
c.debug = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Log sets the logging function for debug mode.
|
|
||||||
func Log(fn func(...any)) Option {
|
|
||||||
return func(c *config) {
|
|
||||||
c.log = fn
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Driver configures the client driver.
|
|
||||||
func Driver(driver dialect.Driver) Option {
|
|
||||||
return func(c *config) {
|
|
||||||
c.driver = driver
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open opens a database/sql.DB specified by the driver name and
|
|
||||||
// the data source name, and returns a new client attached to it.
|
|
||||||
// Optional parameters can be added for configuring the client.
|
|
||||||
func Open(driverName, dataSourceName string, options ...Option) (*Client, error) {
|
|
||||||
switch driverName {
|
|
||||||
case dialect.MySQL, dialect.Postgres, dialect.SQLite:
|
|
||||||
drv, err := sql.Open(driverName, dataSourceName)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return NewClient(append(options, Driver(drv))...), nil
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("unsupported driver: %q", driverName)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ErrTxStarted is returned when trying to start a new transaction from a transactional client.
|
|
||||||
var ErrTxStarted = errors.New("ent: cannot start a transaction within a transaction")
|
|
||||||
|
|
||||||
// Tx returns a new transactional client. The provided context
|
|
||||||
// is used until the transaction is committed or rolled back.
|
|
||||||
func (c *Client) Tx(ctx context.Context) (*Tx, error) {
|
|
||||||
if _, ok := c.driver.(*txDriver); ok {
|
|
||||||
return nil, ErrTxStarted
|
|
||||||
}
|
|
||||||
tx, err := newTx(ctx, c.driver)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("ent: starting a transaction: %w", err)
|
|
||||||
}
|
|
||||||
cfg := c.config
|
|
||||||
cfg.driver = tx
|
|
||||||
return &Tx{
|
|
||||||
ctx: ctx,
|
|
||||||
config: cfg,
|
|
||||||
DBPackage: NewDBPackageClient(cfg),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// BeginTx returns a transactional client with specified options.
|
|
||||||
func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) {
|
|
||||||
if _, ok := c.driver.(*txDriver); ok {
|
|
||||||
return nil, errors.New("ent: cannot start a transaction within a transaction")
|
|
||||||
}
|
|
||||||
tx, err := c.driver.(interface {
|
|
||||||
BeginTx(context.Context, *sql.TxOptions) (dialect.Tx, error)
|
|
||||||
}).BeginTx(ctx, opts)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("ent: starting a transaction: %w", err)
|
|
||||||
}
|
|
||||||
cfg := c.config
|
|
||||||
cfg.driver = &txDriver{tx: tx, drv: c.driver}
|
|
||||||
return &Tx{
|
|
||||||
ctx: ctx,
|
|
||||||
config: cfg,
|
|
||||||
DBPackage: NewDBPackageClient(cfg),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Debug returns a new debug-client. It's used to get verbose logging on specific operations.
|
|
||||||
//
|
|
||||||
// client.Debug().
|
|
||||||
// DBPackage.
|
|
||||||
// Query().
|
|
||||||
// Count(ctx)
|
|
||||||
func (c *Client) Debug() *Client {
|
|
||||||
if c.debug {
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
cfg := c.config
|
|
||||||
cfg.driver = dialect.Debug(c.driver, c.log)
|
|
||||||
client := &Client{config: cfg}
|
|
||||||
client.init()
|
|
||||||
return client
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the database connection and prevents new queries from starting.
|
|
||||||
func (c *Client) Close() error {
|
|
||||||
return c.driver.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Use adds the mutation hooks to all the entity clients.
|
|
||||||
// In order to add hooks to a specific client, call: `client.Node.Use(...)`.
|
|
||||||
func (c *Client) Use(hooks ...Hook) {
|
|
||||||
c.DBPackage.Use(hooks...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Intercept adds the query interceptors to all the entity clients.
|
|
||||||
// In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`.
|
|
||||||
func (c *Client) Intercept(interceptors ...Interceptor) {
|
|
||||||
c.DBPackage.Intercept(interceptors...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mutate implements the ent.Mutator interface.
|
|
||||||
func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) {
|
|
||||||
switch m := m.(type) {
|
|
||||||
case *DBPackageMutation:
|
|
||||||
return c.DBPackage.mutate(ctx, m)
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("ent: unknown mutation type %T", m)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DBPackageClient is a client for the DBPackage schema.
|
|
||||||
type DBPackageClient struct {
|
|
||||||
config
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewDBPackageClient returns a client for the DBPackage from the given config.
|
|
||||||
func NewDBPackageClient(c config) *DBPackageClient {
|
|
||||||
return &DBPackageClient{config: c}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Use adds a list of mutation hooks to the hooks stack.
|
|
||||||
// A call to `Use(f, g, h)` equals to `dbpackage.Hooks(f(g(h())))`.
|
|
||||||
func (c *DBPackageClient) Use(hooks ...Hook) {
|
|
||||||
c.hooks.DBPackage = append(c.hooks.DBPackage, hooks...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Intercept adds a list of query interceptors to the interceptors stack.
|
|
||||||
// A call to `Intercept(f, g, h)` equals to `dbpackage.Intercept(f(g(h())))`.
|
|
||||||
func (c *DBPackageClient) Intercept(interceptors ...Interceptor) {
|
|
||||||
c.inters.DBPackage = append(c.inters.DBPackage, interceptors...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create returns a builder for creating a DBPackage entity.
|
|
||||||
func (c *DBPackageClient) Create() *DBPackageCreate {
|
|
||||||
mutation := newDBPackageMutation(c.config, OpCreate)
|
|
||||||
return &DBPackageCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateBulk returns a builder for creating a bulk of DBPackage entities.
|
|
||||||
func (c *DBPackageClient) CreateBulk(builders ...*DBPackageCreate) *DBPackageCreateBulk {
|
|
||||||
return &DBPackageCreateBulk{config: c.config, builders: builders}
|
|
||||||
}
|
|
||||||
|
|
||||||
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
|
|
||||||
// a builder and applies setFunc on it.
|
|
||||||
func (c *DBPackageClient) MapCreateBulk(slice any, setFunc func(*DBPackageCreate, int)) *DBPackageCreateBulk {
|
|
||||||
rv := reflect.ValueOf(slice)
|
|
||||||
if rv.Kind() != reflect.Slice {
|
|
||||||
return &DBPackageCreateBulk{err: fmt.Errorf("calling to DBPackageClient.MapCreateBulk with wrong type %T, need slice", slice)}
|
|
||||||
}
|
|
||||||
builders := make([]*DBPackageCreate, rv.Len())
|
|
||||||
for i := 0; i < rv.Len(); i++ {
|
|
||||||
builders[i] = c.Create()
|
|
||||||
setFunc(builders[i], i)
|
|
||||||
}
|
|
||||||
return &DBPackageCreateBulk{config: c.config, builders: builders}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update returns an update builder for DBPackage.
|
|
||||||
func (c *DBPackageClient) Update() *DBPackageUpdate {
|
|
||||||
mutation := newDBPackageMutation(c.config, OpUpdate)
|
|
||||||
return &DBPackageUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateOne returns an update builder for the given entity.
|
|
||||||
func (c *DBPackageClient) UpdateOne(dp *DBPackage) *DBPackageUpdateOne {
|
|
||||||
mutation := newDBPackageMutation(c.config, OpUpdateOne, withDBPackage(dp))
|
|
||||||
return &DBPackageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateOneID returns an update builder for the given id.
|
|
||||||
func (c *DBPackageClient) UpdateOneID(id int) *DBPackageUpdateOne {
|
|
||||||
mutation := newDBPackageMutation(c.config, OpUpdateOne, withDBPackageID(id))
|
|
||||||
return &DBPackageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete returns a delete builder for DBPackage.
|
|
||||||
func (c *DBPackageClient) Delete() *DBPackageDelete {
|
|
||||||
mutation := newDBPackageMutation(c.config, OpDelete)
|
|
||||||
return &DBPackageDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteOne returns a builder for deleting the given entity.
|
|
||||||
func (c *DBPackageClient) DeleteOne(dp *DBPackage) *DBPackageDeleteOne {
|
|
||||||
return c.DeleteOneID(dp.ID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteOneID returns a builder for deleting the given entity by its id.
|
|
||||||
func (c *DBPackageClient) DeleteOneID(id int) *DBPackageDeleteOne {
|
|
||||||
builder := c.Delete().Where(dbpackage.ID(id))
|
|
||||||
builder.mutation.id = &id
|
|
||||||
builder.mutation.op = OpDeleteOne
|
|
||||||
return &DBPackageDeleteOne{builder}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Query returns a query builder for DBPackage.
|
|
||||||
func (c *DBPackageClient) Query() *DBPackageQuery {
|
|
||||||
return &DBPackageQuery{
|
|
||||||
config: c.config,
|
|
||||||
ctx: &QueryContext{Type: TypeDBPackage},
|
|
||||||
inters: c.Interceptors(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get returns a DBPackage entity by its id.
|
|
||||||
func (c *DBPackageClient) Get(ctx context.Context, id int) (*DBPackage, error) {
|
|
||||||
return c.Query().Where(dbpackage.ID(id)).Only(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetX is like Get, but panics if an error occurs.
|
|
||||||
func (c *DBPackageClient) GetX(ctx context.Context, id int) *DBPackage {
|
|
||||||
obj, err := c.Get(ctx, id)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return obj
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hooks returns the client hooks.
|
|
||||||
func (c *DBPackageClient) Hooks() []Hook {
|
|
||||||
return c.hooks.DBPackage
|
|
||||||
}
|
|
||||||
|
|
||||||
// Interceptors returns the client interceptors.
|
|
||||||
func (c *DBPackageClient) Interceptors() []Interceptor {
|
|
||||||
return c.inters.DBPackage
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *DBPackageClient) mutate(ctx context.Context, m *DBPackageMutation) (Value, error) {
|
|
||||||
switch m.Op() {
|
|
||||||
case OpCreate:
|
|
||||||
return (&DBPackageCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
|
||||||
case OpUpdate:
|
|
||||||
return (&DBPackageUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
|
||||||
case OpUpdateOne:
|
|
||||||
return (&DBPackageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
|
||||||
case OpDelete, OpDeleteOne:
|
|
||||||
return (&DBPackageDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("ent: unknown DBPackage mutation op: %q", m.Op())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// hooks and interceptors per client, for fast access.
|
|
||||||
type (
|
|
||||||
hooks struct {
|
|
||||||
DBPackage []ent.Hook
|
|
||||||
}
|
|
||||||
inters struct {
|
|
||||||
DBPackage []ent.Interceptor
|
|
||||||
}
|
|
||||||
)
|
|
338
ent/dbpackage.go
338
ent/dbpackage.go
@@ -1,338 +0,0 @@
|
|||||||
// Code generated by ent, DO NOT EDIT.
|
|
||||||
|
|
||||||
package ent
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"entgo.io/ent"
|
|
||||||
"entgo.io/ent/dialect/sql"
|
|
||||||
"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DBPackage is the model entity for the DBPackage schema.
|
|
||||||
type DBPackage struct {
|
|
||||||
config `json:"-"`
|
|
||||||
// ID of the ent.
|
|
||||||
ID int `json:"id,omitempty"`
|
|
||||||
// Pkgbase holds the value of the "pkgbase" field.
|
|
||||||
Pkgbase string `json:"pkgbase,omitempty"`
|
|
||||||
// Packages holds the value of the "packages" field.
|
|
||||||
Packages []string `json:"packages,omitempty"`
|
|
||||||
// Status holds the value of the "status" field.
|
|
||||||
Status dbpackage.Status `json:"status,omitempty"`
|
|
||||||
// SkipReason holds the value of the "skip_reason" field.
|
|
||||||
SkipReason string `json:"skip_reason,omitempty"`
|
|
||||||
// Repository holds the value of the "repository" field.
|
|
||||||
Repository dbpackage.Repository `json:"repository,omitempty"`
|
|
||||||
// March holds the value of the "march" field.
|
|
||||||
March string `json:"march,omitempty"`
|
|
||||||
// Version holds the value of the "version" field.
|
|
||||||
Version string `json:"version,omitempty"`
|
|
||||||
// RepoVersion holds the value of the "repo_version" field.
|
|
||||||
RepoVersion string `json:"repo_version,omitempty"`
|
|
||||||
// BuildTimeStart holds the value of the "build_time_start" field.
|
|
||||||
BuildTimeStart time.Time `json:"build_time_start,omitempty"`
|
|
||||||
// Updated holds the value of the "updated" field.
|
|
||||||
Updated time.Time `json:"updated,omitempty"`
|
|
||||||
// Lto holds the value of the "lto" field.
|
|
||||||
Lto dbpackage.Lto `json:"lto,omitempty"`
|
|
||||||
// LastVersionBuild holds the value of the "last_version_build" field.
|
|
||||||
LastVersionBuild string `json:"last_version_build,omitempty"`
|
|
||||||
// LastVerified holds the value of the "last_verified" field.
|
|
||||||
LastVerified time.Time `json:"last_verified,omitempty"`
|
|
||||||
// DebugSymbols holds the value of the "debug_symbols" field.
|
|
||||||
DebugSymbols dbpackage.DebugSymbols `json:"debug_symbols,omitempty"`
|
|
||||||
// MaxRss holds the value of the "max_rss" field.
|
|
||||||
MaxRss *int64 `json:"max_rss,omitempty"`
|
|
||||||
// UTime holds the value of the "u_time" field.
|
|
||||||
UTime *int64 `json:"u_time,omitempty"`
|
|
||||||
// STime holds the value of the "s_time" field.
|
|
||||||
STime *int64 `json:"s_time,omitempty"`
|
|
||||||
// IoIn holds the value of the "io_in" field.
|
|
||||||
IoIn *int64 `json:"io_in,omitempty"`
|
|
||||||
// IoOut holds the value of the "io_out" field.
|
|
||||||
IoOut *int64 `json:"io_out,omitempty"`
|
|
||||||
// TagRev holds the value of the "tag_rev" field.
|
|
||||||
TagRev *string `json:"tag_rev,omitempty"`
|
|
||||||
selectValues sql.SelectValues
|
|
||||||
}
|
|
||||||
|
|
||||||
// scanValues returns the types for scanning values from sql.Rows.
|
|
||||||
func (*DBPackage) scanValues(columns []string) ([]any, error) {
|
|
||||||
values := make([]any, len(columns))
|
|
||||||
for i := range columns {
|
|
||||||
switch columns[i] {
|
|
||||||
case dbpackage.FieldPackages:
|
|
||||||
values[i] = new([]byte)
|
|
||||||
case dbpackage.FieldID, dbpackage.FieldMaxRss, dbpackage.FieldUTime, dbpackage.FieldSTime, dbpackage.FieldIoIn, dbpackage.FieldIoOut:
|
|
||||||
values[i] = new(sql.NullInt64)
|
|
||||||
case dbpackage.FieldPkgbase, dbpackage.FieldStatus, dbpackage.FieldSkipReason, dbpackage.FieldRepository, dbpackage.FieldMarch, dbpackage.FieldVersion, dbpackage.FieldRepoVersion, dbpackage.FieldLto, dbpackage.FieldLastVersionBuild, dbpackage.FieldDebugSymbols, dbpackage.FieldTagRev:
|
|
||||||
values[i] = new(sql.NullString)
|
|
||||||
case dbpackage.FieldBuildTimeStart, dbpackage.FieldUpdated, dbpackage.FieldLastVerified:
|
|
||||||
values[i] = new(sql.NullTime)
|
|
||||||
default:
|
|
||||||
values[i] = new(sql.UnknownType)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return values, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// assignValues assigns the values that were returned from sql.Rows (after scanning)
|
|
||||||
// to the DBPackage fields.
|
|
||||||
func (dp *DBPackage) assignValues(columns []string, values []any) error {
|
|
||||||
if m, n := len(values), len(columns); m < n {
|
|
||||||
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
|
|
||||||
}
|
|
||||||
for i := range columns {
|
|
||||||
switch columns[i] {
|
|
||||||
case dbpackage.FieldID:
|
|
||||||
value, ok := values[i].(*sql.NullInt64)
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("unexpected type %T for field id", value)
|
|
||||||
}
|
|
||||||
dp.ID = int(value.Int64)
|
|
||||||
case dbpackage.FieldPkgbase:
|
|
||||||
if value, ok := values[i].(*sql.NullString); !ok {
|
|
||||||
return fmt.Errorf("unexpected type %T for field pkgbase", values[i])
|
|
||||||
} else if value.Valid {
|
|
||||||
dp.Pkgbase = value.String
|
|
||||||
}
|
|
||||||
case dbpackage.FieldPackages:
|
|
||||||
if value, ok := values[i].(*[]byte); !ok {
|
|
||||||
return fmt.Errorf("unexpected type %T for field packages", values[i])
|
|
||||||
} else if value != nil && len(*value) > 0 {
|
|
||||||
if err := json.Unmarshal(*value, &dp.Packages); err != nil {
|
|
||||||
return fmt.Errorf("unmarshal field packages: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case dbpackage.FieldStatus:
|
|
||||||
if value, ok := values[i].(*sql.NullString); !ok {
|
|
||||||
return fmt.Errorf("unexpected type %T for field status", values[i])
|
|
||||||
} else if value.Valid {
|
|
||||||
dp.Status = dbpackage.Status(value.String)
|
|
||||||
}
|
|
||||||
case dbpackage.FieldSkipReason:
|
|
||||||
if value, ok := values[i].(*sql.NullString); !ok {
|
|
||||||
return fmt.Errorf("unexpected type %T for field skip_reason", values[i])
|
|
||||||
} else if value.Valid {
|
|
||||||
dp.SkipReason = value.String
|
|
||||||
}
|
|
||||||
case dbpackage.FieldRepository:
|
|
||||||
if value, ok := values[i].(*sql.NullString); !ok {
|
|
||||||
return fmt.Errorf("unexpected type %T for field repository", values[i])
|
|
||||||
} else if value.Valid {
|
|
||||||
dp.Repository = dbpackage.Repository(value.String)
|
|
||||||
}
|
|
||||||
case dbpackage.FieldMarch:
|
|
||||||
if value, ok := values[i].(*sql.NullString); !ok {
|
|
||||||
return fmt.Errorf("unexpected type %T for field march", values[i])
|
|
||||||
} else if value.Valid {
|
|
||||||
dp.March = value.String
|
|
||||||
}
|
|
||||||
case dbpackage.FieldVersion:
|
|
||||||
if value, ok := values[i].(*sql.NullString); !ok {
|
|
||||||
return fmt.Errorf("unexpected type %T for field version", values[i])
|
|
||||||
} else if value.Valid {
|
|
||||||
dp.Version = value.String
|
|
||||||
}
|
|
||||||
case dbpackage.FieldRepoVersion:
|
|
||||||
if value, ok := values[i].(*sql.NullString); !ok {
|
|
||||||
return fmt.Errorf("unexpected type %T for field repo_version", values[i])
|
|
||||||
} else if value.Valid {
|
|
||||||
dp.RepoVersion = value.String
|
|
||||||
}
|
|
||||||
case dbpackage.FieldBuildTimeStart:
|
|
||||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
|
||||||
return fmt.Errorf("unexpected type %T for field build_time_start", values[i])
|
|
||||||
} else if value.Valid {
|
|
||||||
dp.BuildTimeStart = value.Time
|
|
||||||
}
|
|
||||||
case dbpackage.FieldUpdated:
|
|
||||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
|
||||||
return fmt.Errorf("unexpected type %T for field updated", values[i])
|
|
||||||
} else if value.Valid {
|
|
||||||
dp.Updated = value.Time
|
|
||||||
}
|
|
||||||
case dbpackage.FieldLto:
|
|
||||||
if value, ok := values[i].(*sql.NullString); !ok {
|
|
||||||
return fmt.Errorf("unexpected type %T for field lto", values[i])
|
|
||||||
} else if value.Valid {
|
|
||||||
dp.Lto = dbpackage.Lto(value.String)
|
|
||||||
}
|
|
||||||
case dbpackage.FieldLastVersionBuild:
|
|
||||||
if value, ok := values[i].(*sql.NullString); !ok {
|
|
||||||
return fmt.Errorf("unexpected type %T for field last_version_build", values[i])
|
|
||||||
} else if value.Valid {
|
|
||||||
dp.LastVersionBuild = value.String
|
|
||||||
}
|
|
||||||
case dbpackage.FieldLastVerified:
|
|
||||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
|
||||||
return fmt.Errorf("unexpected type %T for field last_verified", values[i])
|
|
||||||
} else if value.Valid {
|
|
||||||
dp.LastVerified = value.Time
|
|
||||||
}
|
|
||||||
case dbpackage.FieldDebugSymbols:
|
|
||||||
if value, ok := values[i].(*sql.NullString); !ok {
|
|
||||||
return fmt.Errorf("unexpected type %T for field debug_symbols", values[i])
|
|
||||||
} else if value.Valid {
|
|
||||||
dp.DebugSymbols = dbpackage.DebugSymbols(value.String)
|
|
||||||
}
|
|
||||||
case dbpackage.FieldMaxRss:
|
|
||||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
|
||||||
return fmt.Errorf("unexpected type %T for field max_rss", values[i])
|
|
||||||
} else if value.Valid {
|
|
||||||
dp.MaxRss = new(int64)
|
|
||||||
*dp.MaxRss = value.Int64
|
|
||||||
}
|
|
||||||
case dbpackage.FieldUTime:
|
|
||||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
|
||||||
return fmt.Errorf("unexpected type %T for field u_time", values[i])
|
|
||||||
} else if value.Valid {
|
|
||||||
dp.UTime = new(int64)
|
|
||||||
*dp.UTime = value.Int64
|
|
||||||
}
|
|
||||||
case dbpackage.FieldSTime:
|
|
||||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
|
||||||
return fmt.Errorf("unexpected type %T for field s_time", values[i])
|
|
||||||
} else if value.Valid {
|
|
||||||
dp.STime = new(int64)
|
|
||||||
*dp.STime = value.Int64
|
|
||||||
}
|
|
||||||
case dbpackage.FieldIoIn:
|
|
||||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
|
||||||
return fmt.Errorf("unexpected type %T for field io_in", values[i])
|
|
||||||
} else if value.Valid {
|
|
||||||
dp.IoIn = new(int64)
|
|
||||||
*dp.IoIn = value.Int64
|
|
||||||
}
|
|
||||||
case dbpackage.FieldIoOut:
|
|
||||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
|
||||||
return fmt.Errorf("unexpected type %T for field io_out", values[i])
|
|
||||||
} else if value.Valid {
|
|
||||||
dp.IoOut = new(int64)
|
|
||||||
*dp.IoOut = value.Int64
|
|
||||||
}
|
|
||||||
case dbpackage.FieldTagRev:
|
|
||||||
if value, ok := values[i].(*sql.NullString); !ok {
|
|
||||||
return fmt.Errorf("unexpected type %T for field tag_rev", values[i])
|
|
||||||
} else if value.Valid {
|
|
||||||
dp.TagRev = new(string)
|
|
||||||
*dp.TagRev = value.String
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
dp.selectValues.Set(columns[i], values[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Value returns the ent.Value that was dynamically selected and assigned to the DBPackage.
|
|
||||||
// This includes values selected through modifiers, order, etc.
|
|
||||||
func (dp *DBPackage) Value(name string) (ent.Value, error) {
|
|
||||||
return dp.selectValues.Get(name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update returns a builder for updating this DBPackage.
|
|
||||||
// Note that you need to call DBPackage.Unwrap() before calling this method if this DBPackage
|
|
||||||
// was returned from a transaction, and the transaction was committed or rolled back.
|
|
||||||
func (dp *DBPackage) Update() *DBPackageUpdateOne {
|
|
||||||
return NewDBPackageClient(dp.config).UpdateOne(dp)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unwrap unwraps the DBPackage entity that was returned from a transaction after it was closed,
|
|
||||||
// so that all future queries will be executed through the driver which created the transaction.
|
|
||||||
func (dp *DBPackage) Unwrap() *DBPackage {
|
|
||||||
_tx, ok := dp.config.driver.(*txDriver)
|
|
||||||
if !ok {
|
|
||||||
panic("ent: DBPackage is not a transactional entity")
|
|
||||||
}
|
|
||||||
dp.config.driver = _tx.drv
|
|
||||||
return dp
|
|
||||||
}
|
|
||||||
|
|
||||||
// String implements the fmt.Stringer.
|
|
||||||
func (dp *DBPackage) String() string {
|
|
||||||
var builder strings.Builder
|
|
||||||
builder.WriteString("DBPackage(")
|
|
||||||
builder.WriteString(fmt.Sprintf("id=%v, ", dp.ID))
|
|
||||||
builder.WriteString("pkgbase=")
|
|
||||||
builder.WriteString(dp.Pkgbase)
|
|
||||||
builder.WriteString(", ")
|
|
||||||
builder.WriteString("packages=")
|
|
||||||
builder.WriteString(fmt.Sprintf("%v", dp.Packages))
|
|
||||||
builder.WriteString(", ")
|
|
||||||
builder.WriteString("status=")
|
|
||||||
builder.WriteString(fmt.Sprintf("%v", dp.Status))
|
|
||||||
builder.WriteString(", ")
|
|
||||||
builder.WriteString("skip_reason=")
|
|
||||||
builder.WriteString(dp.SkipReason)
|
|
||||||
builder.WriteString(", ")
|
|
||||||
builder.WriteString("repository=")
|
|
||||||
builder.WriteString(fmt.Sprintf("%v", dp.Repository))
|
|
||||||
builder.WriteString(", ")
|
|
||||||
builder.WriteString("march=")
|
|
||||||
builder.WriteString(dp.March)
|
|
||||||
builder.WriteString(", ")
|
|
||||||
builder.WriteString("version=")
|
|
||||||
builder.WriteString(dp.Version)
|
|
||||||
builder.WriteString(", ")
|
|
||||||
builder.WriteString("repo_version=")
|
|
||||||
builder.WriteString(dp.RepoVersion)
|
|
||||||
builder.WriteString(", ")
|
|
||||||
builder.WriteString("build_time_start=")
|
|
||||||
builder.WriteString(dp.BuildTimeStart.Format(time.ANSIC))
|
|
||||||
builder.WriteString(", ")
|
|
||||||
builder.WriteString("updated=")
|
|
||||||
builder.WriteString(dp.Updated.Format(time.ANSIC))
|
|
||||||
builder.WriteString(", ")
|
|
||||||
builder.WriteString("lto=")
|
|
||||||
builder.WriteString(fmt.Sprintf("%v", dp.Lto))
|
|
||||||
builder.WriteString(", ")
|
|
||||||
builder.WriteString("last_version_build=")
|
|
||||||
builder.WriteString(dp.LastVersionBuild)
|
|
||||||
builder.WriteString(", ")
|
|
||||||
builder.WriteString("last_verified=")
|
|
||||||
builder.WriteString(dp.LastVerified.Format(time.ANSIC))
|
|
||||||
builder.WriteString(", ")
|
|
||||||
builder.WriteString("debug_symbols=")
|
|
||||||
builder.WriteString(fmt.Sprintf("%v", dp.DebugSymbols))
|
|
||||||
builder.WriteString(", ")
|
|
||||||
if v := dp.MaxRss; v != nil {
|
|
||||||
builder.WriteString("max_rss=")
|
|
||||||
builder.WriteString(fmt.Sprintf("%v", *v))
|
|
||||||
}
|
|
||||||
builder.WriteString(", ")
|
|
||||||
if v := dp.UTime; v != nil {
|
|
||||||
builder.WriteString("u_time=")
|
|
||||||
builder.WriteString(fmt.Sprintf("%v", *v))
|
|
||||||
}
|
|
||||||
builder.WriteString(", ")
|
|
||||||
if v := dp.STime; v != nil {
|
|
||||||
builder.WriteString("s_time=")
|
|
||||||
builder.WriteString(fmt.Sprintf("%v", *v))
|
|
||||||
}
|
|
||||||
builder.WriteString(", ")
|
|
||||||
if v := dp.IoIn; v != nil {
|
|
||||||
builder.WriteString("io_in=")
|
|
||||||
builder.WriteString(fmt.Sprintf("%v", *v))
|
|
||||||
}
|
|
||||||
builder.WriteString(", ")
|
|
||||||
if v := dp.IoOut; v != nil {
|
|
||||||
builder.WriteString("io_out=")
|
|
||||||
builder.WriteString(fmt.Sprintf("%v", *v))
|
|
||||||
}
|
|
||||||
builder.WriteString(", ")
|
|
||||||
if v := dp.TagRev; v != nil {
|
|
||||||
builder.WriteString("tag_rev=")
|
|
||||||
builder.WriteString(*v)
|
|
||||||
}
|
|
||||||
builder.WriteByte(')')
|
|
||||||
return builder.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// DBPackages is a parsable slice of DBPackage.
// It is the collection type returned by list queries over DBPackage entities.
type DBPackages []*DBPackage
|
|
@@ -1,315 +0,0 @@
|
|||||||
// Code generated by ent, DO NOT EDIT.
|
|
||||||
|
|
||||||
package dbpackage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"entgo.io/ent/dialect/sql"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Database metadata for the DBPackage entity: the entity label, one
// string constant per column, and the table name.
const (
	// Label holds the string label denoting the dbpackage type in the database.
	Label = "db_package"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldPkgbase holds the string denoting the pkgbase field in the database.
	FieldPkgbase = "pkgbase"
	// FieldPackages holds the string denoting the packages field in the database.
	FieldPackages = "packages"
	// FieldStatus holds the string denoting the status field in the database.
	FieldStatus = "status"
	// FieldSkipReason holds the string denoting the skip_reason field in the database.
	FieldSkipReason = "skip_reason"
	// FieldRepository holds the string denoting the repository field in the database.
	FieldRepository = "repository"
	// FieldMarch holds the string denoting the march field in the database.
	FieldMarch = "march"
	// FieldVersion holds the string denoting the version field in the database.
	FieldVersion = "version"
	// FieldRepoVersion holds the string denoting the repo_version field in the database.
	FieldRepoVersion = "repo_version"
	// FieldBuildTimeStart holds the string denoting the build_time_start field in the database.
	FieldBuildTimeStart = "build_time_start"
	// FieldUpdated holds the string denoting the updated field in the database.
	FieldUpdated = "updated"
	// FieldLto holds the string denoting the lto field in the database.
	FieldLto = "lto"
	// FieldLastVersionBuild holds the string denoting the last_version_build field in the database.
	FieldLastVersionBuild = "last_version_build"
	// FieldLastVerified holds the string denoting the last_verified field in the database.
	FieldLastVerified = "last_verified"
	// FieldDebugSymbols holds the string denoting the debug_symbols field in the database.
	FieldDebugSymbols = "debug_symbols"
	// FieldMaxRss holds the string denoting the max_rss field in the database.
	FieldMaxRss = "max_rss"
	// FieldUTime holds the string denoting the u_time field in the database.
	FieldUTime = "u_time"
	// FieldSTime holds the string denoting the s_time field in the database.
	FieldSTime = "s_time"
	// FieldIoIn holds the string denoting the io_in field in the database.
	FieldIoIn = "io_in"
	// FieldIoOut holds the string denoting the io_out field in the database.
	FieldIoOut = "io_out"
	// FieldTagRev holds the string denoting the tag_rev field in the database.
	FieldTagRev = "tag_rev"
	// Table holds the table name of the dbpackage in the database.
	Table = "db_packages"
)

// Columns holds all SQL columns for dbpackage fields.
var Columns = []string{
	FieldID,
	FieldPkgbase,
	FieldPackages,
	FieldStatus,
	FieldSkipReason,
	FieldRepository,
	FieldMarch,
	FieldVersion,
	FieldRepoVersion,
	FieldBuildTimeStart,
	FieldUpdated,
	FieldLto,
	FieldLastVersionBuild,
	FieldLastVerified,
	FieldDebugSymbols,
	FieldMaxRss,
	FieldUTime,
	FieldSTime,
	FieldIoIn,
	FieldIoOut,
	FieldTagRev,
}

// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
	for _, c := range Columns {
		if c == column {
			return true
		}
	}
	return false
}
|
|
||||||
|
|
||||||
// Hook points for user-defined field validators; the schema package
// assigns these at init time and the builders invoke them before save.
var (
	// PkgbaseValidator is a validator for the "pkgbase" field. It is called by the builders before save.
	PkgbaseValidator func(string) error
	// MarchValidator is a validator for the "march" field. It is called by the builders before save.
	MarchValidator func(string) error
)
|
|
||||||
|
|
||||||
// Status defines the type for the "status" enum field.
type Status string

// DefaultStatus is the default value of the Status enum (StatusUnknown).
const DefaultStatus = StatusUnknown

// Status values.
const (
	StatusSkipped  Status = "skipped"
	StatusFailed   Status = "failed"
	StatusBuilt    Status = "built"
	StatusQueued   Status = "queued"
	StatusDelayed  Status = "delayed"
	StatusBuilding Status = "building"
	StatusLatest   Status = "latest"
	StatusSigning  Status = "signing"
	StatusUnknown  Status = "unknown"
)

// String implements fmt.Stringer and returns the raw enum value.
func (s Status) String() string {
	return string(s)
}

// StatusValidator is a validator for the "status" field enum values. It is called by the builders before save.
func StatusValidator(s Status) error {
	switch s {
	case StatusSkipped, StatusFailed, StatusBuilt, StatusQueued, StatusDelayed, StatusBuilding, StatusLatest, StatusSigning, StatusUnknown:
		return nil
	}
	return fmt.Errorf("dbpackage: invalid enum value for status field: %q", s)
}
|
|
||||||
|
|
||||||
// Repository defines the type for the "repository" enum field.
type Repository string

// Repository values.
const (
	RepositoryExtra    Repository = "extra"
	RepositoryCore     Repository = "core"
	RepositoryMultilib Repository = "multilib"
)

// String implements fmt.Stringer and returns the raw enum value.
func (r Repository) String() string {
	return string(r)
}

// RepositoryValidator is a validator for the "repository" field enum values. It is called by the builders before save.
func RepositoryValidator(r Repository) error {
	switch r {
	case RepositoryExtra, RepositoryCore, RepositoryMultilib:
		return nil
	}
	return fmt.Errorf("dbpackage: invalid enum value for repository field: %q", r)
}
|
|
||||||
|
|
||||||
// Lto defines the type for the "lto" enum field.
type Lto string

// DefaultLto is the default value of the Lto enum (LtoUnknown).
const DefaultLto = LtoUnknown

// Lto values.
const (
	LtoEnabled      Lto = "enabled"
	LtoUnknown      Lto = "unknown"
	LtoDisabled     Lto = "disabled"
	LtoAutoDisabled Lto = "auto_disabled"
)

// String implements fmt.Stringer and returns the raw enum value.
func (l Lto) String() string {
	return string(l)
}

// LtoValidator is a validator for the "lto" field enum values. It is called by the builders before save.
func LtoValidator(l Lto) error {
	switch l {
	case LtoEnabled, LtoUnknown, LtoDisabled, LtoAutoDisabled:
		return nil
	}
	return fmt.Errorf("dbpackage: invalid enum value for lto field: %q", l)
}
|
|
||||||
|
|
||||||
// DebugSymbols defines the type for the "debug_symbols" enum field.
type DebugSymbols string

// DefaultDebugSymbols is the default value of the DebugSymbols enum (DebugSymbolsUnknown).
const DefaultDebugSymbols = DebugSymbolsUnknown

// DebugSymbols values.
const (
	DebugSymbolsAvailable    DebugSymbols = "available"
	DebugSymbolsUnknown      DebugSymbols = "unknown"
	DebugSymbolsNotAvailable DebugSymbols = "not_available"
)

// String implements fmt.Stringer and returns the raw enum value.
func (ds DebugSymbols) String() string {
	return string(ds)
}

// DebugSymbolsValidator is a validator for the "debug_symbols" field enum values. It is called by the builders before save.
func DebugSymbolsValidator(ds DebugSymbols) error {
	switch ds {
	case DebugSymbolsAvailable, DebugSymbolsUnknown, DebugSymbolsNotAvailable:
		return nil
	}
	return fmt.Errorf("dbpackage: invalid enum value for debug_symbols field: %q", ds)
}
|
|
||||||
|
|
||||||
// OrderOption defines the ordering options for the DBPackage queries.
// Each By* helper below delegates to sql.OrderByField for one column.
// Note: the JSON "packages" field intentionally has no order option.
type OrderOption func(*sql.Selector)

// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByPkgbase orders the results by the pkgbase field.
func ByPkgbase(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldPkgbase, opts...).ToFunc()
}

// ByStatus orders the results by the status field.
func ByStatus(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldStatus, opts...).ToFunc()
}

// BySkipReason orders the results by the skip_reason field.
func BySkipReason(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldSkipReason, opts...).ToFunc()
}

// ByRepository orders the results by the repository field.
func ByRepository(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldRepository, opts...).ToFunc()
}

// ByMarch orders the results by the march field.
func ByMarch(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldMarch, opts...).ToFunc()
}

// ByVersion orders the results by the version field.
func ByVersion(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldVersion, opts...).ToFunc()
}

// ByRepoVersion orders the results by the repo_version field.
func ByRepoVersion(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldRepoVersion, opts...).ToFunc()
}

// ByBuildTimeStart orders the results by the build_time_start field.
func ByBuildTimeStart(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldBuildTimeStart, opts...).ToFunc()
}

// ByUpdated orders the results by the updated field.
func ByUpdated(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUpdated, opts...).ToFunc()
}

// ByLto orders the results by the lto field.
func ByLto(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldLto, opts...).ToFunc()
}

// ByLastVersionBuild orders the results by the last_version_build field.
func ByLastVersionBuild(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldLastVersionBuild, opts...).ToFunc()
}

// ByLastVerified orders the results by the last_verified field.
func ByLastVerified(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldLastVerified, opts...).ToFunc()
}

// ByDebugSymbols orders the results by the debug_symbols field.
func ByDebugSymbols(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldDebugSymbols, opts...).ToFunc()
}

// ByMaxRss orders the results by the max_rss field.
func ByMaxRss(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldMaxRss, opts...).ToFunc()
}

// ByUTime orders the results by the u_time field.
func ByUTime(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUTime, opts...).ToFunc()
}

// BySTime orders the results by the s_time field.
func BySTime(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldSTime, opts...).ToFunc()
}

// ByIoIn orders the results by the io_in field.
func ByIoIn(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldIoIn, opts...).ToFunc()
}

// ByIoOut orders the results by the io_out field.
func ByIoOut(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldIoOut, opts...).ToFunc()
}

// ByTagRev orders the results by the tag_rev field.
func ByTagRev(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldTagRev, opts...).ToFunc()
}
|
|
File diff suppressed because it is too large
Load Diff
@@ -1,556 +0,0 @@
|
|||||||
// Code generated by ent, DO NOT EDIT.
|
|
||||||
|
|
||||||
package ent
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
|
||||||
"entgo.io/ent/schema/field"
|
|
||||||
"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DBPackageCreate is the builder for creating a DBPackage entity.
type DBPackageCreate struct {
	config
	// mutation accumulates the field values set on this builder.
	mutation *DBPackageMutation
	// hooks run around the save operation.
	hooks []Hook
}
|
|
||||||
|
|
||||||
// SetPkgbase sets the "pkgbase" field.
|
|
||||||
func (dpc *DBPackageCreate) SetPkgbase(s string) *DBPackageCreate {
|
|
||||||
dpc.mutation.SetPkgbase(s)
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetPackages sets the "packages" field.
|
|
||||||
func (dpc *DBPackageCreate) SetPackages(s []string) *DBPackageCreate {
|
|
||||||
dpc.mutation.SetPackages(s)
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetStatus sets the "status" field.
|
|
||||||
func (dpc *DBPackageCreate) SetStatus(d dbpackage.Status) *DBPackageCreate {
|
|
||||||
dpc.mutation.SetStatus(d)
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetNillableStatus sets the "status" field if the given value is not nil.
|
|
||||||
func (dpc *DBPackageCreate) SetNillableStatus(d *dbpackage.Status) *DBPackageCreate {
|
|
||||||
if d != nil {
|
|
||||||
dpc.SetStatus(*d)
|
|
||||||
}
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetSkipReason sets the "skip_reason" field.
|
|
||||||
func (dpc *DBPackageCreate) SetSkipReason(s string) *DBPackageCreate {
|
|
||||||
dpc.mutation.SetSkipReason(s)
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetNillableSkipReason sets the "skip_reason" field if the given value is not nil.
|
|
||||||
func (dpc *DBPackageCreate) SetNillableSkipReason(s *string) *DBPackageCreate {
|
|
||||||
if s != nil {
|
|
||||||
dpc.SetSkipReason(*s)
|
|
||||||
}
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetRepository sets the "repository" field.
|
|
||||||
func (dpc *DBPackageCreate) SetRepository(d dbpackage.Repository) *DBPackageCreate {
|
|
||||||
dpc.mutation.SetRepository(d)
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetMarch sets the "march" field.
|
|
||||||
func (dpc *DBPackageCreate) SetMarch(s string) *DBPackageCreate {
|
|
||||||
dpc.mutation.SetMarch(s)
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetVersion sets the "version" field.
|
|
||||||
func (dpc *DBPackageCreate) SetVersion(s string) *DBPackageCreate {
|
|
||||||
dpc.mutation.SetVersion(s)
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetNillableVersion sets the "version" field if the given value is not nil.
|
|
||||||
func (dpc *DBPackageCreate) SetNillableVersion(s *string) *DBPackageCreate {
|
|
||||||
if s != nil {
|
|
||||||
dpc.SetVersion(*s)
|
|
||||||
}
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetRepoVersion sets the "repo_version" field.
|
|
||||||
func (dpc *DBPackageCreate) SetRepoVersion(s string) *DBPackageCreate {
|
|
||||||
dpc.mutation.SetRepoVersion(s)
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetNillableRepoVersion sets the "repo_version" field if the given value is not nil.
|
|
||||||
func (dpc *DBPackageCreate) SetNillableRepoVersion(s *string) *DBPackageCreate {
|
|
||||||
if s != nil {
|
|
||||||
dpc.SetRepoVersion(*s)
|
|
||||||
}
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetBuildTimeStart sets the "build_time_start" field.
|
|
||||||
func (dpc *DBPackageCreate) SetBuildTimeStart(t time.Time) *DBPackageCreate {
|
|
||||||
dpc.mutation.SetBuildTimeStart(t)
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetNillableBuildTimeStart sets the "build_time_start" field if the given value is not nil.
|
|
||||||
func (dpc *DBPackageCreate) SetNillableBuildTimeStart(t *time.Time) *DBPackageCreate {
|
|
||||||
if t != nil {
|
|
||||||
dpc.SetBuildTimeStart(*t)
|
|
||||||
}
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetUpdated sets the "updated" field.
|
|
||||||
func (dpc *DBPackageCreate) SetUpdated(t time.Time) *DBPackageCreate {
|
|
||||||
dpc.mutation.SetUpdated(t)
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetNillableUpdated sets the "updated" field if the given value is not nil.
|
|
||||||
func (dpc *DBPackageCreate) SetNillableUpdated(t *time.Time) *DBPackageCreate {
|
|
||||||
if t != nil {
|
|
||||||
dpc.SetUpdated(*t)
|
|
||||||
}
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetLto sets the "lto" field.
|
|
||||||
func (dpc *DBPackageCreate) SetLto(d dbpackage.Lto) *DBPackageCreate {
|
|
||||||
dpc.mutation.SetLto(d)
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetNillableLto sets the "lto" field if the given value is not nil.
|
|
||||||
func (dpc *DBPackageCreate) SetNillableLto(d *dbpackage.Lto) *DBPackageCreate {
|
|
||||||
if d != nil {
|
|
||||||
dpc.SetLto(*d)
|
|
||||||
}
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetLastVersionBuild sets the "last_version_build" field.
|
|
||||||
func (dpc *DBPackageCreate) SetLastVersionBuild(s string) *DBPackageCreate {
|
|
||||||
dpc.mutation.SetLastVersionBuild(s)
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetNillableLastVersionBuild sets the "last_version_build" field if the given value is not nil.
|
|
||||||
func (dpc *DBPackageCreate) SetNillableLastVersionBuild(s *string) *DBPackageCreate {
|
|
||||||
if s != nil {
|
|
||||||
dpc.SetLastVersionBuild(*s)
|
|
||||||
}
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetLastVerified sets the "last_verified" field.
|
|
||||||
func (dpc *DBPackageCreate) SetLastVerified(t time.Time) *DBPackageCreate {
|
|
||||||
dpc.mutation.SetLastVerified(t)
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetNillableLastVerified sets the "last_verified" field if the given value is not nil.
|
|
||||||
func (dpc *DBPackageCreate) SetNillableLastVerified(t *time.Time) *DBPackageCreate {
|
|
||||||
if t != nil {
|
|
||||||
dpc.SetLastVerified(*t)
|
|
||||||
}
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetDebugSymbols sets the "debug_symbols" field.
|
|
||||||
func (dpc *DBPackageCreate) SetDebugSymbols(ds dbpackage.DebugSymbols) *DBPackageCreate {
|
|
||||||
dpc.mutation.SetDebugSymbols(ds)
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetNillableDebugSymbols sets the "debug_symbols" field if the given value is not nil.
|
|
||||||
func (dpc *DBPackageCreate) SetNillableDebugSymbols(ds *dbpackage.DebugSymbols) *DBPackageCreate {
|
|
||||||
if ds != nil {
|
|
||||||
dpc.SetDebugSymbols(*ds)
|
|
||||||
}
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetMaxRss sets the "max_rss" field.
|
|
||||||
func (dpc *DBPackageCreate) SetMaxRss(i int64) *DBPackageCreate {
|
|
||||||
dpc.mutation.SetMaxRss(i)
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetNillableMaxRss sets the "max_rss" field if the given value is not nil.
|
|
||||||
func (dpc *DBPackageCreate) SetNillableMaxRss(i *int64) *DBPackageCreate {
|
|
||||||
if i != nil {
|
|
||||||
dpc.SetMaxRss(*i)
|
|
||||||
}
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetUTime sets the "u_time" field.
|
|
||||||
func (dpc *DBPackageCreate) SetUTime(i int64) *DBPackageCreate {
|
|
||||||
dpc.mutation.SetUTime(i)
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetNillableUTime sets the "u_time" field if the given value is not nil.
|
|
||||||
func (dpc *DBPackageCreate) SetNillableUTime(i *int64) *DBPackageCreate {
|
|
||||||
if i != nil {
|
|
||||||
dpc.SetUTime(*i)
|
|
||||||
}
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetSTime sets the "s_time" field.
|
|
||||||
func (dpc *DBPackageCreate) SetSTime(i int64) *DBPackageCreate {
|
|
||||||
dpc.mutation.SetSTime(i)
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetNillableSTime sets the "s_time" field if the given value is not nil.
|
|
||||||
func (dpc *DBPackageCreate) SetNillableSTime(i *int64) *DBPackageCreate {
|
|
||||||
if i != nil {
|
|
||||||
dpc.SetSTime(*i)
|
|
||||||
}
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetIoIn sets the "io_in" field.
|
|
||||||
func (dpc *DBPackageCreate) SetIoIn(i int64) *DBPackageCreate {
|
|
||||||
dpc.mutation.SetIoIn(i)
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetNillableIoIn sets the "io_in" field if the given value is not nil.
|
|
||||||
func (dpc *DBPackageCreate) SetNillableIoIn(i *int64) *DBPackageCreate {
|
|
||||||
if i != nil {
|
|
||||||
dpc.SetIoIn(*i)
|
|
||||||
}
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetIoOut sets the "io_out" field.
|
|
||||||
func (dpc *DBPackageCreate) SetIoOut(i int64) *DBPackageCreate {
|
|
||||||
dpc.mutation.SetIoOut(i)
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetNillableIoOut sets the "io_out" field if the given value is not nil.
|
|
||||||
func (dpc *DBPackageCreate) SetNillableIoOut(i *int64) *DBPackageCreate {
|
|
||||||
if i != nil {
|
|
||||||
dpc.SetIoOut(*i)
|
|
||||||
}
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetTagRev sets the "tag_rev" field.
|
|
||||||
func (dpc *DBPackageCreate) SetTagRev(s string) *DBPackageCreate {
|
|
||||||
dpc.mutation.SetTagRev(s)
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetNillableTagRev sets the "tag_rev" field if the given value is not nil.
|
|
||||||
func (dpc *DBPackageCreate) SetNillableTagRev(s *string) *DBPackageCreate {
|
|
||||||
if s != nil {
|
|
||||||
dpc.SetTagRev(*s)
|
|
||||||
}
|
|
||||||
return dpc
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mutation returns the DBPackageMutation object of the builder.
|
|
||||||
func (dpc *DBPackageCreate) Mutation() *DBPackageMutation {
|
|
||||||
return dpc.mutation
|
|
||||||
}
|
|
||||||
|
|
||||||
// Save creates the DBPackage in the database.
|
|
||||||
func (dpc *DBPackageCreate) Save(ctx context.Context) (*DBPackage, error) {
|
|
||||||
dpc.defaults()
|
|
||||||
return withHooks(ctx, dpc.sqlSave, dpc.mutation, dpc.hooks)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SaveX calls Save and panics if Save returns an error.
|
|
||||||
func (dpc *DBPackageCreate) SaveX(ctx context.Context) *DBPackage {
|
|
||||||
v, err := dpc.Save(ctx)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Exec executes the query.
|
|
||||||
func (dpc *DBPackageCreate) Exec(ctx context.Context) error {
|
|
||||||
_, err := dpc.Save(ctx)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExecX is like Exec, but panics if an error occurs.
|
|
||||||
func (dpc *DBPackageCreate) ExecX(ctx context.Context) {
|
|
||||||
if err := dpc.Exec(ctx); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// defaults sets the default values of the builder before save.
|
|
||||||
func (dpc *DBPackageCreate) defaults() {
|
|
||||||
if _, ok := dpc.mutation.Status(); !ok {
|
|
||||||
v := dbpackage.DefaultStatus
|
|
||||||
dpc.mutation.SetStatus(v)
|
|
||||||
}
|
|
||||||
if _, ok := dpc.mutation.Lto(); !ok {
|
|
||||||
v := dbpackage.DefaultLto
|
|
||||||
dpc.mutation.SetLto(v)
|
|
||||||
}
|
|
||||||
if _, ok := dpc.mutation.DebugSymbols(); !ok {
|
|
||||||
v := dbpackage.DefaultDebugSymbols
|
|
||||||
dpc.mutation.SetDebugSymbols(v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// check runs all checks and user-defined validators on the builder.
// Required fields (pkgbase, repository, march) must be present; enum and
// user-defined validators run only on fields that were actually set.
// The first failing check determines the returned error.
func (dpc *DBPackageCreate) check() error {
	if _, ok := dpc.mutation.Pkgbase(); !ok {
		return &ValidationError{Name: "pkgbase", err: errors.New(`ent: missing required field "DBPackage.pkgbase"`)}
	}
	if v, ok := dpc.mutation.Pkgbase(); ok {
		if err := dbpackage.PkgbaseValidator(v); err != nil {
			return &ValidationError{Name: "pkgbase", err: fmt.Errorf(`ent: validator failed for field "DBPackage.pkgbase": %w`, err)}
		}
	}
	if v, ok := dpc.mutation.Status(); ok {
		if err := dbpackage.StatusValidator(v); err != nil {
			return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "DBPackage.status": %w`, err)}
		}
	}
	if _, ok := dpc.mutation.Repository(); !ok {
		return &ValidationError{Name: "repository", err: errors.New(`ent: missing required field "DBPackage.repository"`)}
	}
	if v, ok := dpc.mutation.Repository(); ok {
		if err := dbpackage.RepositoryValidator(v); err != nil {
			return &ValidationError{Name: "repository", err: fmt.Errorf(`ent: validator failed for field "DBPackage.repository": %w`, err)}
		}
	}
	if _, ok := dpc.mutation.March(); !ok {
		return &ValidationError{Name: "march", err: errors.New(`ent: missing required field "DBPackage.march"`)}
	}
	if v, ok := dpc.mutation.March(); ok {
		if err := dbpackage.MarchValidator(v); err != nil {
			return &ValidationError{Name: "march", err: fmt.Errorf(`ent: validator failed for field "DBPackage.march": %w`, err)}
		}
	}
	if v, ok := dpc.mutation.Lto(); ok {
		if err := dbpackage.LtoValidator(v); err != nil {
			return &ValidationError{Name: "lto", err: fmt.Errorf(`ent: validator failed for field "DBPackage.lto": %w`, err)}
		}
	}
	if v, ok := dpc.mutation.DebugSymbols(); ok {
		if err := dbpackage.DebugSymbolsValidator(v); err != nil {
			return &ValidationError{Name: "debug_symbols", err: fmt.Errorf(`ent: validator failed for field "DBPackage.debug_symbols": %w`, err)}
		}
	}
	return nil
}
|
|
||||||
|
|
||||||
// sqlSave validates the builder, inserts the node via sqlgraph, and
// copies the database-assigned id back onto the node and the mutation.
func (dpc *DBPackageCreate) sqlSave(ctx context.Context) (*DBPackage, error) {
	if err := dpc.check(); err != nil {
		return nil, err
	}
	_node, _spec := dpc.createSpec()
	if err := sqlgraph.CreateNode(ctx, dpc.driver, _spec); err != nil {
		// Surface constraint violations as the ent ConstraintError type.
		if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	// The driver reports the new primary key as int64; the entity uses int.
	id := _spec.ID.Value.(int64)
	_node.ID = int(id)
	dpc.mutation.id = &_node.ID
	dpc.mutation.done = true
	return _node, nil
}
|
|
||||||
|
|
||||||
// createSpec translates the mutation into an in-memory DBPackage node and
// the sqlgraph.CreateSpec used to perform the INSERT. Only fields that were
// set on the mutation are included. Nullable numeric/string fields are
// stored on the node as pointers (&value); the rest are stored by value.
func (dpc *DBPackageCreate) createSpec() (*DBPackage, *sqlgraph.CreateSpec) {
	var (
		_node = &DBPackage{config: dpc.config}
		_spec = sqlgraph.NewCreateSpec(dbpackage.Table, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt))
	)
	if value, ok := dpc.mutation.Pkgbase(); ok {
		_spec.SetField(dbpackage.FieldPkgbase, field.TypeString, value)
		_node.Pkgbase = value
	}
	if value, ok := dpc.mutation.Packages(); ok {
		_spec.SetField(dbpackage.FieldPackages, field.TypeJSON, value)
		_node.Packages = value
	}
	if value, ok := dpc.mutation.Status(); ok {
		_spec.SetField(dbpackage.FieldStatus, field.TypeEnum, value)
		_node.Status = value
	}
	if value, ok := dpc.mutation.SkipReason(); ok {
		_spec.SetField(dbpackage.FieldSkipReason, field.TypeString, value)
		_node.SkipReason = value
	}
	if value, ok := dpc.mutation.Repository(); ok {
		_spec.SetField(dbpackage.FieldRepository, field.TypeEnum, value)
		_node.Repository = value
	}
	if value, ok := dpc.mutation.March(); ok {
		_spec.SetField(dbpackage.FieldMarch, field.TypeString, value)
		_node.March = value
	}
	if value, ok := dpc.mutation.Version(); ok {
		_spec.SetField(dbpackage.FieldVersion, field.TypeString, value)
		_node.Version = value
	}
	if value, ok := dpc.mutation.RepoVersion(); ok {
		_spec.SetField(dbpackage.FieldRepoVersion, field.TypeString, value)
		_node.RepoVersion = value
	}
	if value, ok := dpc.mutation.BuildTimeStart(); ok {
		_spec.SetField(dbpackage.FieldBuildTimeStart, field.TypeTime, value)
		_node.BuildTimeStart = value
	}
	if value, ok := dpc.mutation.Updated(); ok {
		_spec.SetField(dbpackage.FieldUpdated, field.TypeTime, value)
		_node.Updated = value
	}
	if value, ok := dpc.mutation.Lto(); ok {
		_spec.SetField(dbpackage.FieldLto, field.TypeEnum, value)
		_node.Lto = value
	}
	if value, ok := dpc.mutation.LastVersionBuild(); ok {
		_spec.SetField(dbpackage.FieldLastVersionBuild, field.TypeString, value)
		_node.LastVersionBuild = value
	}
	if value, ok := dpc.mutation.LastVerified(); ok {
		_spec.SetField(dbpackage.FieldLastVerified, field.TypeTime, value)
		_node.LastVerified = value
	}
	if value, ok := dpc.mutation.DebugSymbols(); ok {
		_spec.SetField(dbpackage.FieldDebugSymbols, field.TypeEnum, value)
		_node.DebugSymbols = value
	}
	if value, ok := dpc.mutation.MaxRss(); ok {
		_spec.SetField(dbpackage.FieldMaxRss, field.TypeInt64, value)
		_node.MaxRss = &value
	}
	if value, ok := dpc.mutation.UTime(); ok {
		_spec.SetField(dbpackage.FieldUTime, field.TypeInt64, value)
		_node.UTime = &value
	}
	if value, ok := dpc.mutation.STime(); ok {
		_spec.SetField(dbpackage.FieldSTime, field.TypeInt64, value)
		_node.STime = &value
	}
	if value, ok := dpc.mutation.IoIn(); ok {
		_spec.SetField(dbpackage.FieldIoIn, field.TypeInt64, value)
		_node.IoIn = &value
	}
	if value, ok := dpc.mutation.IoOut(); ok {
		_spec.SetField(dbpackage.FieldIoOut, field.TypeInt64, value)
		_node.IoOut = &value
	}
	if value, ok := dpc.mutation.TagRev(); ok {
		_spec.SetField(dbpackage.FieldTagRev, field.TypeString, value)
		_node.TagRev = &value
	}
	return _node, _spec
}
|
|
||||||
|
|
||||||
// DBPackageCreateBulk is the builder for creating many DBPackage entities in bulk.
type DBPackageCreateBulk struct {
	config
	// err holds a deferred construction error; Save returns it before doing any work.
	err error
	// builders holds one DBPackageCreate per entity to insert.
	builders []*DBPackageCreate
}
|
|
||||||
|
|
||||||
// Save creates the DBPackage entities in the database.
//
// Each builder is wrapped in a Mutator; mutator i invokes mutator i+1, so all
// hooks run first and the final mutator in the chain performs one batched
// INSERT via sqlgraph.BatchCreate.
func (dpcb *DBPackageCreateBulk) Save(ctx context.Context) ([]*DBPackage, error) {
	if dpcb.err != nil {
		return nil, dpcb.err
	}
	specs := make([]*sqlgraph.CreateSpec, len(dpcb.builders))
	nodes := make([]*DBPackage, len(dpcb.builders))
	mutators := make([]Mutator, len(dpcb.builders))
	for i := range dpcb.builders {
		func(i int, root context.Context) {
			builder := dpcb.builders[i]
			builder.defaults()
			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
				mutation, ok := m.(*DBPackageMutation)
				if !ok {
					return nil, fmt.Errorf("unexpected mutation type %T", m)
				}
				if err := builder.check(); err != nil {
					return nil, err
				}
				builder.mutation = mutation
				var err error
				nodes[i], specs[i] = builder.createSpec()
				if i < len(mutators)-1 {
					// Not the last builder: hand control to the next mutator in the chain.
					_, err = mutators[i+1].Mutate(root, dpcb.builders[i+1].mutation)
				} else {
					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
					// Invoke the actual operation on the latest mutation in the chain.
					if err = sqlgraph.BatchCreate(ctx, dpcb.driver, spec); err != nil {
						if sqlgraph.IsConstraintError(err) {
							err = &ConstraintError{msg: err.Error(), wrap: err}
						}
					}
				}
				if err != nil {
					return nil, err
				}
				mutation.id = &nodes[i].ID
				// If the database returned an auto-generated id, copy it onto the node.
				if specs[i].ID.Value != nil {
					id := specs[i].ID.Value.(int64)
					nodes[i].ID = int(id)
				}
				mutation.done = true
				return nodes[i], nil
			})
			// Apply this builder's hooks in reverse registration order.
			for i := len(builder.hooks) - 1; i >= 0; i-- {
				mut = builder.hooks[i](mut)
			}
			mutators[i] = mut
		}(i, ctx)
	}
	if len(mutators) > 0 {
		// Kick off the chain at the first mutator.
		if _, err := mutators[0].Mutate(ctx, dpcb.builders[0].mutation); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
|
|
||||||
|
|
||||||
// SaveX is like Save, but panics if an error occurs.
func (dpcb *DBPackageCreateBulk) SaveX(ctx context.Context) []*DBPackage {
	v, err := dpcb.Save(ctx)
	if err != nil {
		panic(err)
	}
	return v
}
|
|
||||||
|
|
||||||
// Exec executes the query, discarding the created entities and returning only the error.
func (dpcb *DBPackageCreateBulk) Exec(ctx context.Context) error {
	_, err := dpcb.Save(ctx)
	return err
}
|
|
||||||
|
|
||||||
// ExecX is like Exec, but panics if an error occurs.
func (dpcb *DBPackageCreateBulk) ExecX(ctx context.Context) {
	if err := dpcb.Exec(ctx); err != nil {
		panic(err)
	}
}
|
|
@@ -1,88 +0,0 @@
|
|||||||
// Code generated by ent, DO NOT EDIT.
|
|
||||||
|
|
||||||
package ent
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
|
|
||||||
"entgo.io/ent/dialect/sql"
|
|
||||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
|
||||||
"entgo.io/ent/schema/field"
|
|
||||||
"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
|
|
||||||
"somegit.dev/ALHP/ALHP.GO/ent/predicate"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DBPackageDelete is the builder for deleting a DBPackage entity.
type DBPackageDelete struct {
	config
	// hooks run around the delete mutation, outermost first.
	hooks    []Hook
	mutation *DBPackageMutation
}
|
|
||||||
|
|
||||||
// Where appends a list predicates to the DBPackageDelete builder.
func (dpd *DBPackageDelete) Where(ps ...predicate.DBPackage) *DBPackageDelete {
	dpd.mutation.Where(ps...)
	return dpd
}
|
|
||||||
|
|
||||||
// Exec executes the deletion query and returns how many vertices were deleted.
// The registered hooks wrap sqlExec via withHooks.
func (dpd *DBPackageDelete) Exec(ctx context.Context) (int, error) {
	return withHooks(ctx, dpd.sqlExec, dpd.mutation, dpd.hooks)
}
|
|
||||||
|
|
||||||
// ExecX is like Exec, but panics if an error occurs.
func (dpd *DBPackageDelete) ExecX(ctx context.Context) int {
	n, err := dpd.Exec(ctx)
	if err != nil {
		panic(err)
	}
	return n
}
|
|
||||||
|
|
||||||
// sqlExec builds the delete spec from the mutation's predicates and runs it
// against the driver. Constraint violations are wrapped in *ConstraintError.
func (dpd *DBPackageDelete) sqlExec(ctx context.Context) (int, error) {
	_spec := sqlgraph.NewDeleteSpec(dbpackage.Table, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt))
	if ps := dpd.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	affected, err := sqlgraph.DeleteNodes(ctx, dpd.driver, _spec)
	if err != nil && sqlgraph.IsConstraintError(err) {
		err = &ConstraintError{msg: err.Error(), wrap: err}
	}
	dpd.mutation.done = true
	return affected, err
}
|
|
||||||
|
|
||||||
// DBPackageDeleteOne is the builder for deleting a single DBPackage entity.
// It wraps DBPackageDelete and adds a not-found check for the single-row case.
type DBPackageDeleteOne struct {
	dpd *DBPackageDelete
}
|
|
||||||
|
|
||||||
// Where appends a list predicates to the DBPackageDelete builder.
func (dpdo *DBPackageDeleteOne) Where(ps ...predicate.DBPackage) *DBPackageDeleteOne {
	dpdo.dpd.mutation.Where(ps...)
	return dpdo
}
|
|
||||||
|
|
||||||
// Exec executes the deletion query.
// It returns *NotFoundError when no row matched the predicates.
func (dpdo *DBPackageDeleteOne) Exec(ctx context.Context) error {
	n, err := dpdo.dpd.Exec(ctx)
	switch {
	case err != nil:
		return err
	case n == 0:
		return &NotFoundError{dbpackage.Label}
	default:
		return nil
	}
}
|
|
||||||
|
|
||||||
// ExecX is like Exec, but panics if an error occurs.
func (dpdo *DBPackageDeleteOne) ExecX(ctx context.Context) {
	if err := dpdo.Exec(ctx); err != nil {
		panic(err)
	}
}
|
|
@@ -1,550 +0,0 @@
|
|||||||
// Code generated by ent, DO NOT EDIT.
|
|
||||||
|
|
||||||
package ent
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"math"
|
|
||||||
|
|
||||||
"entgo.io/ent"
|
|
||||||
"entgo.io/ent/dialect/sql"
|
|
||||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
|
||||||
"entgo.io/ent/schema/field"
|
|
||||||
"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
|
|
||||||
"somegit.dev/ALHP/ALHP.GO/ent/predicate"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DBPackageQuery is the builder for querying DBPackage entities.
type DBPackageQuery struct {
	config
	ctx        *QueryContext
	order      []dbpackage.OrderOption
	inters     []Interceptor
	predicates []predicate.DBPackage
	// modifiers are custom SQL tweaks applied to the selector (see Modify).
	modifiers []func(*sql.Selector)
	// intermediate query (i.e. traversal path).
	sql  *sql.Selector
	path func(context.Context) (*sql.Selector, error)
}
|
|
||||||
|
|
||||||
// Where adds a new predicate for the DBPackageQuery builder.
func (dpq *DBPackageQuery) Where(ps ...predicate.DBPackage) *DBPackageQuery {
	dpq.predicates = append(dpq.predicates, ps...)
	return dpq
}
|
|
||||||
|
|
||||||
// Limit the number of records to be returned by this query.
func (dpq *DBPackageQuery) Limit(limit int) *DBPackageQuery {
	dpq.ctx.Limit = &limit
	return dpq
}
|
|
||||||
|
|
||||||
// Offset to start from.
func (dpq *DBPackageQuery) Offset(offset int) *DBPackageQuery {
	dpq.ctx.Offset = &offset
	return dpq
}
|
|
||||||
|
|
||||||
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (dpq *DBPackageQuery) Unique(unique bool) *DBPackageQuery {
	dpq.ctx.Unique = &unique
	return dpq
}
|
|
||||||
|
|
||||||
// Order specifies how the records should be ordered.
func (dpq *DBPackageQuery) Order(o ...dbpackage.OrderOption) *DBPackageQuery {
	dpq.order = append(dpq.order, o...)
	return dpq
}
|
|
||||||
|
|
||||||
// First returns the first DBPackage entity from the query.
// Returns a *NotFoundError when no DBPackage was found.
// Implemented as All with LIMIT 1.
func (dpq *DBPackageQuery) First(ctx context.Context) (*DBPackage, error) {
	nodes, err := dpq.Limit(1).All(setContextOp(ctx, dpq.ctx, ent.OpQueryFirst))
	if err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nil, &NotFoundError{dbpackage.Label}
	}
	return nodes[0], nil
}
|
|
||||||
|
|
||||||
// FirstX is like First, but panics if an error occurs.
// Not-found is NOT a panic: in that case FirstX returns nil.
func (dpq *DBPackageQuery) FirstX(ctx context.Context) *DBPackage {
	node, err := dpq.First(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return node
}
|
|
||||||
|
|
||||||
// FirstID returns the first DBPackage ID from the query.
// Returns a *NotFoundError when no DBPackage ID was found.
func (dpq *DBPackageQuery) FirstID(ctx context.Context) (id int, err error) {
	var ids []int
	if ids, err = dpq.Limit(1).IDs(setContextOp(ctx, dpq.ctx, ent.OpQueryFirstID)); err != nil {
		return
	}
	if len(ids) == 0 {
		err = &NotFoundError{dbpackage.Label}
		return
	}
	return ids[0], nil
}
|
|
||||||
|
|
||||||
// FirstIDX is like FirstID, but panics if an error occurs.
// Not-found is NOT a panic: in that case the zero id is returned.
func (dpq *DBPackageQuery) FirstIDX(ctx context.Context) int {
	id, err := dpq.FirstID(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return id
}
|
|
||||||
|
|
||||||
// Only returns a single DBPackage entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one DBPackage entity is found.
// Returns a *NotFoundError when no DBPackage entities are found.
// LIMIT 2 is enough to distinguish the three cases.
func (dpq *DBPackageQuery) Only(ctx context.Context) (*DBPackage, error) {
	nodes, err := dpq.Limit(2).All(setContextOp(ctx, dpq.ctx, ent.OpQueryOnly))
	if err != nil {
		return nil, err
	}
	switch len(nodes) {
	case 1:
		return nodes[0], nil
	case 0:
		return nil, &NotFoundError{dbpackage.Label}
	default:
		return nil, &NotSingularError{dbpackage.Label}
	}
}
|
|
||||||
|
|
||||||
// OnlyX is like Only, but panics if an error occurs (including not-found).
func (dpq *DBPackageQuery) OnlyX(ctx context.Context) *DBPackage {
	node, err := dpq.Only(ctx)
	if err != nil {
		panic(err)
	}
	return node
}
|
|
||||||
|
|
||||||
// OnlyID is like Only, but returns the only DBPackage ID in the query.
// Returns a *NotSingularError when more than one DBPackage ID is found.
// Returns a *NotFoundError when no entities are found.
func (dpq *DBPackageQuery) OnlyID(ctx context.Context) (id int, err error) {
	var ids []int
	if ids, err = dpq.Limit(2).IDs(setContextOp(ctx, dpq.ctx, ent.OpQueryOnlyID)); err != nil {
		return
	}
	switch len(ids) {
	case 1:
		id = ids[0]
	case 0:
		err = &NotFoundError{dbpackage.Label}
	default:
		err = &NotSingularError{dbpackage.Label}
	}
	return
}
|
|
||||||
|
|
||||||
// OnlyIDX is like OnlyID, but panics if an error occurs.
func (dpq *DBPackageQuery) OnlyIDX(ctx context.Context) int {
	id, err := dpq.OnlyID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}
|
|
||||||
|
|
||||||
// All executes the query and returns a list of DBPackages.
// The query passes through all registered interceptors before hitting SQL.
func (dpq *DBPackageQuery) All(ctx context.Context) ([]*DBPackage, error) {
	ctx = setContextOp(ctx, dpq.ctx, ent.OpQueryAll)
	if err := dpq.prepareQuery(ctx); err != nil {
		return nil, err
	}
	qr := querierAll[[]*DBPackage, *DBPackageQuery]()
	return withInterceptors[[]*DBPackage](ctx, dpq, qr, dpq.inters)
}
|
|
||||||
|
|
||||||
// AllX is like All, but panics if an error occurs.
func (dpq *DBPackageQuery) AllX(ctx context.Context) []*DBPackage {
	nodes, err := dpq.All(ctx)
	if err != nil {
		panic(err)
	}
	return nodes
}
|
|
||||||
|
|
||||||
// IDs executes the query and returns a list of DBPackage IDs.
func (dpq *DBPackageQuery) IDs(ctx context.Context) (ids []int, err error) {
	// Default to DISTINCT for traversal queries when Unique was not set explicitly.
	if dpq.ctx.Unique == nil && dpq.path != nil {
		dpq.Unique(true)
	}
	ctx = setContextOp(ctx, dpq.ctx, ent.OpQueryIDs)
	if err = dpq.Select(dbpackage.FieldID).Scan(ctx, &ids); err != nil {
		return nil, err
	}
	return ids, nil
}
|
|
||||||
|
|
||||||
// IDsX is like IDs, but panics if an error occurs.
func (dpq *DBPackageQuery) IDsX(ctx context.Context) []int {
	ids, err := dpq.IDs(ctx)
	if err != nil {
		panic(err)
	}
	return ids
}
|
|
||||||
|
|
||||||
// Count returns the count of the given query.
func (dpq *DBPackageQuery) Count(ctx context.Context) (int, error) {
	ctx = setContextOp(ctx, dpq.ctx, ent.OpQueryCount)
	if err := dpq.prepareQuery(ctx); err != nil {
		return 0, err
	}
	return withInterceptors[int](ctx, dpq, querierCount[*DBPackageQuery](), dpq.inters)
}
|
|
||||||
|
|
||||||
// CountX is like Count, but panics if an error occurs.
func (dpq *DBPackageQuery) CountX(ctx context.Context) int {
	count, err := dpq.Count(ctx)
	if err != nil {
		panic(err)
	}
	return count
}
|
|
||||||
|
|
||||||
// Exist returns true if the query has elements in the graph.
// A not-found result maps to (false, nil), not to an error.
func (dpq *DBPackageQuery) Exist(ctx context.Context) (bool, error) {
	ctx = setContextOp(ctx, dpq.ctx, ent.OpQueryExist)
	switch _, err := dpq.FirstID(ctx); {
	case IsNotFound(err):
		return false, nil
	case err != nil:
		return false, fmt.Errorf("ent: check existence: %w", err)
	default:
		return true, nil
	}
}
|
|
||||||
|
|
||||||
// ExistX is like Exist, but panics if an error occurs.
func (dpq *DBPackageQuery) ExistX(ctx context.Context) bool {
	exist, err := dpq.Exist(ctx)
	if err != nil {
		panic(err)
	}
	return exist
}
|
|
||||||
|
|
||||||
// Clone returns a duplicate of the DBPackageQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
// A nil receiver yields nil, so cloning an unset query is safe.
func (dpq *DBPackageQuery) Clone() *DBPackageQuery {
	if dpq == nil {
		return nil
	}
	return &DBPackageQuery{
		config:     dpq.config,
		ctx:        dpq.ctx.Clone(),
		order:      append([]dbpackage.OrderOption{}, dpq.order...),
		inters:     append([]Interceptor{}, dpq.inters...),
		predicates: append([]predicate.DBPackage{}, dpq.predicates...),
		// clone intermediate query.
		sql:       dpq.sql.Clone(),
		path:      dpq.path,
		modifiers: append([]func(*sql.Selector){}, dpq.modifiers...),
	}
}
|
|
||||||
|
|
||||||
// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
//	var v []struct {
//		Pkgbase string `json:"pkgbase,omitempty"`
//		Count   int    `json:"count,omitempty"`
//	}
//
//	client.DBPackage.Query().
//		GroupBy(dbpackage.FieldPkgbase).
//		Aggregate(ent.Count()).
//		Scan(ctx, &v)
func (dpq *DBPackageQuery) GroupBy(field string, fields ...string) *DBPackageGroupBy {
	dpq.ctx.Fields = append([]string{field}, fields...)
	grbuild := &DBPackageGroupBy{build: dpq}
	grbuild.flds = &dpq.ctx.Fields
	grbuild.label = dbpackage.Label
	grbuild.scan = grbuild.Scan
	return grbuild
}
|
|
||||||
|
|
||||||
// Select allows the selection one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
//	var v []struct {
//		Pkgbase string `json:"pkgbase,omitempty"`
//	}
//
//	client.DBPackage.Query().
//		Select(dbpackage.FieldPkgbase).
//		Scan(ctx, &v)
func (dpq *DBPackageQuery) Select(fields ...string) *DBPackageSelect {
	dpq.ctx.Fields = append(dpq.ctx.Fields, fields...)
	sbuild := &DBPackageSelect{DBPackageQuery: dpq}
	sbuild.label = dbpackage.Label
	sbuild.flds, sbuild.scan = &dpq.ctx.Fields, sbuild.Scan
	return sbuild
}
|
|
||||||
|
|
||||||
// Aggregate returns a DBPackageSelect configured with the given aggregations.
func (dpq *DBPackageQuery) Aggregate(fns ...AggregateFunc) *DBPackageSelect {
	return dpq.Select().Aggregate(fns...)
}
|
|
||||||
|
|
||||||
// prepareQuery runs pre-execution checks: every interceptor must be non-nil
// (a nil one usually means ent/runtime was not imported), Traverser
// interceptors get a chance to mutate the query, selected fields are
// validated against the schema, and any traversal path is resolved into
// the intermediate selector.
func (dpq *DBPackageQuery) prepareQuery(ctx context.Context) error {
	for _, inter := range dpq.inters {
		if inter == nil {
			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
		}
		if trv, ok := inter.(Traverser); ok {
			if err := trv.Traverse(ctx, dpq); err != nil {
				return err
			}
		}
	}
	for _, f := range dpq.ctx.Fields {
		if !dbpackage.ValidColumn(f) {
			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
		}
	}
	if dpq.path != nil {
		prev, err := dpq.path(ctx)
		if err != nil {
			return err
		}
		dpq.sql = prev
	}
	return nil
}
|
|
||||||
|
|
||||||
// sqlAll executes the query spec and scans all matching rows into DBPackage
// nodes. ScanValues allocates destination values per column; Assign appends a
// new node and populates it from the scanned row. hooks may tweak the spec
// before execution.
func (dpq *DBPackageQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*DBPackage, error) {
	var (
		nodes = []*DBPackage{}
		_spec = dpq.querySpec()
	)
	_spec.ScanValues = func(columns []string) ([]any, error) {
		return (*DBPackage).scanValues(nil, columns)
	}
	_spec.Assign = func(columns []string, values []any) error {
		node := &DBPackage{config: dpq.config}
		nodes = append(nodes, node)
		return node.assignValues(columns, values)
	}
	if len(dpq.modifiers) > 0 {
		_spec.Modifiers = dpq.modifiers
	}
	for i := range hooks {
		hooks[i](ctx, _spec)
	}
	if err := sqlgraph.QueryNodes(ctx, dpq.driver, _spec); err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nodes, nil
	}
	return nodes, nil
}
|
|
||||||
|
|
||||||
// sqlCount executes a COUNT over the query spec. When specific fields are
// selected, the count honors the Unique/DISTINCT setting for those columns.
func (dpq *DBPackageQuery) sqlCount(ctx context.Context) (int, error) {
	_spec := dpq.querySpec()
	if len(dpq.modifiers) > 0 {
		_spec.Modifiers = dpq.modifiers
	}
	_spec.Node.Columns = dpq.ctx.Fields
	if len(dpq.ctx.Fields) > 0 {
		_spec.Unique = dpq.ctx.Unique != nil && *dpq.ctx.Unique
	}
	return sqlgraph.CountNodes(ctx, dpq.driver, _spec)
}
|
|
||||||
|
|
||||||
// querySpec translates the builder state (fields, predicates, ordering,
// limit/offset, uniqueness) into a sqlgraph.QuerySpec for execution.
func (dpq *DBPackageQuery) querySpec() *sqlgraph.QuerySpec {
	_spec := sqlgraph.NewQuerySpec(dbpackage.Table, dbpackage.Columns, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt))
	_spec.From = dpq.sql
	if unique := dpq.ctx.Unique; unique != nil {
		_spec.Unique = *unique
	} else if dpq.path != nil {
		// Traversal queries default to DISTINCT.
		_spec.Unique = true
	}
	if fields := dpq.ctx.Fields; len(fields) > 0 {
		// The ID column is always selected first; duplicates of it are skipped.
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, dbpackage.FieldID)
		for i := range fields {
			if fields[i] != dbpackage.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
			}
		}
	}
	if ps := dpq.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if limit := dpq.ctx.Limit; limit != nil {
		_spec.Limit = *limit
	}
	if offset := dpq.ctx.Offset; offset != nil {
		_spec.Offset = *offset
	}
	if ps := dpq.order; len(ps) > 0 {
		_spec.Order = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	return _spec
}
|
|
||||||
|
|
||||||
// sqlQuery builds the raw *sql.Selector for this query, reusing the
// intermediate traversal selector when one exists, then layering on
// DISTINCT, modifiers, predicates, ordering, offset and limit.
func (dpq *DBPackageQuery) sqlQuery(ctx context.Context) *sql.Selector {
	builder := sql.Dialect(dpq.driver.Dialect())
	t1 := builder.Table(dbpackage.Table)
	columns := dpq.ctx.Fields
	if len(columns) == 0 {
		columns = dbpackage.Columns
	}
	selector := builder.Select(t1.Columns(columns...)...).From(t1)
	if dpq.sql != nil {
		selector = dpq.sql
		selector.Select(selector.Columns(columns...)...)
	}
	if dpq.ctx.Unique != nil && *dpq.ctx.Unique {
		selector.Distinct()
	}
	for _, m := range dpq.modifiers {
		m(selector)
	}
	for _, p := range dpq.predicates {
		p(selector)
	}
	for _, p := range dpq.order {
		p(selector)
	}
	if offset := dpq.ctx.Offset; offset != nil {
		// limit is mandatory for offset clause. We start
		// with default value, and override it below if needed.
		selector.Offset(*offset).Limit(math.MaxInt32)
	}
	if limit := dpq.ctx.Limit; limit != nil {
		selector.Limit(*limit)
	}
	return selector
}
|
|
||||||
|
|
||||||
// Modify adds a query modifier for attaching custom logic to queries.
func (dpq *DBPackageQuery) Modify(modifiers ...func(s *sql.Selector)) *DBPackageSelect {
	dpq.modifiers = append(dpq.modifiers, modifiers...)
	return dpq.Select()
}
|
|
||||||
|
|
||||||
// DBPackageGroupBy is the group-by builder for DBPackage entities.
type DBPackageGroupBy struct {
	selector
	// build is the underlying query whose results are grouped.
	build *DBPackageQuery
}
|
|
||||||
|
|
||||||
// Aggregate adds the given aggregation functions to the group-by query.
func (dpgb *DBPackageGroupBy) Aggregate(fns ...AggregateFunc) *DBPackageGroupBy {
	dpgb.fns = append(dpgb.fns, fns...)
	return dpgb
}
|
|
||||||
|
|
||||||
// Scan applies the selector query and scans the result into the given value.
func (dpgb *DBPackageGroupBy) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, dpgb.build.ctx, ent.OpQueryGroupBy)
	if err := dpgb.build.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*DBPackageQuery, *DBPackageGroupBy](ctx, dpgb.build, dpgb, dpgb.build.inters, v)
}
|
|
||||||
|
|
||||||
// sqlScan builds the GROUP BY statement (grouped columns plus aggregation
// expressions), runs it, and scans all rows into v.
func (dpgb *DBPackageGroupBy) sqlScan(ctx context.Context, root *DBPackageQuery, v any) error {
	selector := root.sqlQuery(ctx).Select()
	aggregation := make([]string, 0, len(dpgb.fns))
	for _, fn := range dpgb.fns {
		aggregation = append(aggregation, fn(selector))
	}
	// Only build the SELECT list if nothing was selected explicitly upstream.
	if len(selector.SelectedColumns()) == 0 {
		columns := make([]string, 0, len(*dpgb.flds)+len(dpgb.fns))
		for _, f := range *dpgb.flds {
			columns = append(columns, selector.C(f))
		}
		columns = append(columns, aggregation...)
		selector.Select(columns...)
	}
	selector.GroupBy(selector.Columns(*dpgb.flds...)...)
	if err := selector.Err(); err != nil {
		return err
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := dpgb.build.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
|
||||||
|
|
||||||
// DBPackageSelect is the builder for selecting fields of DBPackage entities.
// It embeds the query so all query builder methods remain available.
type DBPackageSelect struct {
	*DBPackageQuery
	selector
}
|
|
||||||
|
|
||||||
// Aggregate adds the given aggregation functions to the selector query.
func (dps *DBPackageSelect) Aggregate(fns ...AggregateFunc) *DBPackageSelect {
	dps.fns = append(dps.fns, fns...)
	return dps
}
|
|
||||||
|
|
||||||
// Scan applies the selector query and scans the result into the given value.
func (dps *DBPackageSelect) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, dps.ctx, ent.OpQuerySelect)
	if err := dps.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*DBPackageQuery, *DBPackageSelect](ctx, dps.DBPackageQuery, dps, dps.inters, v)
}
|
|
||||||
|
|
||||||
// sqlScan runs the SELECT statement, appending or replacing the column list
// with aggregation expressions depending on whether fields were selected,
// and scans all rows into v.
func (dps *DBPackageSelect) sqlScan(ctx context.Context, root *DBPackageQuery, v any) error {
	selector := root.sqlQuery(ctx)
	aggregation := make([]string, 0, len(dps.fns))
	for _, fn := range dps.fns {
		aggregation = append(aggregation, fn(selector))
	}
	switch n := len(*dps.selector.flds); {
	case n == 0 && len(aggregation) > 0:
		// Aggregations only: they become the whole SELECT list.
		selector.Select(aggregation...)
	case n != 0 && len(aggregation) > 0:
		// Fields plus aggregations: append the aggregations.
		selector.AppendSelect(aggregation...)
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := dps.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
|
||||||
|
|
||||||
// Modify adds a query modifier for attaching custom logic to queries.
func (dps *DBPackageSelect) Modify(modifiers ...func(s *sql.Selector)) *DBPackageSelect {
	dps.modifiers = append(dps.modifiers, modifiers...)
	return dps
}
|
|
File diff suppressed because it is too large
Load Diff
608
ent/ent.go
608
ent/ent.go
@@ -1,608 +0,0 @@
|
|||||||
// Code generated by ent, DO NOT EDIT.
|
|
||||||
|
|
||||||
package ent
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"entgo.io/ent"
|
|
||||||
"entgo.io/ent/dialect/sql"
|
|
||||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
|
||||||
"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ent aliases to avoid import conflicts in user's code.
// Generated code can reference these without forcing callers to import
// entgo.io/ent alongside this package.
type (
	Op            = ent.Op
	Hook          = ent.Hook
	Value         = ent.Value
	Query         = ent.Query
	QueryContext  = ent.QueryContext
	Querier       = ent.Querier
	QuerierFunc   = ent.QuerierFunc
	Interceptor   = ent.Interceptor
	InterceptFunc = ent.InterceptFunc
	Traverser     = ent.Traverser
	TraverseFunc  = ent.TraverseFunc
	Policy        = ent.Policy
	Mutator       = ent.Mutator
	Mutation      = ent.Mutation
	MutateFunc    = ent.MutateFunc
)
|
|
||||||
|
|
||||||
// clientCtxKey is the unexported context key under which a *Client is stored.
type clientCtxKey struct{}
|
|
||||||
|
|
||||||
// FromContext returns a Client stored inside a context, or nil if there isn't one.
func FromContext(ctx context.Context) *Client {
	c, _ := ctx.Value(clientCtxKey{}).(*Client)
	return c
}
|
|
||||||
|
|
||||||
// NewContext returns a new context with the given Client attached.
func NewContext(parent context.Context, c *Client) context.Context {
	return context.WithValue(parent, clientCtxKey{}, c)
}
|
|
||||||
|
|
||||||
// txCtxKey is the unexported context key under which a *Tx is stored.
type txCtxKey struct{}
|
|
||||||
|
|
||||||
// TxFromContext returns a Tx stored inside a context, or nil if there isn't one.
func TxFromContext(ctx context.Context) *Tx {
	tx, _ := ctx.Value(txCtxKey{}).(*Tx)
	return tx
}
|
|
||||||
|
|
||||||
// NewTxContext returns a new context with the given Tx attached.
func NewTxContext(parent context.Context, tx *Tx) context.Context {
	return context.WithValue(parent, txCtxKey{}, tx)
}
|
|
||||||
|
|
||||||
// OrderFunc applies an ordering on the sql selector.
// Deprecated: Use Asc/Desc functions or the package builders instead.
type OrderFunc func(*sql.Selector)
|
|
||||||
|
|
||||||
var (
	// initCheck guards the one-time construction of columnCheck.
	initCheck   sync.Once
	// columnCheck validates a (table, column) pair against the generated schema.
	columnCheck sql.ColumnCheck
)
|
|
||||||
|
|
||||||
// checkColumn checks if the column exists in the given table.
// The check table is built lazily, exactly once, via sync.Once.
func checkColumn(table, column string) error {
	initCheck.Do(func() {
		columnCheck = sql.NewColumnCheck(map[string]func(string) bool{
			dbpackage.Table: dbpackage.ValidColumn,
		})
	})
	return columnCheck(table, column)
}
|
|
||||||
|
|
||||||
// Asc applies the given fields in ASC order.
// Invalid columns are reported via the selector's error, not a panic.
func Asc(fields ...string) func(*sql.Selector) {
	return func(s *sql.Selector) {
		for _, f := range fields {
			if err := checkColumn(s.TableName(), f); err != nil {
				s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
			}
			s.OrderBy(sql.Asc(s.C(f)))
		}
	}
}
|
|
||||||
|
|
||||||
// Desc applies the given fields in DESC order.
// Invalid columns are reported via the selector's error, not a panic.
func Desc(fields ...string) func(*sql.Selector) {
	return func(s *sql.Selector) {
		for _, f := range fields {
			if err := checkColumn(s.TableName(), f); err != nil {
				s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
			}
			s.OrderBy(sql.Desc(s.C(f)))
		}
	}
}
|
|
||||||
|
|
||||||
// AggregateFunc applies an aggregation step on the group-by traversal/selector.
// It returns the SQL expression to place in the SELECT list.
type AggregateFunc func(*sql.Selector) string
|
|
||||||
|
|
||||||
// As is a pseudo aggregation function for renaming another other functions with custom names. For example:
//
//	GroupBy(field1, field2).
//	Aggregate(ent.As(ent.Sum(field1), "sum_field1"), (ent.As(ent.Sum(field2), "sum_field2")).
//	Scan(ctx, &v)
func As(fn AggregateFunc, end string) AggregateFunc {
	return func(s *sql.Selector) string {
		return sql.As(fn(s), end)
	}
}
|
|
||||||
|
|
||||||
// Count applies the "count" aggregation function on each group.
|
|
||||||
func Count() AggregateFunc {
|
|
||||||
return func(s *sql.Selector) string {
|
|
||||||
return sql.Count("*")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Max applies the "max" aggregation function on the given field of each group.
|
|
||||||
func Max(field string) AggregateFunc {
|
|
||||||
return func(s *sql.Selector) string {
|
|
||||||
if err := checkColumn(s.TableName(), field); err != nil {
|
|
||||||
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return sql.Max(s.C(field))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mean applies the "mean" aggregation function on the given field of each group.
|
|
||||||
func Mean(field string) AggregateFunc {
|
|
||||||
return func(s *sql.Selector) string {
|
|
||||||
if err := checkColumn(s.TableName(), field); err != nil {
|
|
||||||
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return sql.Avg(s.C(field))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Min applies the "min" aggregation function on the given field of each group.
|
|
||||||
func Min(field string) AggregateFunc {
|
|
||||||
return func(s *sql.Selector) string {
|
|
||||||
if err := checkColumn(s.TableName(), field); err != nil {
|
|
||||||
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return sql.Min(s.C(field))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sum applies the "sum" aggregation function on the given field of each group.
|
|
||||||
func Sum(field string) AggregateFunc {
|
|
||||||
return func(s *sql.Selector) string {
|
|
||||||
if err := checkColumn(s.TableName(), field); err != nil {
|
|
||||||
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return sql.Sum(s.C(field))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidationError returns when validating a field or edge fails.
type ValidationError struct {
	Name string // Field or edge name.
	err  error  // underlying cause; exposed via Error/Unwrap.
}

// Error implements the error interface.
func (e *ValidationError) Error() string {
	return e.err.Error()
}

// Unwrap implements the errors.Wrapper interface.
func (e *ValidationError) Unwrap() error {
	return e.err
}

// IsValidationError returns a boolean indicating whether the error is a validation error.
// It matches anywhere in the wrap chain via errors.As.
func IsValidationError(err error) bool {
	if err == nil {
		return false
	}
	var e *ValidationError
	return errors.As(err, &e)
}
|
|
||||||
|
|
||||||
// NotFoundError returns when trying to fetch a specific entity and it was not found in the database.
type NotFoundError struct {
	label string // entity label used in the error message.
}

// Error implements the error interface.
func (e *NotFoundError) Error() string {
	return "ent: " + e.label + " not found"
}

// IsNotFound returns a boolean indicating whether the error is a not found error.
// It matches anywhere in the wrap chain via errors.As.
func IsNotFound(err error) bool {
	if err == nil {
		return false
	}
	var e *NotFoundError
	return errors.As(err, &e)
}

// MaskNotFound masks not found error.
// It returns nil for not-found errors and passes every other error through.
func MaskNotFound(err error) error {
	if IsNotFound(err) {
		return nil
	}
	return err
}
|
|
||||||
|
|
||||||
// NotSingularError returns when trying to fetch a singular entity and more than one was found in the database.
type NotSingularError struct {
	label string // entity label used in the error message.
}

// Error implements the error interface.
func (e *NotSingularError) Error() string {
	return "ent: " + e.label + " not singular"
}

// IsNotSingular returns a boolean indicating whether the error is a not singular error.
// It matches anywhere in the wrap chain via errors.As.
func IsNotSingular(err error) bool {
	if err == nil {
		return false
	}
	var e *NotSingularError
	return errors.As(err, &e)
}
|
|
||||||
|
|
||||||
// NotLoadedError returns when trying to get a node that was not loaded by the query.
type NotLoadedError struct {
	edge string // edge name used in the error message.
}

// Error implements the error interface.
func (e *NotLoadedError) Error() string {
	return "ent: " + e.edge + " edge was not loaded"
}

// IsNotLoaded returns a boolean indicating whether the error is a not loaded error.
// It matches anywhere in the wrap chain via errors.As.
func IsNotLoaded(err error) bool {
	if err == nil {
		return false
	}
	var e *NotLoadedError
	return errors.As(err, &e)
}
|
|
||||||
|
|
||||||
// ConstraintError returns when trying to create/update one or more entities and
// one or more of their constraints failed. For example, violation of edge or
// field uniqueness.
type ConstraintError struct {
	msg  string // human-readable description of the violated constraint.
	wrap error  // underlying driver error; exposed via Unwrap.
}

// Error implements the error interface.
// NOTE: value receiver (unlike Unwrap) so both ConstraintError and
// *ConstraintError satisfy error — preserved as generated.
func (e ConstraintError) Error() string {
	return "ent: constraint failed: " + e.msg
}

// Unwrap implements the errors.Wrapper interface.
func (e *ConstraintError) Unwrap() error {
	return e.wrap
}

// IsConstraintError returns a boolean indicating whether the error is a constraint failure.
// It matches anywhere in the wrap chain via errors.As.
func IsConstraintError(err error) bool {
	if err == nil {
		return false
	}
	var e *ConstraintError
	return errors.As(err, &e)
}
|
|
||||||
|
|
||||||
// selector embedded by the different Select/GroupBy builders.
// It provides the typed Strings/Ints/Float64s/Bools accessors on top of
// the builder-supplied scan function.
type selector struct {
	label string                          // entity label used in NotFoundError messages.
	flds  *[]string                       // pointer to the builder's selected fields (shared, may grow after construction).
	fns   []AggregateFunc                 // aggregations to apply, if any.
	scan  func(context.Context, any) error // executes the query and scans the result into the given value.
}
|
|
||||||
|
|
||||||
// ScanX is like Scan, but panics if an error occurs.
|
|
||||||
func (s *selector) ScanX(ctx context.Context, v any) {
|
|
||||||
if err := s.scan(ctx, v); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Strings returns list of strings from a selector. It is only allowed when selecting one field.
|
|
||||||
func (s *selector) Strings(ctx context.Context) ([]string, error) {
|
|
||||||
if len(*s.flds) > 1 {
|
|
||||||
return nil, errors.New("ent: Strings is not achievable when selecting more than 1 field")
|
|
||||||
}
|
|
||||||
var v []string
|
|
||||||
if err := s.scan(ctx, &v); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// StringsX is like Strings, but panics if an error occurs.
|
|
||||||
func (s *selector) StringsX(ctx context.Context) []string {
|
|
||||||
v, err := s.Strings(ctx)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns a single string from a selector. It is only allowed when selecting one field.
|
|
||||||
func (s *selector) String(ctx context.Context) (_ string, err error) {
|
|
||||||
var v []string
|
|
||||||
if v, err = s.Strings(ctx); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch len(v) {
|
|
||||||
case 1:
|
|
||||||
return v[0], nil
|
|
||||||
case 0:
|
|
||||||
err = &NotFoundError{s.label}
|
|
||||||
default:
|
|
||||||
err = fmt.Errorf("ent: Strings returned %d results when one was expected", len(v))
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// StringX is like String, but panics if an error occurs.
|
|
||||||
func (s *selector) StringX(ctx context.Context) string {
|
|
||||||
v, err := s.String(ctx)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ints returns list of ints from a selector. It is only allowed when selecting one field.
|
|
||||||
func (s *selector) Ints(ctx context.Context) ([]int, error) {
|
|
||||||
if len(*s.flds) > 1 {
|
|
||||||
return nil, errors.New("ent: Ints is not achievable when selecting more than 1 field")
|
|
||||||
}
|
|
||||||
var v []int
|
|
||||||
if err := s.scan(ctx, &v); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// IntsX is like Ints, but panics if an error occurs.
|
|
||||||
func (s *selector) IntsX(ctx context.Context) []int {
|
|
||||||
v, err := s.Ints(ctx)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Int returns a single int from a selector. It is only allowed when selecting one field.
|
|
||||||
func (s *selector) Int(ctx context.Context) (_ int, err error) {
|
|
||||||
var v []int
|
|
||||||
if v, err = s.Ints(ctx); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch len(v) {
|
|
||||||
case 1:
|
|
||||||
return v[0], nil
|
|
||||||
case 0:
|
|
||||||
err = &NotFoundError{s.label}
|
|
||||||
default:
|
|
||||||
err = fmt.Errorf("ent: Ints returned %d results when one was expected", len(v))
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// IntX is like Int, but panics if an error occurs.
|
|
||||||
func (s *selector) IntX(ctx context.Context) int {
|
|
||||||
v, err := s.Int(ctx)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Float64s returns list of float64s from a selector. It is only allowed when selecting one field.
|
|
||||||
func (s *selector) Float64s(ctx context.Context) ([]float64, error) {
|
|
||||||
if len(*s.flds) > 1 {
|
|
||||||
return nil, errors.New("ent: Float64s is not achievable when selecting more than 1 field")
|
|
||||||
}
|
|
||||||
var v []float64
|
|
||||||
if err := s.scan(ctx, &v); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Float64sX is like Float64s, but panics if an error occurs.
|
|
||||||
func (s *selector) Float64sX(ctx context.Context) []float64 {
|
|
||||||
v, err := s.Float64s(ctx)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Float64 returns a single float64 from a selector. It is only allowed when selecting one field.
|
|
||||||
func (s *selector) Float64(ctx context.Context) (_ float64, err error) {
|
|
||||||
var v []float64
|
|
||||||
if v, err = s.Float64s(ctx); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch len(v) {
|
|
||||||
case 1:
|
|
||||||
return v[0], nil
|
|
||||||
case 0:
|
|
||||||
err = &NotFoundError{s.label}
|
|
||||||
default:
|
|
||||||
err = fmt.Errorf("ent: Float64s returned %d results when one was expected", len(v))
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Float64X is like Float64, but panics if an error occurs.
|
|
||||||
func (s *selector) Float64X(ctx context.Context) float64 {
|
|
||||||
v, err := s.Float64(ctx)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Bools returns list of bools from a selector. It is only allowed when selecting one field.
|
|
||||||
func (s *selector) Bools(ctx context.Context) ([]bool, error) {
|
|
||||||
if len(*s.flds) > 1 {
|
|
||||||
return nil, errors.New("ent: Bools is not achievable when selecting more than 1 field")
|
|
||||||
}
|
|
||||||
var v []bool
|
|
||||||
if err := s.scan(ctx, &v); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// BoolsX is like Bools, but panics if an error occurs.
|
|
||||||
func (s *selector) BoolsX(ctx context.Context) []bool {
|
|
||||||
v, err := s.Bools(ctx)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Bool returns a single bool from a selector. It is only allowed when selecting one field.
|
|
||||||
func (s *selector) Bool(ctx context.Context) (_ bool, err error) {
|
|
||||||
var v []bool
|
|
||||||
if v, err = s.Bools(ctx); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch len(v) {
|
|
||||||
case 1:
|
|
||||||
return v[0], nil
|
|
||||||
case 0:
|
|
||||||
err = &NotFoundError{s.label}
|
|
||||||
default:
|
|
||||||
err = fmt.Errorf("ent: Bools returned %d results when one was expected", len(v))
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// BoolX is like Bool, but panics if an error occurs.
|
|
||||||
func (s *selector) BoolX(ctx context.Context) bool {
|
|
||||||
v, err := s.Bool(ctx)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// withHooks invokes the builder operation with the given hooks, if any.
// PM is a pointer-to-mutation type (e.g. *DBPackageMutation) so the final
// mutation state can be copied back into the builder before exec runs.
func withHooks[V Value, M any, PM interface {
	*M
	Mutation
}](ctx context.Context, exec func(context.Context) (V, error), mutation PM, hooks []Hook) (value V, err error) {
	// Fast path: no hooks registered, run the operation directly.
	if len(hooks) == 0 {
		return exec(ctx)
	}
	var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
		mutationT, ok := any(m).(PM)
		if !ok {
			return nil, fmt.Errorf("unexpected mutation type %T", m)
		}
		// Set the mutation to the builder, so changes made by hooks are visible to exec.
		*mutation = *mutationT
		return exec(ctx)
	})
	// Wrap in reverse registration order so hooks[0] runs first.
	for i := len(hooks) - 1; i >= 0; i-- {
		if hooks[i] == nil {
			return value, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
		}
		mut = hooks[i](mut)
	}
	v, err := mut.Mutate(ctx, mutation)
	if err != nil {
		return value, err
	}
	// A hook may return an arbitrary Value; ensure it matches the builder's node type.
	nv, ok := v.(V)
	if !ok {
		return value, fmt.Errorf("unexpected node type %T returned from %T", v, mutation)
	}
	return nv, nil
}
|
|
||||||
|
|
||||||
// setContextOp returns a new context with the given QueryContext attached (including its op) in case it does not exist.
|
|
||||||
func setContextOp(ctx context.Context, qc *QueryContext, op string) context.Context {
|
|
||||||
if ent.QueryFromContext(ctx) == nil {
|
|
||||||
qc.Op = op
|
|
||||||
ctx = ent.NewQueryContext(ctx, qc)
|
|
||||||
}
|
|
||||||
return ctx
|
|
||||||
}
|
|
||||||
|
|
||||||
// querierAll returns a Querier that type-asserts the query to Q and runs
// its sqlAll method. It is the terminal querier in the interceptor chain.
func querierAll[V Value, Q interface {
	sqlAll(context.Context, ...queryHook) (V, error)
}]() Querier {
	return QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
		query, ok := q.(Q)
		if !ok {
			return nil, fmt.Errorf("unexpected query type %T", q)
		}
		return query.sqlAll(ctx)
	})
}
|
|
||||||
|
|
||||||
// querierCount returns a Querier that type-asserts the query to Q and runs
// its sqlCount method. It is the terminal querier in the interceptor chain.
func querierCount[Q interface {
	sqlCount(context.Context) (int, error)
}]() Querier {
	return QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
		query, ok := q.(Q)
		if !ok {
			return nil, fmt.Errorf("unexpected query type %T", q)
		}
		return query.sqlCount(ctx)
	})
}
|
|
||||||
|
|
||||||
// withInterceptors wraps qr with the given interceptors (reverse order, so
// inters[0] runs first), executes the query, and asserts the result to V.
func withInterceptors[V Value](ctx context.Context, q Query, qr Querier, inters []Interceptor) (v V, err error) {
	for i := len(inters) - 1; i >= 0; i-- {
		qr = inters[i].Intercept(qr)
	}
	rv, err := qr.Query(ctx, q)
	if err != nil {
		return v, err
	}
	// An interceptor may return an arbitrary Value; ensure the expected type.
	vt, ok := rv.(V)
	if !ok {
		return v, fmt.Errorf("unexpected type %T returned from %T. expected type: %T", vt, q, v)
	}
	return vt, nil
}
|
|
||||||
|
|
||||||
// scanWithInterceptors runs a Select/GroupBy scan (selectOrGroup.sqlScan into v)
// through the interceptor chain. Because interceptors may replace the returned
// Value, the result is reflectively copied back into v afterwards.
func scanWithInterceptors[Q1 ent.Query, Q2 interface {
	sqlScan(context.Context, Q1, any) error
}](ctx context.Context, rootQuery Q1, selectOrGroup Q2, inters []Interceptor, v any) error {
	rv := reflect.ValueOf(v)
	var qr Querier = QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
		query, ok := q.(Q1)
		if !ok {
			return nil, fmt.Errorf("unexpected query type %T", q)
		}
		if err := selectOrGroup.sqlScan(ctx, query, v); err != nil {
			return nil, err
		}
		// Return the scanned value (dereferenced when possible) so
		// interceptors can inspect or replace it.
		if k := rv.Kind(); k == reflect.Pointer && rv.Elem().CanInterface() {
			return rv.Elem().Interface(), nil
		}
		return v, nil
	})
	// Wrap in reverse order so inters[0] runs first.
	for i := len(inters) - 1; i >= 0; i-- {
		qr = inters[i].Intercept(qr)
	}
	vv, err := qr.Query(ctx, rootQuery)
	if err != nil {
		return err
	}
	// Copy the (possibly replaced) result back into the caller's pointer.
	// Nil values and non-pointer destinations are silently skipped.
	switch rv2 := reflect.ValueOf(vv); {
	case rv.IsNil(), rv2.IsNil(), rv.Kind() != reflect.Pointer:
	case rv.Type() == rv2.Type():
		rv.Elem().Set(rv2.Elem())
	case rv.Elem().Type() == rv2.Type():
		rv.Elem().Set(rv2)
	}
	return nil
}

// queryHook describes an internal hook for the different sqlAll methods.
type queryHook func(context.Context, *sqlgraph.QuerySpec)
|
|
@@ -1,84 +0,0 @@
|
|||||||
// Code generated by ent, DO NOT EDIT.
|
|
||||||
|
|
||||||
package enttest
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
|
|
||||||
"somegit.dev/ALHP/ALHP.GO/ent"
|
|
||||||
// required by schema hooks.
|
|
||||||
_ "somegit.dev/ALHP/ALHP.GO/ent/runtime"
|
|
||||||
|
|
||||||
"entgo.io/ent/dialect/sql/schema"
|
|
||||||
"somegit.dev/ALHP/ALHP.GO/ent/migrate"
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
	// TestingT is the interface that is shared between
	// testing.T and testing.B and used by enttest.
	TestingT interface {
		FailNow()
		Error(...any)
	}

	// Option configures client creation.
	Option func(*options)

	// options collects the client and migration options applied by Open/NewClient.
	options struct {
		opts        []ent.Option
		migrateOpts []schema.MigrateOption
	}
)
|
|
||||||
|
|
||||||
// WithOptions forwards options to client creation.
func WithOptions(opts ...ent.Option) Option {
	return func(o *options) {
		o.opts = append(o.opts, opts...)
	}
}

// WithMigrateOptions forwards options to auto migration.
func WithMigrateOptions(opts ...schema.MigrateOption) Option {
	return func(o *options) {
		o.migrateOpts = append(o.migrateOpts, opts...)
	}
}

// newOptions applies the given Option functions in order and returns the
// accumulated configuration.
func newOptions(opts []Option) *options {
	o := &options{}
	for _, opt := range opts {
		opt(o)
	}
	return o
}
|
|
||||||
|
|
||||||
// Open calls ent.Open and auto-run migration.
// On any error it reports via t.Error and aborts the test with t.FailNow.
func Open(t TestingT, driverName, dataSourceName string, opts ...Option) *ent.Client {
	o := newOptions(opts)
	c, err := ent.Open(driverName, dataSourceName, o.opts...)
	if err != nil {
		t.Error(err)
		t.FailNow()
	}
	migrateSchema(t, c, o)
	return c
}

// NewClient calls ent.NewClient and auto-run migration.
// On any migration error it reports via t.Error and aborts with t.FailNow.
func NewClient(t TestingT, opts ...Option) *ent.Client {
	o := newOptions(opts)
	c := ent.NewClient(o.opts...)
	migrateSchema(t, c, o)
	return c
}

// migrateSchema copies the generated tables and runs auto migration against
// the client's schema, failing the test on any error.
func migrateSchema(t TestingT, c *ent.Client, o *options) {
	tables, err := schema.CopyTables(migrate.Tables)
	if err != nil {
		t.Error(err)
		t.FailNow()
	}
	if err := migrate.Create(context.Background(), c.Schema, tables, o.migrateOpts...); err != nil {
		t.Error(err)
		t.FailNow()
	}
}
|
|
@@ -1,3 +0,0 @@
|
|||||||
package ent
|
|
||||||
|
|
||||||
//go:generate go run -mod=mod entgo.io/ent/cmd/ent generate --feature sql/modifier ./schema
|
|
199
ent/hook/hook.go
199
ent/hook/hook.go
@@ -1,199 +0,0 @@
|
|||||||
// Code generated by ent, DO NOT EDIT.
|
|
||||||
|
|
||||||
package hook
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"somegit.dev/ALHP/ALHP.GO/ent"
|
|
||||||
)
|
|
||||||
|
|
||||||
// The DBPackageFunc type is an adapter to allow the use of ordinary
// function as DBPackage mutator.
type DBPackageFunc func(context.Context, *ent.DBPackageMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
// It fails with an error if m is not a *ent.DBPackageMutation.
func (f DBPackageFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.DBPackageMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DBPackageMutation", m)
}

// Condition is a hook condition function.
type Condition func(context.Context, ent.Mutation) bool
|
|
||||||
|
|
||||||
// And groups conditions with the AND operator.
// Evaluation short-circuits on the first false condition.
func And(first, second Condition, rest ...Condition) Condition {
	return func(ctx context.Context, m ent.Mutation) bool {
		if !first(ctx, m) || !second(ctx, m) {
			return false
		}
		for _, cond := range rest {
			if !cond(ctx, m) {
				return false
			}
		}
		return true
	}
}

// Or groups conditions with the OR operator.
// Evaluation short-circuits on the first true condition.
func Or(first, second Condition, rest ...Condition) Condition {
	return func(ctx context.Context, m ent.Mutation) bool {
		if first(ctx, m) || second(ctx, m) {
			return true
		}
		for _, cond := range rest {
			if cond(ctx, m) {
				return true
			}
		}
		return false
	}
}

// Not negates a given condition.
func Not(cond Condition) Condition {
	return func(ctx context.Context, m ent.Mutation) bool {
		return !cond(ctx, m)
	}
}

// HasOp is a condition testing mutation operation.
func HasOp(op ent.Op) Condition {
	return func(_ context.Context, m ent.Mutation) bool {
		return m.Op().Is(op)
	}
}
|
|
||||||
|
|
||||||
// HasAddedFields is a condition validating `.AddedField` on fields.
|
|
||||||
func HasAddedFields(field string, fields ...string) Condition {
|
|
||||||
return func(_ context.Context, m ent.Mutation) bool {
|
|
||||||
if _, exists := m.AddedField(field); !exists {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for _, field := range fields {
|
|
||||||
if _, exists := m.AddedField(field); !exists {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasClearedFields is a condition validating `.FieldCleared` on fields.
|
|
||||||
func HasClearedFields(field string, fields ...string) Condition {
|
|
||||||
return func(_ context.Context, m ent.Mutation) bool {
|
|
||||||
if exists := m.FieldCleared(field); !exists {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for _, field := range fields {
|
|
||||||
if exists := m.FieldCleared(field); !exists {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasFields is a condition validating `.Field` on fields.
|
|
||||||
func HasFields(field string, fields ...string) Condition {
|
|
||||||
return func(_ context.Context, m ent.Mutation) bool {
|
|
||||||
if _, exists := m.Field(field); !exists {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for _, field := range fields {
|
|
||||||
if _, exists := m.Field(field); !exists {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If executes the given hook under condition.
//
//	hook.If(ComputeAverage, And(HasFields(...), HasAddedFields(...)))
func If(hk ent.Hook, cond Condition) ent.Hook {
	return func(next ent.Mutator) ent.Mutator {
		return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
			// The condition is evaluated per mutation; when false, hk is bypassed entirely.
			if cond(ctx, m) {
				return hk(next).Mutate(ctx, m)
			}
			return next.Mutate(ctx, m)
		})
	}
}

// On executes the given hook only for the given operation.
//
//	hook.On(Log, ent.Delete|ent.Create)
func On(hk ent.Hook, op ent.Op) ent.Hook {
	return If(hk, HasOp(op))
}

// Unless skips the given hook only for the given operation.
//
//	hook.Unless(Log, ent.Update|ent.UpdateOne)
func Unless(hk ent.Hook, op ent.Op) ent.Hook {
	return If(hk, Not(HasOp(op)))
}
|
|
||||||
|
|
||||||
// FixedError is a hook returning a fixed error.
// The wrapped mutator chain is never invoked.
func FixedError(err error) ent.Hook {
	return func(ent.Mutator) ent.Mutator {
		return ent.MutateFunc(func(context.Context, ent.Mutation) (ent.Value, error) {
			return nil, err
		})
	}
}

// Reject returns a hook that rejects all operations that match op.
//
//	func (T) Hooks() []ent.Hook {
//		return []ent.Hook{
//			Reject(ent.Delete|ent.Update),
//		}
//	}
func Reject(op ent.Op) ent.Hook {
	hk := FixedError(fmt.Errorf("%s operation is not allowed", op))
	return On(hk, op)
}
|
|
||||||
|
|
||||||
// Chain acts as a list of hooks and is effectively immutable.
// Once created, it will always hold the same set of hooks in the same order.
type Chain struct {
	hooks []ent.Hook
}

// NewChain creates a new chain of hooks.
// The input slice is copied so later mutation of it cannot affect the chain.
func NewChain(hooks ...ent.Hook) Chain {
	return Chain{append([]ent.Hook(nil), hooks...)}
}

// Hook chains the list of hooks and returns the final hook.
// Wrapping happens in reverse order so hooks[0] runs first at mutate time.
func (c Chain) Hook() ent.Hook {
	return func(mutator ent.Mutator) ent.Mutator {
		for i := len(c.hooks) - 1; i >= 0; i-- {
			mutator = c.hooks[i](mutator)
		}
		return mutator
	}
}

// Append extends a chain, adding the specified hook
// as the last ones in the mutation flow.
// A fresh slice is allocated, so neither chain aliases the other.
func (c Chain) Append(hooks ...ent.Hook) Chain {
	newHooks := make([]ent.Hook, 0, len(c.hooks)+len(hooks))
	newHooks = append(newHooks, c.hooks...)
	newHooks = append(newHooks, hooks...)
	return Chain{newHooks}
}

// Extend extends a chain, adding the specified chain
// as the last ones in the mutation flow.
func (c Chain) Extend(chain Chain) Chain {
	return c.Append(chain.hooks...)
}
|
|
@@ -1,64 +0,0 @@
|
|||||||
// Code generated by ent, DO NOT EDIT.
|
|
||||||
|
|
||||||
package migrate
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
|
|
||||||
"entgo.io/ent/dialect"
|
|
||||||
"entgo.io/ent/dialect/sql/schema"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
	// WithGlobalUniqueID sets the universal ids options to the migration.
	// If this option is enabled, ent migration will allocate a 1<<32 range
	// for the ids of each entity (table).
	// Note that this option cannot be applied on tables that already exist.
	WithGlobalUniqueID = schema.WithGlobalUniqueID
	// WithDropColumn sets the drop column option to the migration.
	// If this option is enabled, ent migration will drop old columns
	// that were used for both fields and edges. This defaults to false.
	WithDropColumn = schema.WithDropColumn
	// WithDropIndex sets the drop index option to the migration.
	// If this option is enabled, ent migration will drop old indexes
	// that were defined in the schema. This defaults to false.
	// Note that unique constraints are defined using `UNIQUE INDEX`,
	// and therefore, it's recommended to enable this option to get more
	// flexibility in the schema changes.
	WithDropIndex = schema.WithDropIndex
	// WithForeignKeys enables creating foreign-key in schema DDL. This defaults to true.
	WithForeignKeys = schema.WithForeignKeys
)

// Schema is the API for creating, migrating and dropping a schema.
type Schema struct {
	drv dialect.Driver // database driver the migration statements run against.
}
|
|
||||||
|
|
||||||
// NewSchema creates a new schema client.
func NewSchema(drv dialect.Driver) *Schema { return &Schema{drv: drv} }

// Create creates all schema resources.
func (s *Schema) Create(ctx context.Context, opts ...schema.MigrateOption) error {
	return Create(ctx, s, Tables, opts...)
}

// Create creates all table resources using the given schema driver.
func Create(ctx context.Context, s *Schema, tables []*schema.Table, opts ...schema.MigrateOption) error {
	migrate, err := schema.NewMigrate(s.drv, opts...)
	if err != nil {
		return fmt.Errorf("ent/migrate: %w", err)
	}
	return migrate.Create(ctx, tables...)
}

// WriteTo writes the schema changes to w instead of running them against the database.
// The driver is wrapped in a schema.WriteDriver so DDL statements are emitted to w.
//
//	if err := client.Schema.WriteTo(context.Background(), os.Stdout); err != nil {
//		log.Fatal(err)
//	}
func (s *Schema) WriteTo(ctx context.Context, w io.Writer, opts ...schema.MigrateOption) error {
	return Create(ctx, &Schema{drv: &schema.WriteDriver{Writer: w, Driver: s.drv}}, Tables, opts...)
}
|
|
@@ -1,48 +0,0 @@
|
|||||||
// Code generated by ent, DO NOT EDIT.
|
|
||||||
|
|
||||||
package migrate
|
|
||||||
|
|
||||||
import (
|
|
||||||
"entgo.io/ent/dialect/sql/schema"
|
|
||||||
"entgo.io/ent/schema/field"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// DbPackagesColumns holds the columns for the "db_packages" table.
|
|
||||||
DbPackagesColumns = []*schema.Column{
|
|
||||||
{Name: "id", Type: field.TypeInt, Increment: true},
|
|
||||||
{Name: "pkgbase", Type: field.TypeString},
|
|
||||||
{Name: "packages", Type: field.TypeJSON, Nullable: true},
|
|
||||||
{Name: "status", Type: field.TypeEnum, Nullable: true, Enums: []string{"skipped", "failed", "built", "queued", "delayed", "building", "latest", "signing", "unknown"}, Default: "unknown"},
|
|
||||||
{Name: "skip_reason", Type: field.TypeString, Nullable: true},
|
|
||||||
{Name: "repository", Type: field.TypeEnum, Enums: []string{"extra", "core", "multilib"}},
|
|
||||||
{Name: "march", Type: field.TypeString},
|
|
||||||
{Name: "version", Type: field.TypeString, Nullable: true},
|
|
||||||
{Name: "repo_version", Type: field.TypeString, Nullable: true},
|
|
||||||
{Name: "build_time_start", Type: field.TypeTime, Nullable: true},
|
|
||||||
{Name: "updated", Type: field.TypeTime, Nullable: true},
|
|
||||||
{Name: "lto", Type: field.TypeEnum, Nullable: true, Enums: []string{"enabled", "unknown", "disabled", "auto_disabled"}, Default: "unknown"},
|
|
||||||
{Name: "last_version_build", Type: field.TypeString, Nullable: true},
|
|
||||||
{Name: "last_verified", Type: field.TypeTime, Nullable: true},
|
|
||||||
{Name: "debug_symbols", Type: field.TypeEnum, Nullable: true, Enums: []string{"available", "unknown", "not_available"}, Default: "unknown"},
|
|
||||||
{Name: "max_rss", Type: field.TypeInt64, Nullable: true},
|
|
||||||
{Name: "u_time", Type: field.TypeInt64, Nullable: true},
|
|
||||||
{Name: "s_time", Type: field.TypeInt64, Nullable: true},
|
|
||||||
{Name: "io_in", Type: field.TypeInt64, Nullable: true},
|
|
||||||
{Name: "io_out", Type: field.TypeInt64, Nullable: true},
|
|
||||||
{Name: "tag_rev", Type: field.TypeString, Nullable: true},
|
|
||||||
}
|
|
||||||
// DbPackagesTable holds the schema information for the "db_packages" table.
|
|
||||||
DbPackagesTable = &schema.Table{
|
|
||||||
Name: "db_packages",
|
|
||||||
Columns: DbPackagesColumns,
|
|
||||||
PrimaryKey: []*schema.Column{DbPackagesColumns[0]},
|
|
||||||
}
|
|
||||||
// Tables holds all the tables in the schema.
|
|
||||||
Tables = []*schema.Table{
|
|
||||||
DbPackagesTable,
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
}
|
|
1896
ent/mutation.go
1896
ent/mutation.go
File diff suppressed because it is too large
Load Diff
@@ -1,10 +0,0 @@
|
|||||||
// Code generated by ent, DO NOT EDIT.
|
|
||||||
|
|
||||||
package predicate
|
|
||||||
|
|
||||||
import (
|
|
||||||
"entgo.io/ent/dialect/sql"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DBPackage is the predicate function for dbpackage builders.
|
|
||||||
type DBPackage func(*sql.Selector)
|
|
@@ -1,24 +0,0 @@
|
|||||||
// Code generated by ent, DO NOT EDIT.
|
|
||||||
|
|
||||||
package ent
|
|
||||||
|
|
||||||
import (
|
|
||||||
"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
|
|
||||||
"somegit.dev/ALHP/ALHP.GO/ent/schema"
|
|
||||||
)
|
|
||||||
|
|
||||||
// The init function reads all schema descriptors with runtime code
|
|
||||||
// (default values, validators, hooks and policies) and stitches it
|
|
||||||
// to their package variables.
|
|
||||||
func init() {
|
|
||||||
dbpackageFields := schema.DBPackage{}.Fields()
|
|
||||||
_ = dbpackageFields
|
|
||||||
// dbpackageDescPkgbase is the schema descriptor for pkgbase field.
|
|
||||||
dbpackageDescPkgbase := dbpackageFields[0].Descriptor()
|
|
||||||
// dbpackage.PkgbaseValidator is a validator for the "pkgbase" field. It is called by the builders before save.
|
|
||||||
dbpackage.PkgbaseValidator = dbpackageDescPkgbase.Validators[0].(func(string) error)
|
|
||||||
// dbpackageDescMarch is the schema descriptor for march field.
|
|
||||||
dbpackageDescMarch := dbpackageFields[5].Descriptor()
|
|
||||||
// dbpackage.MarchValidator is a validator for the "march" field. It is called by the builders before save.
|
|
||||||
dbpackage.MarchValidator = dbpackageDescMarch.Validators[0].(func(string) error)
|
|
||||||
}
|
|
@@ -1,10 +0,0 @@
|
|||||||
// Code generated by ent, DO NOT EDIT.
|
|
||||||
|
|
||||||
package runtime
|
|
||||||
|
|
||||||
// The schema-stitching logic is generated in somegit.dev/ALHP/ALHP.GO/ent/runtime.go
|
|
||||||
|
|
||||||
const (
|
|
||||||
Version = "v0.14.2" // Version of ent codegen.
|
|
||||||
Sum = "h1:ywld/j2Rx4EmnIKs8eZ29cbFA1zpB+DA9TLL5l3rlq0=" // Sum of ent codegen.
|
|
||||||
)
|
|
@@ -1,43 +0,0 @@
|
|||||||
package schema
|
|
||||||
|
|
||||||
import (
|
|
||||||
"entgo.io/ent"
|
|
||||||
"entgo.io/ent/schema/field"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DBPackage holds the schema definition for the DbPackage entity.
|
|
||||||
type DBPackage struct {
|
|
||||||
ent.Schema
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fields of the DBPackage.
|
|
||||||
func (DBPackage) Fields() []ent.Field {
|
|
||||||
return []ent.Field{
|
|
||||||
field.String("pkgbase").NotEmpty().Immutable(),
|
|
||||||
field.Strings("packages").Optional(),
|
|
||||||
field.Enum("status").Values("skipped", "failed", "built", "queued", "delayed", "building",
|
|
||||||
"latest", "signing", "unknown").Default("unknown").Optional(),
|
|
||||||
field.String("skip_reason").Optional(),
|
|
||||||
field.Enum("repository").Values("extra", "core", "multilib"),
|
|
||||||
field.String("march").NotEmpty().Immutable(),
|
|
||||||
field.String("version").Optional(),
|
|
||||||
field.String("repo_version").Optional(),
|
|
||||||
field.Time("build_time_start").Optional(),
|
|
||||||
field.Time("updated").Optional(),
|
|
||||||
field.Enum("lto").Values("enabled", "unknown", "disabled", "auto_disabled").Default("unknown").Optional(),
|
|
||||||
field.String("last_version_build").Optional(),
|
|
||||||
field.Time("last_verified").Optional(),
|
|
||||||
field.Enum("debug_symbols").Values("available", "unknown", "not_available").Default("unknown").Optional(),
|
|
||||||
field.Int64("max_rss").Optional().Nillable(),
|
|
||||||
field.Int64("u_time").Optional().Nillable(),
|
|
||||||
field.Int64("s_time").Optional().Nillable(),
|
|
||||||
field.Int64("io_in").Optional().Nillable(),
|
|
||||||
field.Int64("io_out").Optional().Nillable(),
|
|
||||||
field.String("tag_rev").Optional().Nillable(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Edges of the DBPackage.
|
|
||||||
func (DBPackage) Edges() []ent.Edge {
|
|
||||||
return nil
|
|
||||||
}
|
|
210
ent/tx.go
210
ent/tx.go
@@ -1,210 +0,0 @@
|
|||||||
// Code generated by ent, DO NOT EDIT.
|
|
||||||
|
|
||||||
package ent
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"entgo.io/ent/dialect"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Tx is a transactional client that is created by calling Client.Tx().
|
|
||||||
type Tx struct {
|
|
||||||
config
|
|
||||||
// DBPackage is the client for interacting with the DBPackage builders.
|
|
||||||
DBPackage *DBPackageClient
|
|
||||||
|
|
||||||
// lazily loaded.
|
|
||||||
client *Client
|
|
||||||
clientOnce sync.Once
|
|
||||||
// ctx lives for the life of the transaction. It is
|
|
||||||
// the same context used by the underlying connection.
|
|
||||||
ctx context.Context
|
|
||||||
}
|
|
||||||
|
|
||||||
type (
|
|
||||||
// Committer is the interface that wraps the Commit method.
|
|
||||||
Committer interface {
|
|
||||||
Commit(context.Context, *Tx) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// The CommitFunc type is an adapter to allow the use of ordinary
|
|
||||||
// function as a Committer. If f is a function with the appropriate
|
|
||||||
// signature, CommitFunc(f) is a Committer that calls f.
|
|
||||||
CommitFunc func(context.Context, *Tx) error
|
|
||||||
|
|
||||||
// CommitHook defines the "commit middleware". A function that gets a Committer
|
|
||||||
// and returns a Committer. For example:
|
|
||||||
//
|
|
||||||
// hook := func(next ent.Committer) ent.Committer {
|
|
||||||
// return ent.CommitFunc(func(ctx context.Context, tx *ent.Tx) error {
|
|
||||||
// // Do some stuff before.
|
|
||||||
// if err := next.Commit(ctx, tx); err != nil {
|
|
||||||
// return err
|
|
||||||
// }
|
|
||||||
// // Do some stuff after.
|
|
||||||
// return nil
|
|
||||||
// })
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
CommitHook func(Committer) Committer
|
|
||||||
)
|
|
||||||
|
|
||||||
// Commit calls f(ctx, m).
|
|
||||||
func (f CommitFunc) Commit(ctx context.Context, tx *Tx) error {
|
|
||||||
return f(ctx, tx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Commit commits the transaction.
|
|
||||||
func (tx *Tx) Commit() error {
|
|
||||||
txDriver := tx.config.driver.(*txDriver)
|
|
||||||
var fn Committer = CommitFunc(func(context.Context, *Tx) error {
|
|
||||||
return txDriver.tx.Commit()
|
|
||||||
})
|
|
||||||
txDriver.mu.Lock()
|
|
||||||
hooks := append([]CommitHook(nil), txDriver.onCommit...)
|
|
||||||
txDriver.mu.Unlock()
|
|
||||||
for i := len(hooks) - 1; i >= 0; i-- {
|
|
||||||
fn = hooks[i](fn)
|
|
||||||
}
|
|
||||||
return fn.Commit(tx.ctx, tx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// OnCommit adds a hook to call on commit.
|
|
||||||
func (tx *Tx) OnCommit(f CommitHook) {
|
|
||||||
txDriver := tx.config.driver.(*txDriver)
|
|
||||||
txDriver.mu.Lock()
|
|
||||||
txDriver.onCommit = append(txDriver.onCommit, f)
|
|
||||||
txDriver.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
type (
|
|
||||||
// Rollbacker is the interface that wraps the Rollback method.
|
|
||||||
Rollbacker interface {
|
|
||||||
Rollback(context.Context, *Tx) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// The RollbackFunc type is an adapter to allow the use of ordinary
|
|
||||||
// function as a Rollbacker. If f is a function with the appropriate
|
|
||||||
// signature, RollbackFunc(f) is a Rollbacker that calls f.
|
|
||||||
RollbackFunc func(context.Context, *Tx) error
|
|
||||||
|
|
||||||
// RollbackHook defines the "rollback middleware". A function that gets a Rollbacker
|
|
||||||
// and returns a Rollbacker. For example:
|
|
||||||
//
|
|
||||||
// hook := func(next ent.Rollbacker) ent.Rollbacker {
|
|
||||||
// return ent.RollbackFunc(func(ctx context.Context, tx *ent.Tx) error {
|
|
||||||
// // Do some stuff before.
|
|
||||||
// if err := next.Rollback(ctx, tx); err != nil {
|
|
||||||
// return err
|
|
||||||
// }
|
|
||||||
// // Do some stuff after.
|
|
||||||
// return nil
|
|
||||||
// })
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
RollbackHook func(Rollbacker) Rollbacker
|
|
||||||
)
|
|
||||||
|
|
||||||
// Rollback calls f(ctx, m).
|
|
||||||
func (f RollbackFunc) Rollback(ctx context.Context, tx *Tx) error {
|
|
||||||
return f(ctx, tx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rollback rollbacks the transaction.
|
|
||||||
func (tx *Tx) Rollback() error {
|
|
||||||
txDriver := tx.config.driver.(*txDriver)
|
|
||||||
var fn Rollbacker = RollbackFunc(func(context.Context, *Tx) error {
|
|
||||||
return txDriver.tx.Rollback()
|
|
||||||
})
|
|
||||||
txDriver.mu.Lock()
|
|
||||||
hooks := append([]RollbackHook(nil), txDriver.onRollback...)
|
|
||||||
txDriver.mu.Unlock()
|
|
||||||
for i := len(hooks) - 1; i >= 0; i-- {
|
|
||||||
fn = hooks[i](fn)
|
|
||||||
}
|
|
||||||
return fn.Rollback(tx.ctx, tx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// OnRollback adds a hook to call on rollback.
|
|
||||||
func (tx *Tx) OnRollback(f RollbackHook) {
|
|
||||||
txDriver := tx.config.driver.(*txDriver)
|
|
||||||
txDriver.mu.Lock()
|
|
||||||
txDriver.onRollback = append(txDriver.onRollback, f)
|
|
||||||
txDriver.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Client returns a Client that binds to current transaction.
|
|
||||||
func (tx *Tx) Client() *Client {
|
|
||||||
tx.clientOnce.Do(func() {
|
|
||||||
tx.client = &Client{config: tx.config}
|
|
||||||
tx.client.init()
|
|
||||||
})
|
|
||||||
return tx.client
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tx *Tx) init() {
|
|
||||||
tx.DBPackage = NewDBPackageClient(tx.config)
|
|
||||||
}
|
|
||||||
|
|
||||||
// txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation.
|
|
||||||
// The idea is to support transactions without adding any extra code to the builders.
|
|
||||||
// When a builder calls to driver.Tx(), it gets the same dialect.Tx instance.
|
|
||||||
// Commit and Rollback are nop for the internal builders and the user must call one
|
|
||||||
// of them in order to commit or rollback the transaction.
|
|
||||||
//
|
|
||||||
// If a closed transaction is embedded in one of the generated entities, and the entity
|
|
||||||
// applies a query, for example: DBPackage.QueryXXX(), the query will be executed
|
|
||||||
// through the driver which created this transaction.
|
|
||||||
//
|
|
||||||
// Note that txDriver is not goroutine safe.
|
|
||||||
type txDriver struct {
|
|
||||||
// the driver we started the transaction from.
|
|
||||||
drv dialect.Driver
|
|
||||||
// tx is the underlying transaction.
|
|
||||||
tx dialect.Tx
|
|
||||||
// completion hooks.
|
|
||||||
mu sync.Mutex
|
|
||||||
onCommit []CommitHook
|
|
||||||
onRollback []RollbackHook
|
|
||||||
}
|
|
||||||
|
|
||||||
// newTx creates a new transactional driver.
|
|
||||||
func newTx(ctx context.Context, drv dialect.Driver) (*txDriver, error) {
|
|
||||||
tx, err := drv.Tx(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &txDriver{tx: tx, drv: drv}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Tx returns the transaction wrapper (txDriver) to avoid Commit or Rollback calls
|
|
||||||
// from the internal builders. Should be called only by the internal builders.
|
|
||||||
func (tx *txDriver) Tx(context.Context) (dialect.Tx, error) { return tx, nil }
|
|
||||||
|
|
||||||
// Dialect returns the dialect of the driver we started the transaction from.
|
|
||||||
func (tx *txDriver) Dialect() string { return tx.drv.Dialect() }
|
|
||||||
|
|
||||||
// Close is a nop close.
|
|
||||||
func (*txDriver) Close() error { return nil }
|
|
||||||
|
|
||||||
// Commit is a nop commit for the internal builders.
|
|
||||||
// User must call `Tx.Commit` in order to commit the transaction.
|
|
||||||
func (*txDriver) Commit() error { return nil }
|
|
||||||
|
|
||||||
// Rollback is a nop rollback for the internal builders.
|
|
||||||
// User must call `Tx.Rollback` in order to rollback the transaction.
|
|
||||||
func (*txDriver) Rollback() error { return nil }
|
|
||||||
|
|
||||||
// Exec calls tx.Exec.
|
|
||||||
func (tx *txDriver) Exec(ctx context.Context, query string, args, v any) error {
|
|
||||||
return tx.tx.Exec(ctx, query, args, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Query calls tx.Query.
|
|
||||||
func (tx *txDriver) Query(ctx context.Context, query string, args, v any) error {
|
|
||||||
return tx.tx.Query(ctx, query, args, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ dialect.Driver = (*txDriver)(nil)
|
|
44
flags.yaml
44
flags.yaml
@@ -1,44 +0,0 @@
|
|||||||
# template values get replaced on makepkg.conf generation
|
|
||||||
# $level$ -> march x86-64 level, e.g. v3
|
|
||||||
# $march$ -> full march, e.g. x86-64-v3
|
|
||||||
# $buildproc$ -> number of threads to build with
|
|
||||||
|
|
||||||
common:
|
|
||||||
cflags:
|
|
||||||
- "-mtune=generic": ~
|
|
||||||
- "-O2": "-O3"
|
|
||||||
- "-mpclmul" # https://somegit.dev/ALHP/ALHP.GO/issues/92
|
|
||||||
- "-march=x86-64": "-march=$march$"
|
|
||||||
|
|
||||||
options:
|
|
||||||
- "lto": "!lto" # disable lto; see 'lto' section below
|
|
||||||
|
|
||||||
buildenv:
|
|
||||||
- "color": "!color" # color messes up the log output
|
|
||||||
|
|
||||||
goamd64: "$level$" # https://somegit.dev/ALHP/ALHP.GO/issues/116
|
|
||||||
packager: "ALHP $march$ <alhp@harting.dev>"
|
|
||||||
makeflags: "-j$buildproc$"
|
|
||||||
# https://somegit.dev/ALHP/ALHP.GO/issues/110
|
|
||||||
rustflags:
|
|
||||||
- "-Copt-level=3"
|
|
||||||
- "-Ctarget-cpu=$march$"
|
|
||||||
- "-Clink-arg=-z"
|
|
||||||
- "-Clink-arg=pack-relative-relocs"
|
|
||||||
ltoflags:
|
|
||||||
- "-falign-functions=32" # https://github.com/InBetweenNames/gentooLTO/issues/164
|
|
||||||
kcflags: " -march=$march$ -O3"
|
|
||||||
kcppflags: " -march=$march$ -O3"
|
|
||||||
fcflags: "$FFLAGS"
|
|
||||||
fflags:
|
|
||||||
- "-O2": "-O3"
|
|
||||||
- "-march=$march$"
|
|
||||||
|
|
||||||
lto:
|
|
||||||
rustflags:
|
|
||||||
- "-Ccodegen-units=1"
|
|
||||||
|
|
||||||
options:
|
|
||||||
- "!lto": "lto"
|
|
||||||
|
|
||||||
cargo_profile_release_lto: "fat"
|
|
3
git_clean.sh
Normal file
3
git_clean.sh
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
cd "$1" && git clean -xdff
|
59
go.mod
59
go.mod
@@ -1,58 +1,13 @@
|
|||||||
module somegit.dev/ALHP/ALHP.GO
|
module ALHP.go
|
||||||
|
|
||||||
go 1.23.0
|
go 1.16
|
||||||
|
|
||||||
toolchain go1.23.1
|
|
||||||
|
|
||||||
require (
|
require (
|
||||||
entgo.io/ent v0.14.3
|
github.com/Jguer/go-alpm/v2 v2.0.5
|
||||||
github.com/Jguer/go-alpm/v2 v2.2.2
|
|
||||||
github.com/Morganamilo/go-pacmanconf v0.0.0-20210502114700-cff030e927a5
|
|
||||||
github.com/Morganamilo/go-srcinfo v1.0.0
|
github.com/Morganamilo/go-srcinfo v1.0.0
|
||||||
github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500
|
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect
|
||||||
github.com/gobwas/glob v0.2.3
|
github.com/sirupsen/logrus v1.8.1
|
||||||
github.com/google/uuid v1.6.0
|
github.com/wercker/journalhook v0.0.0-20180428041537-5d0a5ae867b3
|
||||||
github.com/jackc/pgx/v4 v4.18.3
|
github.com/yargevad/filepathx v1.0.0
|
||||||
github.com/otiai10/copy v1.14.1
|
|
||||||
github.com/prometheus/client_golang v1.21.1
|
|
||||||
github.com/sethvargo/go-retry v0.3.0
|
|
||||||
github.com/sirupsen/logrus v1.9.3
|
|
||||||
github.com/wercker/journalhook v0.0.0-20230927020745-64542ffa4117
|
|
||||||
gopkg.in/yaml.v2 v2.4.0
|
gopkg.in/yaml.v2 v2.4.0
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
|
||||||
ariga.io/atlas v0.32.0 // indirect
|
|
||||||
github.com/agext/levenshtein v1.2.3 // indirect
|
|
||||||
github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
|
|
||||||
github.com/beorn7/perks v1.0.1 // indirect
|
|
||||||
github.com/bmatcuk/doublestar v1.3.4 // indirect
|
|
||||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
|
||||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect
|
|
||||||
github.com/go-openapi/inflect v0.21.1 // indirect
|
|
||||||
github.com/google/go-cmp v0.7.0 // indirect
|
|
||||||
github.com/hashicorp/hcl/v2 v2.23.0 // indirect
|
|
||||||
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
|
|
||||||
github.com/jackc/pgconn v1.14.3 // indirect
|
|
||||||
github.com/jackc/pgio v1.0.0 // indirect
|
|
||||||
github.com/jackc/pgpassfile v1.0.0 // indirect
|
|
||||||
github.com/jackc/pgproto3/v2 v2.3.3 // indirect
|
|
||||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
|
|
||||||
github.com/jackc/pgtype v1.14.4 // indirect
|
|
||||||
github.com/klauspost/compress v1.18.0 // indirect
|
|
||||||
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
|
|
||||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
|
||||||
github.com/otiai10/mint v1.6.3 // indirect
|
|
||||||
github.com/prometheus/client_model v0.6.1 // indirect
|
|
||||||
github.com/prometheus/common v0.63.0 // indirect
|
|
||||||
github.com/prometheus/procfs v0.15.1 // indirect
|
|
||||||
github.com/zclconf/go-cty v1.16.2 // indirect
|
|
||||||
github.com/zclconf/go-cty-yaml v1.1.0 // indirect
|
|
||||||
golang.org/x/crypto v0.36.0 // indirect
|
|
||||||
golang.org/x/mod v0.24.0 // indirect
|
|
||||||
golang.org/x/sync v0.12.0 // indirect
|
|
||||||
golang.org/x/sys v0.31.0 // indirect
|
|
||||||
golang.org/x/text v0.23.0 // indirect
|
|
||||||
golang.org/x/tools v0.31.0 // indirect
|
|
||||||
google.golang.org/protobuf v1.36.5 // indirect
|
|
||||||
)
|
|
||||||
|
338
go.sum
338
go.sum
@@ -1,340 +1,24 @@
|
|||||||
ariga.io/atlas v0.31.1-0.20250212144724-069be8033e83 h1:nX4HXncwIdvQ8/8sIUIf1nyCkK8qdBaHQ7EtzPpuiGE=
|
github.com/Jguer/go-alpm/v2 v2.0.5 h1:1TZxkvCIfTOhjhxGy/Z1FNSeuY9DXBKF5qxUoj0IZ0A=
|
||||||
ariga.io/atlas v0.31.1-0.20250212144724-069be8033e83/go.mod h1:Oe1xWPuu5q9LzyrWfbZmEZxFYeu4BHTyzfjeW2aZp/w=
|
github.com/Jguer/go-alpm/v2 v2.0.5/go.mod h1:zU4iKCtNkDARfj5BrKJXYAQ5nIjtZbySfa0paboSmTQ=
|
||||||
ariga.io/atlas v0.32.0 h1:y+77nueMrExLiKlz1CcPKh/nU7VSlWfBbwCShsJyvCw=
|
|
||||||
ariga.io/atlas v0.32.0/go.mod h1:Oe1xWPuu5q9LzyrWfbZmEZxFYeu4BHTyzfjeW2aZp/w=
|
|
||||||
entgo.io/ent v0.14.2 h1:ywld/j2Rx4EmnIKs8eZ29cbFA1zpB+DA9TLL5l3rlq0=
|
|
||||||
entgo.io/ent v0.14.2/go.mod h1:aDPE/OziPEu8+OWbzy4UlvWmD2/kbRuWfK2A40hcxJM=
|
|
||||||
entgo.io/ent v0.14.3 h1:wokAV/kIlH9TeklJWGGS7AYJdVckr0DloWjIcO9iIIQ=
|
|
||||||
entgo.io/ent v0.14.3/go.mod h1:aDPE/OziPEu8+OWbzy4UlvWmD2/kbRuWfK2A40hcxJM=
|
|
||||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
|
||||||
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
|
|
||||||
github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
|
|
||||||
github.com/Jguer/go-alpm/v2 v2.2.2 h1:sPwUoZp1X5Tw6K6Ba1lWvVJfcgVNEGVcxARLBttZnC0=
|
|
||||||
github.com/Jguer/go-alpm/v2 v2.2.2/go.mod h1:lfe8gSe83F/KERaQvEfrSqQ4n+8bES+ZIyKWR/gm3MI=
|
|
||||||
github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
|
|
||||||
github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
|
|
||||||
github.com/Morganamilo/go-pacmanconf v0.0.0-20210502114700-cff030e927a5 h1:TMscPjkb1ThXN32LuFY5bEYIcXZx3YlwzhS1GxNpn/c=
|
|
||||||
github.com/Morganamilo/go-pacmanconf v0.0.0-20210502114700-cff030e927a5/go.mod h1:Hk55m330jNiwxRodIlMCvw5iEyoRUCIY64W1p9D+tHc=
|
|
||||||
github.com/Morganamilo/go-srcinfo v1.0.0 h1:Wh4nEF+HJWo+29hnxM18Q2hi+DUf0GejS13+Wg+dzmI=
|
github.com/Morganamilo/go-srcinfo v1.0.0 h1:Wh4nEF+HJWo+29hnxM18Q2hi+DUf0GejS13+Wg+dzmI=
|
||||||
github.com/Morganamilo/go-srcinfo v1.0.0/go.mod h1:MP6VGY1NNpVUmYIEgoM9acix95KQqIRyqQ0hCLsyYUY=
|
github.com/Morganamilo/go-srcinfo v1.0.0/go.mod h1:MP6VGY1NNpVUmYIEgoM9acix95KQqIRyqQ0hCLsyYUY=
|
||||||
github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
|
|
||||||
github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
|
|
||||||
github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY=
|
|
||||||
github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4=
|
|
||||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
|
||||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
|
||||||
github.com/bmatcuk/doublestar v1.3.4 h1:gPypJ5xD31uhX6Tf54sDPUOBXTqKH4c9aPY66CyQrS0=
|
|
||||||
github.com/bmatcuk/doublestar v1.3.4/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE=
|
|
||||||
github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500 h1:6lhrsTEnloDPXyeZBvSYvQf8u86jbKehZPVDDlkgDl4=
|
|
||||||
github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M=
|
|
||||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
|
||||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
|
||||||
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
|
|
||||||
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
|
|
||||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
|
||||||
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
|
||||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
|
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
|
||||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
|
||||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
|
||||||
github.com/go-openapi/inflect v0.21.0 h1:FoBjBTQEcbg2cJUWX6uwL9OyIW8eqc9k4KhN4lfbeYk=
|
|
||||||
github.com/go-openapi/inflect v0.21.0/go.mod h1:INezMuUu7SJQc2AyR3WO0DqqYUJSj8Kb4hBd7WtjlAw=
|
|
||||||
github.com/go-openapi/inflect v0.21.1 h1:swwdJV4YPbuQaz68rHiBeQj+MWeBjDDNyEAi78Fhu4g=
|
|
||||||
github.com/go-openapi/inflect v0.21.1/go.mod h1:INezMuUu7SJQc2AyR3WO0DqqYUJSj8Kb4hBd7WtjlAw=
|
|
||||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
|
||||||
github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
|
|
||||||
github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
|
|
||||||
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
|
|
||||||
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
|
|
||||||
github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
|
|
||||||
github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
|
||||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
|
||||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
|
||||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
|
||||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
|
||||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
|
||||||
github.com/hashicorp/hcl/v2 v2.23.0 h1:Fphj1/gCylPxHutVSEOf2fBOh1VE4AuLV7+kbJf3qos=
|
|
||||||
github.com/hashicorp/hcl/v2 v2.23.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA=
|
|
||||||
github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0=
|
|
||||||
github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
|
|
||||||
github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
|
|
||||||
github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
|
|
||||||
github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
|
|
||||||
github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
|
|
||||||
github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
|
|
||||||
github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
|
|
||||||
github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
|
|
||||||
github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY=
|
|
||||||
github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
|
|
||||||
github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w=
|
|
||||||
github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM=
|
|
||||||
github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
|
|
||||||
github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
|
|
||||||
github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
|
|
||||||
github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c=
|
|
||||||
github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc=
|
|
||||||
github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak=
|
|
||||||
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
|
|
||||||
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
|
|
||||||
github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A=
|
|
||||||
github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
|
|
||||||
github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
|
|
||||||
github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
|
|
||||||
github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
|
|
||||||
github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
|
|
||||||
github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
|
||||||
github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
|
||||||
github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag=
|
|
||||||
github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
|
||||||
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
|
|
||||||
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
|
||||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
|
|
||||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
|
||||||
github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
|
|
||||||
github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
|
|
||||||
github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
|
|
||||||
github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM=
|
|
||||||
github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
|
|
||||||
github.com/jackc/pgtype v1.14.4 h1:fKuNiCumbKTAIxQwXfB/nsrnkEI6bPJrrSiMKgbJ2j8=
|
|
||||||
github.com/jackc/pgtype v1.14.4/go.mod h1:aKeozOde08iifGosdJpz9MBZonJOUJxqNpPBcMJTlVA=
|
|
||||||
github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
|
|
||||||
github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
|
|
||||||
github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
|
|
||||||
github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=
|
|
||||||
github.com/jackc/pgx/v4 v4.18.2/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw=
|
|
||||||
github.com/jackc/pgx/v4 v4.18.3 h1:dE2/TrEsGX3RBprb3qryqSV9Y60iZN1C6i8IrmW9/BA=
|
|
||||||
github.com/jackc/pgx/v4 v4.18.3/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw=
|
|
||||||
github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
|
||||||
github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
|
||||||
github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
|
||||||
github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
|
||||||
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
|
|
||||||
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
|
|
||||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
|
||||||
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
|
|
||||||
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
|
|
||||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
|
||||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
|
||||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
|
||||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
|
||||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
|
||||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
|
||||||
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
|
|
||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
|
||||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
|
||||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
|
||||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
|
||||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
|
||||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
|
||||||
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
|
||||||
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
|
||||||
github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8=
|
|
||||||
github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
|
||||||
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
|
|
||||||
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
|
||||||
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
|
||||||
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
|
||||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
|
||||||
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
|
|
||||||
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
|
|
||||||
github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
|
|
||||||
github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
|
|
||||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
|
||||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
|
||||||
github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8=
|
|
||||||
github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I=
|
|
||||||
github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs=
|
|
||||||
github.com/otiai10/mint v1.6.3/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM=
|
|
||||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
|
||||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/prometheus/client_golang v1.21.0 h1:DIsaGmiaBkSangBgMtWdNfxbMNdku5IK6iNhrEqWvdA=
|
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
|
||||||
github.com/prometheus/client_golang v1.21.0/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
|
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||||
github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk=
|
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
||||||
github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
|
|
||||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
|
||||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
|
||||||
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
|
|
||||||
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
|
|
||||||
github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
|
|
||||||
github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
|
|
||||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
|
||||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
|
||||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
|
||||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
|
||||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
|
||||||
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
|
||||||
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
|
|
||||||
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
|
|
||||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
|
||||||
github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah2SE=
|
|
||||||
github.com/sethvargo/go-retry v0.3.0/go.mod h1:mNX17F0C/HguQMyMyJxcnU471gOZGxCLyYaFyAZraas=
|
|
||||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
|
||||||
github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
|
|
||||||
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
|
||||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
|
||||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
|
||||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
|
||||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
|
||||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
|
||||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
|
||||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
|
||||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
|
||||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
github.com/wercker/journalhook v0.0.0-20180428041537-5d0a5ae867b3 h1:shC1HB1UogxN5Ech3Yqaaxj1X/P656PPCB4RbojIJqc=
|
||||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
github.com/wercker/journalhook v0.0.0-20180428041537-5d0a5ae867b3/go.mod h1:XCsSkdKK4gwBMNrOCZWww0pX6AOt+2gYc5Z6jBRrNVg=
|
||||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
github.com/yargevad/filepathx v1.0.0 h1:SYcT+N3tYGi+NvazubCNlvgIPbzAk7i7y2dwg3I5FYc=
|
||||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/yargevad/filepathx v1.0.0/go.mod h1:BprfX/gpYNJHJfc35GjRRpVcwWXS89gGulUIU5tK3tA=
|
||||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4=
|
||||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
|
||||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
|
||||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
|
||||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
|
||||||
github.com/wercker/journalhook v0.0.0-20230927020745-64542ffa4117 h1:67A5tweHp3C7osHjrYsy6pQZ00bYkTTttZ7kiOwwHeA=
|
|
||||||
github.com/wercker/journalhook v0.0.0-20230927020745-64542ffa4117/go.mod h1:XCsSkdKK4gwBMNrOCZWww0pX6AOt+2gYc5Z6jBRrNVg=
|
|
||||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
|
||||||
github.com/zclconf/go-cty v1.16.2 h1:LAJSwc3v81IRBZyUVQDUdZ7hs3SYs9jv0eZJDWHD/70=
|
|
||||||
github.com/zclconf/go-cty v1.16.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
|
|
||||||
github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo=
|
|
||||||
github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM=
|
|
||||||
github.com/zclconf/go-cty-yaml v1.1.0 h1:nP+jp0qPHv2IhUVqmQSzjvqAWcObN0KBkUl2rWBdig0=
|
|
||||||
github.com/zclconf/go-cty-yaml v1.1.0/go.mod h1:9YLUH4g7lOhVWqUbctnVlZ5KLpg7JAprQNgxSZ1Gyxs=
|
|
||||||
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
|
|
||||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
|
||||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
|
||||||
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
|
||||||
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
|
||||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
|
||||||
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
|
|
||||||
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
|
|
||||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
|
|
||||||
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
|
||||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
|
||||||
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
|
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
|
||||||
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
|
|
||||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
|
||||||
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
|
||||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
|
||||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
|
||||||
golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
|
||||||
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
|
||||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
|
||||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
|
||||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
|
||||||
golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ=
|
|
||||||
golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus=
|
|
||||||
golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
|
|
||||||
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
|
|
||||||
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
|
|
||||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
|
||||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
|
||||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
|
||||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
|
||||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
|
||||||
golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM=
|
|
||||||
golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
|
|
||||||
golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
|
|
||||||
golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
|
|
||||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
|
||||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
|
||||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
|
||||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
|
||||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
|
||||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
|
||||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
|
||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
|
|
||||||
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
|
||||||
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
|
|
||||||
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
|
||||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
|
||||||
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
|
|
||||||
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
|
||||||
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
|
|
||||||
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
|
||||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
|
||||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
|
||||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
|
||||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
|
||||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
|
||||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
|
||||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|
||||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|
||||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|
||||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
|
||||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
|
||||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
|
||||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
|
||||||
golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
|
|
||||||
golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
|
|
||||||
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
|
|
||||||
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
|
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
|
||||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
|
||||||
golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
|
||||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
|
||||||
golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
|
||||||
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
|
||||||
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
|
||||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
|
||||||
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
|
||||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
|
||||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
|
||||||
golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY=
|
|
||||||
golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY=
|
|
||||||
golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU=
|
|
||||||
golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ=
|
|
||||||
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
|
||||||
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
|
||||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
|
||||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
|
||||||
google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
|
|
||||||
google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
|
||||||
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
|
|
||||||
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
|
||||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
|
||||||
gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
|
|
||||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|
||||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
|
||||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
|
||||||
|
355
housekeeping.go
355
housekeeping.go
@@ -1,355 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
log "github.com/sirupsen/logrus"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"somegit.dev/ALHP/ALHP.GO/ent"
|
|
||||||
"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func housekeeping(ctx context.Context, repo, march string, wg *sync.WaitGroup) error {
|
|
||||||
defer wg.Done()
|
|
||||||
fullRepo := repo + "-" + march
|
|
||||||
log.Debugf("[%s] start housekeeping", fullRepo)
|
|
||||||
packages, err := Glob(filepath.Join(conf.Basedir.Repo, fullRepo, "/**/*.pkg.tar.zst"))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Debugf("[HK/%s] removing orphans, signature check", fullRepo)
|
|
||||||
for _, path := range packages {
|
|
||||||
mPackage := Package(path)
|
|
||||||
|
|
||||||
dbPkg, err := mPackage.DBPackage(ctx, db)
|
|
||||||
if ent.IsNotFound(err) {
|
|
||||||
log.Infof("[HK] removing orphan %s->%s", fullRepo, filepath.Base(path))
|
|
||||||
pkg := &ProtoPackage{
|
|
||||||
FullRepo: *mPackage.FullRepo(),
|
|
||||||
PkgFiles: []string{path},
|
|
||||||
March: *mPackage.MArch(),
|
|
||||||
}
|
|
||||||
buildManager.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
|
|
||||||
continue
|
|
||||||
} else if err != nil {
|
|
||||||
log.Warningf("[HK] error fetching %s->%q from db: %v", fullRepo, path, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
pkg := &ProtoPackage{
|
|
||||||
Pkgbase: dbPkg.Pkgbase,
|
|
||||||
Repo: mPackage.Repo(),
|
|
||||||
FullRepo: *mPackage.FullRepo(),
|
|
||||||
DBPackage: dbPkg,
|
|
||||||
March: *mPackage.MArch(),
|
|
||||||
Arch: *mPackage.Arch(),
|
|
||||||
}
|
|
||||||
|
|
||||||
// check if package is still part of repo
|
|
||||||
dbs, err := alpmHandle.SyncDBs()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
buildManager.alpmMutex.Lock()
|
|
||||||
pkgResolved, err := dbs.FindSatisfier(mPackage.Name())
|
|
||||||
buildManager.alpmMutex.Unlock()
|
|
||||||
if err != nil ||
|
|
||||||
pkgResolved.DB().Name() != pkg.DBPackage.Repository.String() ||
|
|
||||||
pkgResolved.DB().Name() != pkg.Repo.String() ||
|
|
||||||
pkgResolved.Architecture() != pkg.Arch ||
|
|
||||||
pkgResolved.Name() != mPackage.Name() ||
|
|
||||||
MatchGlobList(pkg.Pkgbase, conf.Blacklist.Packages) {
|
|
||||||
switch {
|
|
||||||
case err != nil:
|
|
||||||
log.Infof("[HK] %s->%s not included in repo (resolve error: %v)", pkg.FullRepo, mPackage.Name(), err)
|
|
||||||
case pkgResolved.DB().Name() != pkg.DBPackage.Repository.String():
|
|
||||||
log.Infof("[HK] %s->%s not included in repo (repo mismatch: repo:%s != db:%s)", pkg.FullRepo,
|
|
||||||
mPackage.Name(), pkgResolved.DB().Name(), pkg.DBPackage.Repository.String())
|
|
||||||
case pkgResolved.DB().Name() != pkg.Repo.String():
|
|
||||||
log.Infof("[HK] %s->%s not included in repo (repo mismatch: repo:%s != pkg:%s)", pkg.FullRepo,
|
|
||||||
mPackage.Name(), pkgResolved.DB().Name(), pkg.Repo.String())
|
|
||||||
case pkgResolved.Architecture() != pkg.Arch:
|
|
||||||
log.Infof("[HK] %s->%s not included in repo (arch mismatch: repo:%s != pkg:%s)", pkg.FullRepo,
|
|
||||||
mPackage.Name(), pkgResolved.Architecture(), pkg.Arch)
|
|
||||||
case pkgResolved.Name() != mPackage.Name():
|
|
||||||
log.Infof("[HK] %s->%s not included in repo (name mismatch: repo:%s != pkg:%s)", pkg.FullRepo,
|
|
||||||
mPackage.Name(), pkgResolved.Name(), mPackage.Name())
|
|
||||||
case MatchGlobList(pkg.Pkgbase, conf.Blacklist.Packages):
|
|
||||||
log.Infof("[HK] %s->%s not included in repo (blacklisted pkgbase %s)", pkg.FullRepo, mPackage.Name(), pkg.Pkgbase)
|
|
||||||
}
|
|
||||||
|
|
||||||
// package not found on mirror/db -> not part of any repo anymore
|
|
||||||
err = pkg.findPkgFiles()
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("[HK] %s->%s unable to get pkg-files: %v", pkg.FullRepo, mPackage.Name(), err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
err = db.DBPackage.DeleteOne(pkg.DBPackage).Exec(ctx)
|
|
||||||
pkg.DBPackage = nil
|
|
||||||
buildManager.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if pkg.DBPackage.LastVerified.Before(pkg.DBPackage.BuildTimeStart) {
|
|
||||||
err := pkg.DBPackage.Update().SetLastVerified(time.Now().UTC()).Exec(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// check if pkg signature is valid
|
|
||||||
valid, err := mPackage.HasValidSignature()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !valid {
|
|
||||||
log.Infof("[HK] %s->%s invalid package signature", pkg.FullRepo, pkg.Pkgbase)
|
|
||||||
buildManager.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// compare db-version with repo version
|
|
||||||
repoVer, err := pkg.repoVersion()
|
|
||||||
if err == nil && repoVer != dbPkg.RepoVersion {
|
|
||||||
log.Infof("[HK] %s->%s update repoVersion %s->%s", pkg.FullRepo, pkg.Pkgbase, dbPkg.RepoVersion, repoVer)
|
|
||||||
pkg.DBPackage, err = pkg.DBPackage.Update().SetRepoVersion(repoVer).ClearTagRev().Save(context.Background())
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// check all packages from db for existence
|
|
||||||
dbPackages, err := db.DBPackage.Query().Where(
|
|
||||||
dbpackage.And(
|
|
||||||
dbpackage.RepositoryEQ(dbpackage.Repository(repo)),
|
|
||||||
dbpackage.March(march),
|
|
||||||
)).All(context.Background())
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Debugf("[HK/%s] checking %d packages from database", fullRepo, len(dbPackages))
|
|
||||||
|
|
||||||
for _, dbPkg := range dbPackages {
|
|
||||||
pkg := &ProtoPackage{
|
|
||||||
Pkgbase: dbPkg.Pkgbase,
|
|
||||||
Repo: dbPkg.Repository,
|
|
||||||
March: dbPkg.March,
|
|
||||||
FullRepo: dbPkg.Repository.String() + "-" + dbPkg.March,
|
|
||||||
DBPackage: dbPkg,
|
|
||||||
}
|
|
||||||
|
|
||||||
if !pkg.isAvailable(ctx, alpmHandle) {
|
|
||||||
log.Infof("[HK] %s->%s not found on mirror, removing", pkg.FullRepo, pkg.Pkgbase)
|
|
||||||
err = db.DBPackage.DeleteOne(dbPkg).Exec(context.Background())
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("[HK] error deleting package %s->%s: %v", pkg.FullRepo, dbPkg.Pkgbase, err)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
switch {
|
|
||||||
case dbPkg.Status == dbpackage.StatusLatest && dbPkg.RepoVersion != "":
|
|
||||||
// check lastVersionBuild
|
|
||||||
if dbPkg.LastVersionBuild != dbPkg.RepoVersion {
|
|
||||||
log.Infof("[HK] %s->%s updating lastVersionBuild %s -> %s", fullRepo, dbPkg.Pkgbase, dbPkg.LastVersionBuild, dbPkg.RepoVersion)
|
|
||||||
nDBPkg, err := dbPkg.Update().SetLastVersionBuild(dbPkg.RepoVersion).Save(ctx)
|
|
||||||
if err != nil {
|
|
||||||
log.Warningf("[HK] error updating lastVersionBuild for %s->%s: %v", fullRepo, dbPkg.Pkgbase, err)
|
|
||||||
} else {
|
|
||||||
dbPkg = nDBPkg
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var existingSplits []string
|
|
||||||
var missingSplits []string
|
|
||||||
for _, splitPkg := range dbPkg.Packages {
|
|
||||||
pkgFile := filepath.Join(conf.Basedir.Repo, fullRepo, "os", conf.Arch,
|
|
||||||
splitPkg+"-"+dbPkg.RepoVersion+"-"+conf.Arch+".pkg.tar.zst")
|
|
||||||
_, err = os.Stat(pkgFile)
|
|
||||||
switch {
|
|
||||||
case os.IsNotExist(err):
|
|
||||||
missingSplits = append(missingSplits, splitPkg)
|
|
||||||
case err != nil:
|
|
||||||
log.Warningf("[HK] error reading package-file %s: %v", splitPkg, err)
|
|
||||||
default:
|
|
||||||
existingSplits = append(existingSplits, pkgFile)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(missingSplits) > 0 {
|
|
||||||
log.Infof("[HK] %s->%s missing split-package(s): %s", fullRepo, dbPkg.Pkgbase, missingSplits)
|
|
||||||
pkg.DBPackage, err = pkg.DBPackage.Update().
|
|
||||||
ClearRepoVersion().
|
|
||||||
ClearTagRev().
|
|
||||||
SetStatus(dbpackage.StatusQueued).
|
|
||||||
Save(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
pkg := &ProtoPackage{
|
|
||||||
FullRepo: fullRepo,
|
|
||||||
PkgFiles: existingSplits,
|
|
||||||
March: march,
|
|
||||||
DBPackage: dbPkg,
|
|
||||||
}
|
|
||||||
buildManager.repoPurge[fullRepo] <- []*ProtoPackage{pkg}
|
|
||||||
}
|
|
||||||
|
|
||||||
rawState, err := os.ReadFile(filepath.Join(conf.Basedir.Work, stateDir, dbPkg.Repository.String()+"-"+conf.Arch, dbPkg.Pkgbase))
|
|
||||||
if err != nil {
|
|
||||||
log.Infof("[HK] state not found for %s->%s: %v, removing package", fullRepo, dbPkg.Pkgbase, err)
|
|
||||||
pkg := &ProtoPackage{
|
|
||||||
FullRepo: fullRepo,
|
|
||||||
PkgFiles: existingSplits,
|
|
||||||
March: march,
|
|
||||||
DBPackage: dbPkg,
|
|
||||||
}
|
|
||||||
buildManager.repoPurge[fullRepo] <- []*ProtoPackage{pkg}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
state, err := parseState(string(rawState))
|
|
||||||
if err != nil {
|
|
||||||
log.Warningf("[HK] error parsing state file for %s->%s: %v", fullRepo, dbPkg.Pkgbase, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if dbPkg.TagRev != nil && state.TagRev == *dbPkg.TagRev && state.PkgVer != dbPkg.Version {
|
|
||||||
log.Infof("[HK] reseting package %s->%s with mismatched state information (%s!=%s)",
|
|
||||||
fullRepo, dbPkg.Pkgbase, state.PkgVer, dbPkg.Version)
|
|
||||||
err = dbPkg.Update().SetStatus(dbpackage.StatusQueued).ClearTagRev().Exec(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case dbPkg.Status == dbpackage.StatusLatest && dbPkg.RepoVersion == "":
|
|
||||||
log.Infof("[HK] reseting missing package %s->%s with no repo version", fullRepo, dbPkg.Pkgbase)
|
|
||||||
err = dbPkg.Update().SetStatus(dbpackage.StatusQueued).ClearTagRev().ClearRepoVersion().Exec(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
case dbPkg.Status == dbpackage.StatusSkipped && dbPkg.RepoVersion != "" && !strings.HasPrefix(dbPkg.SkipReason, "delayed"):
|
|
||||||
log.Infof("[HK] delete skipped package %s->%s", fullRepo, dbPkg.Pkgbase)
|
|
||||||
pkg := &ProtoPackage{
|
|
||||||
FullRepo: fullRepo,
|
|
||||||
March: march,
|
|
||||||
DBPackage: dbPkg,
|
|
||||||
}
|
|
||||||
buildManager.repoPurge[fullRepo] <- []*ProtoPackage{pkg}
|
|
||||||
case dbPkg.Status == dbpackage.StatusSkipped && dbPkg.SkipReason == "blacklisted" && !MatchGlobList(pkg.Pkgbase, conf.Blacklist.Packages):
|
|
||||||
log.Infof("[HK] requeue previously blacklisted package %s->%s", fullRepo, dbPkg.Pkgbase)
|
|
||||||
err = dbPkg.Update().SetStatus(dbpackage.StatusQueued).ClearSkipReason().ClearTagRev().Exec(context.Background())
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
case dbPkg.Status == dbpackage.StatusFailed && dbPkg.RepoVersion != "":
|
|
||||||
log.Infof("[HK] package %s->%s failed but still present in repo, removing", fullRepo, dbPkg.Pkgbase)
|
|
||||||
pkg := &ProtoPackage{
|
|
||||||
FullRepo: fullRepo,
|
|
||||||
March: march,
|
|
||||||
DBPackage: dbPkg,
|
|
||||||
}
|
|
||||||
buildManager.repoPurge[fullRepo] <- []*ProtoPackage{pkg}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Debugf("[HK/%s] all tasks finished", fullRepo)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func logHK(ctx context.Context) error {
|
|
||||||
// check if package for log exists and if error can be fixed by rebuild
|
|
||||||
logFiles, err := Glob(filepath.Join(conf.Basedir.Repo, logDir, "/**/*.log"))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, logFile := range logFiles {
|
|
||||||
pathSplit := strings.Split(logFile, string(filepath.Separator))
|
|
||||||
extSplit := strings.Split(filepath.Base(logFile), ".")
|
|
||||||
pkgbase := strings.Join(extSplit[:len(extSplit)-1], ".")
|
|
||||||
march := pathSplit[len(pathSplit)-2]
|
|
||||||
|
|
||||||
pkg := ProtoPackage{
|
|
||||||
Pkgbase: pkgbase,
|
|
||||||
March: march,
|
|
||||||
}
|
|
||||||
|
|
||||||
if exists, err := pkg.exists(); err != nil {
|
|
||||||
return err
|
|
||||||
} else if !exists {
|
|
||||||
_ = os.Remove(logFile)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
pkgSkipped, err := db.DBPackage.Query().Where(
|
|
||||||
dbpackage.Pkgbase(pkg.Pkgbase),
|
|
||||||
dbpackage.March(pkg.March),
|
|
||||||
dbpackage.StatusEQ(dbpackage.StatusSkipped),
|
|
||||||
).Exist(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if pkgSkipped {
|
|
||||||
_ = os.Remove(logFile)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
logContent, err := os.ReadFile(logFile)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
sLogContent := string(logContent)
|
|
||||||
|
|
||||||
if rePortError.MatchString(sLogContent) || reSigError.MatchString(sLogContent) || reDownloadError.MatchString(sLogContent) ||
|
|
||||||
reDownloadError2.MatchString(sLogContent) {
|
|
||||||
rows, err := db.DBPackage.Update().Where(dbpackage.Pkgbase(pkg.Pkgbase), dbpackage.March(pkg.March),
|
|
||||||
dbpackage.StatusEQ(dbpackage.StatusFailed)).ClearTagRev().SetStatus(dbpackage.StatusQueued).Save(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if rows > 0 {
|
|
||||||
log.Infof("[HK/%s/%s] fixable build-error detected, requeueing package (%d)", pkg.March, pkg.Pkgbase, rows)
|
|
||||||
}
|
|
||||||
} else if reLdError.MatchString(sLogContent) || reRustLTOError.MatchString(sLogContent) {
|
|
||||||
rows, err := db.DBPackage.Update().Where(
|
|
||||||
dbpackage.Pkgbase(pkg.Pkgbase),
|
|
||||||
dbpackage.March(pkg.March),
|
|
||||||
dbpackage.StatusEQ(dbpackage.StatusFailed),
|
|
||||||
dbpackage.LtoNotIn(dbpackage.LtoAutoDisabled, dbpackage.LtoDisabled),
|
|
||||||
).ClearTagRev().SetStatus(dbpackage.StatusQueued).SetLto(dbpackage.LtoAutoDisabled).Save(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if rows > 0 {
|
|
||||||
log.Infof("[HK/%s/%s] fixable build-error detected (linker-error), requeueing package (%d)", pkg.March, pkg.Pkgbase, rows)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func debugHK() {
|
|
||||||
for _, march := range conf.March {
|
|
||||||
if _, err := os.Stat(filepath.Join(conf.Basedir.Debug, march)); err == nil {
|
|
||||||
log.Debugf("[DHK/%s] start cleanup debug packages", march)
|
|
||||||
cleanCmd := exec.Command("paccache", "-rc", filepath.Join(conf.Basedir.Debug, march), "-k", "1")
|
|
||||||
res, err := cleanCmd.CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
log.Warningf("[DHK/%s] cleanup debug packages failed: %v (%s)", march, err, string(res))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
812
main.go
812
main.go
@@ -1,153 +1,731 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"bufio"
|
||||||
"entgo.io/ent/dialect"
|
"bytes"
|
||||||
"entgo.io/ent/dialect/sql"
|
"fmt"
|
||||||
"flag"
|
|
||||||
"github.com/Jguer/go-alpm/v2"
|
"github.com/Jguer/go-alpm/v2"
|
||||||
_ "github.com/jackc/pgx/v4/stdlib"
|
"github.com/Morganamilo/go-srcinfo"
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
"github.com/wercker/journalhook"
|
"github.com/wercker/journalhook"
|
||||||
|
"github.com/yargevad/filepathx"
|
||||||
"gopkg.in/yaml.v2"
|
"gopkg.in/yaml.v2"
|
||||||
|
"io"
|
||||||
|
"math/rand"
|
||||||
"os"
|
"os"
|
||||||
|
"os/exec"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"somegit.dev/ALHP/ALHP.GO/ent"
|
"regexp"
|
||||||
"somegit.dev/ALHP/ALHP.GO/ent/migrate"
|
"runtime"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"syscall"
|
"syscall"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
pacmanConf = "/usr/share/devtools/pacman-extra.conf"
|
||||||
|
makepkgConf = "/usr/share/devtools/makepkg-x86_64.conf"
|
||||||
|
logDir = "logs"
|
||||||
|
orgChrootName = "root"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
conf *Conf
|
conf = Conf{}
|
||||||
repos []string
|
repos []string
|
||||||
alpmHandle *alpm.Handle
|
reMarch = regexp.MustCompile(`(-march=)(.+?) `)
|
||||||
buildManager *BuildManager
|
rePkgRel = regexp.MustCompile(`(?m)^pkgrel\s*=\s*(.+)$`)
|
||||||
db *ent.Client
|
rePkgFile = regexp.MustCompile(`^(.*)-.*-.*-(?:x86_64|any)\.pkg\.tar\.zst(?:\.sig)*$`)
|
||||||
journalLog = flag.Bool("journal", false, "Log to systemd journal instead of stdout")
|
buildManager BuildManager
|
||||||
checkInterval = flag.Int("interval", 5, "How often svn2git should be checked in minutes (default: 5)")
|
|
||||||
configFile = flag.String("config", "config.yaml", "set config file name/path")
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type BuildPackage struct {
|
||||||
|
Pkgbase string
|
||||||
|
Pkgbuild string
|
||||||
|
Srcinfo *srcinfo.Srcinfo
|
||||||
|
PkgFiles []string
|
||||||
|
Repo string
|
||||||
|
March string
|
||||||
|
FullRepo string
|
||||||
|
}
|
||||||
|
|
||||||
|
type BuildManager struct {
|
||||||
|
toBuild chan *BuildPackage
|
||||||
|
toParse chan *BuildPackage
|
||||||
|
toPurge chan *BuildPackage
|
||||||
|
toRepoAdd chan *BuildPackage
|
||||||
|
exit bool
|
||||||
|
buildWG sync.WaitGroup
|
||||||
|
parseWG sync.WaitGroup
|
||||||
|
failedMutex sync.RWMutex
|
||||||
|
buildProcesses []*os.Process
|
||||||
|
buildProcMutex sync.RWMutex
|
||||||
|
stats struct {
|
||||||
|
fullyBuild int
|
||||||
|
eligible int
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type Conf struct {
|
||||||
|
Arch string
|
||||||
|
Repos, March, Blacklist []string
|
||||||
|
Svn2git map[string]string
|
||||||
|
Basedir struct {
|
||||||
|
Repo, Chroot, Makepkg, Upstream string
|
||||||
|
}
|
||||||
|
Build struct {
|
||||||
|
Worker int
|
||||||
|
Makej int
|
||||||
|
}
|
||||||
|
Logging struct {
|
||||||
|
Level string
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func check(e error) {
|
||||||
|
if e != nil {
|
||||||
|
panic(e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func contains(s []string, str string) bool {
|
||||||
|
if i := find(s, str); i != -1 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func find(s []string, str string) int {
|
||||||
|
for i, v := range s {
|
||||||
|
if v == str {
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
func copyFile(src, dst string) (int64, error) {
|
||||||
|
sourceFileStat, err := os.Stat(src)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !sourceFileStat.Mode().IsRegular() {
|
||||||
|
return 0, fmt.Errorf("%s is not a regular file", src)
|
||||||
|
}
|
||||||
|
|
||||||
|
source, err := os.Open(src)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
defer func(source *os.File) {
|
||||||
|
check(source.Close())
|
||||||
|
}(source)
|
||||||
|
|
||||||
|
destination, err := os.Create(dst)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
defer func(destination *os.File) {
|
||||||
|
check(destination.Close())
|
||||||
|
}(destination)
|
||||||
|
nBytes, err := io.Copy(destination, source)
|
||||||
|
return nBytes, err
|
||||||
|
}
|
||||||
|
|
||||||
|
//goland:noinspection SpellCheckingInspection
|
||||||
|
func setupMakepkg(march string) {
|
||||||
|
lMakepkg := filepath.Join(conf.Basedir.Makepkg, fmt.Sprintf("makepkg-%s.conf", march))
|
||||||
|
|
||||||
|
check(os.MkdirAll(conf.Basedir.Makepkg, os.ModePerm))
|
||||||
|
t, err := os.ReadFile(makepkgConf)
|
||||||
|
check(err)
|
||||||
|
makepkgStr := string(t)
|
||||||
|
|
||||||
|
makepkgStr = strings.ReplaceAll(makepkgStr, "-mtune=generic", "")
|
||||||
|
makepkgStr = strings.ReplaceAll(makepkgStr, "-O2", "-O3")
|
||||||
|
makepkgStr = strings.ReplaceAll(makepkgStr, " check ", " !check ")
|
||||||
|
makepkgStr = strings.ReplaceAll(makepkgStr, " color ", " !color ")
|
||||||
|
makepkgStr = strings.ReplaceAll(makepkgStr, "#MAKEFLAGS=\"-j2\"", "MAKEFLAGS=\"-j"+strconv.Itoa(conf.Build.Makej)+"\"")
|
||||||
|
makepkgStr = reMarch.ReplaceAllString(makepkgStr, "${1}"+march)
|
||||||
|
|
||||||
|
check(os.WriteFile(lMakepkg, []byte(makepkgStr), os.ModePerm))
|
||||||
|
}
|
||||||
|
|
||||||
|
func syncMarchs() {
|
||||||
|
files, err := os.ReadDir(conf.Basedir.Repo)
|
||||||
|
check(err)
|
||||||
|
|
||||||
|
var eRepos []string
|
||||||
|
for _, file := range files {
|
||||||
|
if file.Name() != "." && file.Name() != logDir && file.IsDir() {
|
||||||
|
eRepos = append(eRepos, file.Name())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, march := range conf.March {
|
||||||
|
setupMakepkg(march)
|
||||||
|
for _, repo := range conf.Repos {
|
||||||
|
tRepo := fmt.Sprintf("%s-%s", repo, march)
|
||||||
|
repos = append(repos, tRepo)
|
||||||
|
|
||||||
|
if _, err := os.Stat(filepath.Join(filepath.Join(conf.Basedir.Repo, tRepo, "os", conf.Arch))); os.IsNotExist(err) {
|
||||||
|
log.Debugf("Creating path %s", filepath.Join(conf.Basedir.Repo, tRepo, "os", conf.Arch))
|
||||||
|
check(os.MkdirAll(filepath.Join(conf.Basedir.Repo, tRepo, "os", conf.Arch), os.ModePerm))
|
||||||
|
}
|
||||||
|
|
||||||
|
if i := find(eRepos, tRepo); i != -1 {
|
||||||
|
eRepos = append(eRepos[:i], eRepos[i+1:]...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Infof("Repos: %s", repos)
|
||||||
|
|
||||||
|
for _, repo := range eRepos {
|
||||||
|
log.Infof("Removing old repo %s", repo)
|
||||||
|
check(os.RemoveAll(filepath.Join(conf.Basedir.Repo, repo)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func importKeys(pkg *BuildPackage) {
|
||||||
|
if pkg.Srcinfo.ValidPGPKeys != nil {
|
||||||
|
args := []string{"--keyserver", "keyserver.ubuntu.com", "--recv-keys"}
|
||||||
|
args = append(args, pkg.Srcinfo.ValidPGPKeys...)
|
||||||
|
cmd := backgroundCmd("gpg", args...)
|
||||||
|
res, err := cmd.CombinedOutput()
|
||||||
|
log.Debug(string(res))
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
log.Warningf("Unable to import keys: %s", string(res))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func increasePkgRel(pkg *BuildPackage) {
|
||||||
|
f, err := os.OpenFile(pkg.Pkgbuild, os.O_RDWR, os.ModePerm)
|
||||||
|
check(err)
|
||||||
|
defer func(f *os.File) {
|
||||||
|
check(f.Close())
|
||||||
|
}(f)
|
||||||
|
|
||||||
|
fStr, err := io.ReadAll(f)
|
||||||
|
check(err)
|
||||||
|
|
||||||
|
nStr := rePkgRel.ReplaceAllLiteralString(string(fStr), "pkgrel="+pkg.Srcinfo.Pkgrel+".1")
|
||||||
|
_, err = f.Seek(0, 0)
|
||||||
|
check(err)
|
||||||
|
check(f.Truncate(0))
|
||||||
|
|
||||||
|
_, err = f.WriteString(nStr)
|
||||||
|
check(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func gitClean(pkg *BuildPackage) {
|
||||||
|
cmd := backgroundCmd("sudo", "git_clean.sh", filepath.Dir(pkg.Pkgbuild))
|
||||||
|
res, err := cmd.CombinedOutput()
|
||||||
|
if err != nil {
|
||||||
|
log.Warningf("git clean failed with %v:\n%s", err, res)
|
||||||
|
} else {
|
||||||
|
log.Debug(string(res))
|
||||||
|
}
|
||||||
|
check(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *BuildManager) buildWorker(id int) {
|
||||||
|
err := syscall.Setpriority(syscall.PRIO_PROCESS, 0, 18)
|
||||||
|
if err != nil {
|
||||||
|
log.Warningf("[worker-%d] Failed to drop priority: %v", id, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case pkg := <-b.toBuild:
|
||||||
|
if b.exit {
|
||||||
|
log.Infof("Worker %d exited...", id)
|
||||||
|
return
|
||||||
|
} else {
|
||||||
|
b.buildWG.Add(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
start := time.Now()
|
||||||
|
|
||||||
|
log.Infof("[%s/%s] Build starting", pkg.FullRepo, pkg.Pkgbase)
|
||||||
|
|
||||||
|
importKeys(pkg)
|
||||||
|
increasePkgRel(pkg)
|
||||||
|
pkg.PkgFiles = []string{}
|
||||||
|
|
||||||
|
cmd := backgroundCmd("sh", "-c",
|
||||||
|
"cd "+filepath.Dir(pkg.Pkgbuild)+"&&makechrootpkg -c -D "+conf.Basedir.Makepkg+" -l worker-"+strconv.Itoa(id)+" -r "+conf.Basedir.Chroot+" -- "+
|
||||||
|
"--config "+filepath.Join(conf.Basedir.Makepkg, fmt.Sprintf("makepkg-%s.conf", pkg.March)))
|
||||||
|
var out bytes.Buffer
|
||||||
|
cmd.Stdout = &out
|
||||||
|
cmd.Stderr = &out
|
||||||
|
|
||||||
|
check(cmd.Start())
|
||||||
|
|
||||||
|
b.buildProcMutex.Lock()
|
||||||
|
b.buildProcesses = append(b.buildProcesses, cmd.Process)
|
||||||
|
b.buildProcMutex.Unlock()
|
||||||
|
|
||||||
|
err := cmd.Wait()
|
||||||
|
|
||||||
|
b.buildProcMutex.Lock()
|
||||||
|
for i := range b.buildProcesses {
|
||||||
|
if b.buildProcesses[i].Pid == cmd.Process.Pid {
|
||||||
|
b.buildProcesses = append(b.buildProcesses[:i], b.buildProcesses[i+1:]...)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b.buildProcMutex.Unlock()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
if b.exit {
|
||||||
|
gitClean(pkg)
|
||||||
|
b.buildWG.Done()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Warningf("[%s/%s] Build failed, exit code %d", pkg.FullRepo, pkg.Pkgbase, cmd.ProcessState.ExitCode())
|
||||||
|
|
||||||
|
b.failedMutex.Lock()
|
||||||
|
f, err := os.OpenFile(filepath.Join(conf.Basedir.Repo, pkg.FullRepo+"_failed.txt"), os.O_WRONLY|os.O_APPEND|os.O_CREATE, os.ModePerm)
|
||||||
|
check(err)
|
||||||
|
|
||||||
|
if pkg.Srcinfo.Epoch != "" {
|
||||||
|
_, err := f.WriteString(fmt.Sprintf("%s==%s:%s-%s\n", pkg.Pkgbase, pkg.Srcinfo.Epoch, pkg.Srcinfo.Pkgver, pkg.Srcinfo.Pkgrel))
|
||||||
|
check(err)
|
||||||
|
} else {
|
||||||
|
_, err := f.WriteString(fmt.Sprintf("%s==%s-%s\n", pkg.Pkgbase, pkg.Srcinfo.Pkgver, pkg.Srcinfo.Pkgrel))
|
||||||
|
check(err)
|
||||||
|
}
|
||||||
|
check(f.Close())
|
||||||
|
b.failedMutex.Unlock()
|
||||||
|
|
||||||
|
check(os.MkdirAll(filepath.Join(conf.Basedir.Repo, "logs"), os.ModePerm))
|
||||||
|
check(os.WriteFile(filepath.Join(conf.Basedir.Repo, "logs", pkg.Pkgbase+".log"), out.Bytes(), os.ModePerm))
|
||||||
|
|
||||||
|
gitClean(pkg)
|
||||||
|
b.buildWG.Done()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
pkgFiles, err := filepath.Glob(filepath.Join(filepath.Dir(pkg.Pkgbuild), "*.pkg.tar.zst"))
|
||||||
|
check(err)
|
||||||
|
log.Debug(pkgFiles)
|
||||||
|
|
||||||
|
if len(pkgFiles) == 0 {
|
||||||
|
log.Warningf("No packages found after building %s. Abort build.", pkg.Pkgbase)
|
||||||
|
|
||||||
|
gitClean(pkg)
|
||||||
|
b.buildWG.Done()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, file := range pkgFiles {
|
||||||
|
cmd = backgroundCmd("gpg", "--batch", "--detach-sign", file)
|
||||||
|
res, err := cmd.CombinedOutput()
|
||||||
|
log.Debug(string(res))
|
||||||
|
if err != nil {
|
||||||
|
log.Warningf("Failed to sign %s: %s", pkg.Pkgbase, err)
|
||||||
|
b.buildWG.Done()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
copyFiles, err := filepath.Glob(filepath.Join(filepath.Dir(pkg.Pkgbuild), "*.pkg.tar.zst*"))
|
||||||
|
check(err)
|
||||||
|
|
||||||
|
for _, file := range copyFiles {
|
||||||
|
_, err = copyFile(file, filepath.Join(conf.Basedir.Repo, pkg.FullRepo, "os", conf.Arch, filepath.Base(file)))
|
||||||
|
if err != nil {
|
||||||
|
check(err)
|
||||||
|
b.buildWG.Done()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if filepath.Ext(file) != ".sig" {
|
||||||
|
pkg.PkgFiles = append(pkg.PkgFiles, filepath.Join(conf.Basedir.Repo, pkg.FullRepo, "os", conf.Arch, filepath.Base(file)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b.toRepoAdd <- pkg
|
||||||
|
|
||||||
|
if _, err := os.Stat(filepath.Join(conf.Basedir.Repo, "logs", pkg.Pkgbase+".log")); err == nil {
|
||||||
|
check(os.Remove(filepath.Join(conf.Basedir.Repo, "logs", pkg.Pkgbase+".log")))
|
||||||
|
}
|
||||||
|
|
||||||
|
gitClean(pkg)
|
||||||
|
log.Infof("[%s/%s] Build successful (%s)", pkg.FullRepo, pkg.Pkgbase, time.Now().Sub(start))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseWorker consumes packages from b.toParse: it generates and parses the
// SRCINFO for each PKGBUILD, filters out packages that should not be built
// (any-arch, blacklisted, previously failed, repo already newer) and forwards
// the remainder to b.toBuild. Each processed package calls b.parseWG.Done()
// exactly once, on whichever path it leaves this function.
func (b *BuildManager) parseWorker() {
	for {
		// Checked between packages so workers drain out on shutdown.
		if b.exit {
			return
		}
		select {
		case pkg := <-b.toParse:
			// Generate the SRCINFO from the PKGBUILD via makepkg.
			cmd := backgroundCmd("sh", "-c", "cd "+filepath.Dir(pkg.Pkgbuild)+"&&"+"makepkg --printsrcinfo")
			res, err := cmd.Output()
			if err != nil {
				log.Warningf("Failed generate SRCINFO for %s: %v", pkg.Pkgbase, err)
				b.parseWG.Done()
				continue
			}

			info, err := srcinfo.Parse(string(res))
			if err != nil {
				log.Warningf("Failed to parse SRCINFO for %s: %v", pkg.Pkgbase, err)
				b.parseWG.Done()
				continue
			}
			pkg.Srcinfo = info

			// any-arch and blacklisted packages are purged from the repo
			// rather than built.
			if contains(info.Arch, "any") || contains(conf.Blacklist, info.Pkgbase) {
				log.Infof("Skipped %s: blacklisted or any-Package", info.Pkgbase)
				b.toPurge <- pkg
				b.parseWG.Done()
				continue
			}

			// Same for packages whose current version already failed to build.
			if isPkgFailed(pkg) {
				log.Infof("Skipped %s: failed build", info.Pkgbase)
				b.toPurge <- pkg
				b.parseWG.Done()
				continue
			}

			// Full version string, including the epoch prefix when present.
			var pkgVer string
			if pkg.Srcinfo.Epoch == "" {
				pkgVer = pkg.Srcinfo.Pkgver + "-" + pkg.Srcinfo.Pkgrel
			} else {
				pkgVer = pkg.Srcinfo.Epoch + ":" + pkg.Srcinfo.Pkgver + "-" + pkg.Srcinfo.Pkgrel
			}

			// Skip the build when the repo already carries a higher version.
			repoVer := getVersionFromRepo(pkg)
			if repoVer != "" && alpm.VerCmp(repoVer, pkgVer) > 0 {
				log.Debugf("Skipped %s: Version in repo higher than in PKGBUILD (%s < %s)", info.Pkgbase, pkgVer, repoVer)
				b.stats.eligible++
				b.stats.fullyBuild++
				b.parseWG.Done()
				continue
			}

			// Done() is called before the (possibly blocking) send so the
			// parse WaitGroup can drain while the build queue is full.
			b.stats.eligible++
			b.parseWG.Done()
			b.toBuild <- pkg
		}
	}
}
|
||||||
|
|
||||||
|
func findPkgFiles(pkg *BuildPackage) {
|
||||||
|
pkgs, err := os.ReadDir(filepath.Join(conf.Basedir.Repo, pkg.FullRepo, "os", conf.Arch))
|
||||||
|
check(err)
|
||||||
|
|
||||||
|
var fPkg []string
|
||||||
|
for _, file := range pkgs {
|
||||||
|
if !file.IsDir() && !strings.HasSuffix(file.Name(), ".sig") {
|
||||||
|
matches := rePkgFile.FindStringSubmatch(file.Name())
|
||||||
|
|
||||||
|
var realPkgs []string
|
||||||
|
for _, realPkg := range pkg.Srcinfo.Packages {
|
||||||
|
realPkgs = append(realPkgs, realPkg.Pkgname)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(matches) > 1 && contains(realPkgs, matches[1]) {
|
||||||
|
fPkg = append(fPkg, filepath.Join(conf.Basedir.Repo, pkg.FullRepo, "os", conf.Arch, file.Name()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pkg.PkgFiles = fPkg
|
||||||
|
}
|
||||||
|
|
||||||
|
func getVersionFromRepo(pkg *BuildPackage) string {
|
||||||
|
findPkgFiles(pkg)
|
||||||
|
|
||||||
|
if len(pkg.PkgFiles) == 0 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
fNameSplit := strings.Split(pkg.PkgFiles[0], "-")
|
||||||
|
return fNameSplit[len(fNameSplit)-3] + "-" + fNameSplit[len(fNameSplit)-2]
|
||||||
|
}
|
||||||
|
|
||||||
|
func isPkgFailed(pkg *BuildPackage) bool {
|
||||||
|
buildManager.failedMutex.Lock()
|
||||||
|
defer buildManager.failedMutex.Unlock()
|
||||||
|
|
||||||
|
file, err := os.OpenFile(filepath.Join(conf.Basedir.Repo, pkg.FullRepo+"_failed.txt"), os.O_RDWR|os.O_CREATE|os.O_SYNC, os.ModePerm)
|
||||||
|
check(err)
|
||||||
|
defer func(file *os.File) {
|
||||||
|
check(file.Close())
|
||||||
|
}(file)
|
||||||
|
|
||||||
|
failed := false
|
||||||
|
var newContent []string
|
||||||
|
scanner := bufio.NewScanner(file)
|
||||||
|
for scanner.Scan() {
|
||||||
|
line := scanner.Text()
|
||||||
|
splitPkg := strings.Split(line, "==")
|
||||||
|
|
||||||
|
if splitPkg[0] == pkg.Pkgbase {
|
||||||
|
var pkgVer string
|
||||||
|
if pkg.Srcinfo.Epoch == "" {
|
||||||
|
pkgVer = pkg.Srcinfo.Pkgver + "-" + pkg.Srcinfo.Pkgrel
|
||||||
|
} else {
|
||||||
|
pkgVer = pkg.Srcinfo.Epoch + ":" + pkg.Srcinfo.Pkgver + "-" + pkg.Srcinfo.Pkgrel
|
||||||
|
}
|
||||||
|
|
||||||
|
// try to build new versions of previously failed packages
|
||||||
|
if alpm.VerCmp(splitPkg[1], pkgVer) < 0 {
|
||||||
|
failed = false
|
||||||
|
} else {
|
||||||
|
failed = true
|
||||||
|
newContent = append(newContent, line+"\n")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
newContent = append(newContent, line+"\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
check(scanner.Err())
|
||||||
|
sort.Strings(newContent)
|
||||||
|
|
||||||
|
_, err = file.Seek(0, 0)
|
||||||
|
check(err)
|
||||||
|
check(file.Truncate(0))
|
||||||
|
_, err = file.WriteString(strings.Join(newContent, ""))
|
||||||
|
check(err)
|
||||||
|
|
||||||
|
return failed
|
||||||
|
}
|
||||||
|
|
||||||
|
func setupChroot() {
|
||||||
|
if _, err := os.Stat(filepath.Join(conf.Basedir.Chroot, orgChrootName)); err == nil {
|
||||||
|
//goland:noinspection SpellCheckingInspection
|
||||||
|
cmd := backgroundCmd("arch-nspawn", filepath.Join(conf.Basedir.Chroot, orgChrootName), "pacman", "-Syuu", "--noconfirm")
|
||||||
|
res, err := cmd.CombinedOutput()
|
||||||
|
log.Debug(string(res))
|
||||||
|
check(err)
|
||||||
|
} else if os.IsNotExist(err) {
|
||||||
|
err := os.MkdirAll(conf.Basedir.Chroot, os.ModePerm)
|
||||||
|
check(err)
|
||||||
|
|
||||||
|
cmd := backgroundCmd("mkarchroot", "-C", pacmanConf, filepath.Join(conf.Basedir.Chroot, orgChrootName), "base-devel")
|
||||||
|
res, err := cmd.CombinedOutput()
|
||||||
|
log.Debug(string(res))
|
||||||
|
check(err)
|
||||||
|
} else {
|
||||||
|
check(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *BuildManager) repoWorker() {
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case pkg := <-b.toRepoAdd:
|
||||||
|
args := []string{"-s", "-v", "-p", "-n", filepath.Join(conf.Basedir.Repo, pkg.FullRepo, "os", conf.Arch, pkg.FullRepo) + ".db.tar.xz"}
|
||||||
|
args = append(args, pkg.PkgFiles...)
|
||||||
|
cmd := backgroundCmd("repo-add", args...)
|
||||||
|
res, err := cmd.CombinedOutput()
|
||||||
|
log.Debug(string(res))
|
||||||
|
if err != nil {
|
||||||
|
log.Panicf("%v while repo-add: %s", err, string(res))
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd = backgroundCmd("paccache",
|
||||||
|
"-rc", filepath.Join(conf.Basedir.Repo, pkg.FullRepo, "os", conf.Arch),
|
||||||
|
"-k", "1")
|
||||||
|
res, err = cmd.CombinedOutput()
|
||||||
|
log.Debug(string(res))
|
||||||
|
check(err)
|
||||||
|
b.buildWG.Done()
|
||||||
|
case pkg := <-b.toPurge:
|
||||||
|
if _, err := os.Stat(filepath.Join(conf.Basedir.Repo, pkg.FullRepo, "os", conf.Arch, pkg.FullRepo) + ".db.tar.xz"); err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if len(pkg.PkgFiles) == 0 {
|
||||||
|
findPkgFiles(pkg)
|
||||||
|
}
|
||||||
|
|
||||||
|
var realPkgs []string
|
||||||
|
for _, realPkg := range pkg.Srcinfo.Packages {
|
||||||
|
realPkgs = append(realPkgs, realPkg.Pkgname)
|
||||||
|
}
|
||||||
|
|
||||||
|
args := []string{"-s", "-v", filepath.Join(conf.Basedir.Repo, pkg.FullRepo, "os", conf.Arch, pkg.FullRepo) + ".db.tar.xz"}
|
||||||
|
args = append(args, realPkgs...)
|
||||||
|
cmd := backgroundCmd("repo-remove", args...)
|
||||||
|
res, err := cmd.CombinedOutput()
|
||||||
|
log.Debug(string(res))
|
||||||
|
if err != nil && cmd.ProcessState.ExitCode() == 1 {
|
||||||
|
log.Debugf("Deleteing package %s failed: Package not found in database", pkg.Pkgbase)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, file := range pkg.PkgFiles {
|
||||||
|
check(os.Remove(file))
|
||||||
|
check(os.Remove(file + ".sig"))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// backgroundCmd builds an *exec.Cmd for name/arg that runs in its own process
// group, so the whole group can be signalled independently of this process.
func backgroundCmd(name string, arg ...string) *exec.Cmd {
	command := exec.Command(name, arg...)
	command.SysProcAttr = &syscall.SysProcAttr{
		Setpgid: true,
		// Pgid: 0,
	}

	return command
}
|
||||||
|
|
||||||
|
func (b *BuildManager) syncWorker() {
|
||||||
|
check(os.MkdirAll(conf.Basedir.Upstream, os.ModePerm))
|
||||||
|
|
||||||
|
for i := 0; i < conf.Build.Worker; i++ {
|
||||||
|
go b.buildWorker(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < runtime.NumCPU(); i++ {
|
||||||
|
go b.parseWorker()
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
b.buildWG.Wait()
|
||||||
|
for gitDir, gitURL := range conf.Svn2git {
|
||||||
|
gitPath := filepath.Join(conf.Basedir.Upstream, gitDir)
|
||||||
|
|
||||||
|
if _, err := os.Stat(gitPath); os.IsNotExist(err) {
|
||||||
|
cmd := backgroundCmd("git", "clone", "--depth=1", gitURL, gitPath)
|
||||||
|
res, err := cmd.CombinedOutput()
|
||||||
|
log.Debug(string(res))
|
||||||
|
check(err)
|
||||||
|
} else if err == nil {
|
||||||
|
cmd := backgroundCmd("sudo", "git_clean.sh", gitPath)
|
||||||
|
res, err := cmd.CombinedOutput()
|
||||||
|
log.Debug(string(res))
|
||||||
|
check(err)
|
||||||
|
|
||||||
|
cmd = backgroundCmd("sh", "-c", "cd "+gitPath+" && git reset --hard")
|
||||||
|
res, err = cmd.CombinedOutput()
|
||||||
|
log.Debug(string(res))
|
||||||
|
check(err)
|
||||||
|
|
||||||
|
cmd = backgroundCmd("sh", "-c", "cd "+gitPath+" && git pull")
|
||||||
|
res, err = cmd.CombinedOutput()
|
||||||
|
log.Debug(string(res))
|
||||||
|
check(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// fetch updates between sync runs
|
||||||
|
setupChroot()
|
||||||
|
|
||||||
|
pkgBuilds, err := filepathx.Glob(filepath.Join(conf.Basedir.Upstream, "/**/PKGBUILD"))
|
||||||
|
check(err)
|
||||||
|
|
||||||
|
// Shuffle pkgbuilds to spread out long-running builds, otherwise pkgBuilds is alphabetically-sorted
|
||||||
|
rand.Seed(time.Now().UnixNano())
|
||||||
|
rand.Shuffle(len(pkgBuilds), func(i, j int) { pkgBuilds[i], pkgBuilds[j] = pkgBuilds[j], pkgBuilds[i] })
|
||||||
|
|
||||||
|
for _, pkgbuild := range pkgBuilds {
|
||||||
|
if b.exit {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
sPkgbuild := strings.Split(pkgbuild, "/")
|
||||||
|
repo := sPkgbuild[len(sPkgbuild)-2]
|
||||||
|
|
||||||
|
if repo == "trunk" || !contains(conf.Repos, strings.Split(repo, "-")[0]) || strings.Contains(repo, "i686") || strings.Contains(repo, "testing") || strings.Contains(repo, "staging") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, march := range conf.March {
|
||||||
|
b.parseWG.Add(1)
|
||||||
|
b.toParse <- &BuildPackage{
|
||||||
|
Pkgbuild: pkgbuild,
|
||||||
|
Pkgbase: sPkgbuild[len(sPkgbuild)-4],
|
||||||
|
Repo: strings.Split(repo, "-")[0],
|
||||||
|
March: march,
|
||||||
|
FullRepo: strings.Split(repo, "-")[0] + "-" + march,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
b.parseWG.Wait()
|
||||||
|
if b.stats.eligible != 0 {
|
||||||
|
log.Infof("Processed source-repos. %d packages elegible to be build, %d already fully build. Covering %f%% of offical-repo (buildable) packages.", b.stats.eligible, b.stats.fullyBuild, float32(b.stats.fullyBuild)/float32(b.stats.eligible)*100.0)
|
||||||
|
}
|
||||||
|
b.stats.fullyBuild = 0
|
||||||
|
b.stats.eligible = 0
|
||||||
|
time.Sleep(5 * time.Minute)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
killSignals := make(chan os.Signal, 1)
|
killSignals := make(chan os.Signal, 1)
|
||||||
signal.Notify(killSignals, syscall.SIGINT, syscall.SIGTERM)
|
signal.Notify(killSignals, syscall.SIGINT, syscall.SIGTERM)
|
||||||
|
|
||||||
reloadSignals := make(chan os.Signal, 1)
|
confStr, err := os.ReadFile("config.yaml")
|
||||||
signal.Notify(reloadSignals, syscall.SIGUSR1)
|
check(err)
|
||||||
|
|
||||||
flag.Parse()
|
|
||||||
|
|
||||||
confStr, err := os.ReadFile(*configFile)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("error reading config file: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = yaml.Unmarshal(confStr, &conf)
|
err = yaml.Unmarshal(confStr, &conf)
|
||||||
if err != nil {
|
check(err)
|
||||||
log.Fatalf("error parsing config file: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
lvl, err := log.ParseLevel(conf.Logging.Level)
|
lvl, err := log.ParseLevel(conf.Logging.Level)
|
||||||
if err != nil {
|
check(err)
|
||||||
log.Fatalf("error parsing log level from config: %v", err)
|
|
||||||
}
|
|
||||||
log.SetLevel(lvl)
|
log.SetLevel(lvl)
|
||||||
if *journalLog {
|
journalhook.Enable()
|
||||||
journalhook.Enable()
|
|
||||||
}
|
|
||||||
|
|
||||||
err = syscall.Setpriority(syscall.PRIO_PROCESS, 0, 5)
|
err = syscall.Setpriority(syscall.PRIO_PROCESS, 0, 5)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Infof("failed to drop priority: %v", err)
|
log.Warningf("Failed to drop priority: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = os.MkdirAll(conf.Basedir.Repo, 0o755)
|
err = os.MkdirAll(conf.Basedir.Repo, os.ModePerm)
|
||||||
if err != nil {
|
check(err)
|
||||||
log.Fatalf("error creating repo dir: %v", err)
|
|
||||||
|
buildManager = BuildManager{
|
||||||
|
toBuild: make(chan *BuildPackage, 10000),
|
||||||
|
toParse: make(chan *BuildPackage, 10000),
|
||||||
|
toPurge: make(chan *BuildPackage, conf.Build.Worker),
|
||||||
|
toRepoAdd: make(chan *BuildPackage, conf.Build.Worker),
|
||||||
|
exit: false,
|
||||||
|
buildWG: sync.WaitGroup{},
|
||||||
|
failedMutex: sync.RWMutex{},
|
||||||
|
stats: struct {
|
||||||
|
fullyBuild int
|
||||||
|
eligible int
|
||||||
|
}{fullyBuild: 0, eligible: 0},
|
||||||
}
|
}
|
||||||
|
|
||||||
if conf.DB.Driver == "pgx" {
|
setupChroot()
|
||||||
pdb, err := sql.Open("pgx", conf.DB.ConnectTo)
|
syncMarchs()
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("failed to open database %s: %v", conf.DB.ConnectTo, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
drv := sql.OpenDB(dialect.Postgres, pdb.DB())
|
go buildManager.repoWorker()
|
||||||
db = ent.NewClient(ent.Driver(drv))
|
go buildManager.syncWorker()
|
||||||
} else {
|
|
||||||
db, err = ent.Open(conf.DB.Driver, conf.DB.ConnectTo)
|
<-killSignals
|
||||||
if err != nil {
|
|
||||||
log.Panicf("failed to open database %s: %v", conf.DB.ConnectTo, err)
|
buildManager.exit = true
|
||||||
}
|
buildManager.buildProcMutex.RLock()
|
||||||
defer func(Client *ent.Client) {
|
for _, p := range buildManager.buildProcesses {
|
||||||
_ = Client.Close()
|
pgid, err := syscall.Getpgid(p.Pid)
|
||||||
}(db)
|
check(err)
|
||||||
|
|
||||||
|
check(syscall.Kill(-pgid, syscall.SIGTERM))
|
||||||
}
|
}
|
||||||
|
buildManager.buildProcMutex.RUnlock()
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
buildManager.buildWG.Wait()
|
||||||
if err := db.Schema.Create(ctx, migrate.WithDropIndex(true), migrate.WithDropColumn(true)); err != nil {
|
|
||||||
log.Panicf("automigrate failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
buildManager = &BuildManager{
|
|
||||||
repoPurge: make(map[string]chan []*ProtoPackage),
|
|
||||||
repoAdd: make(map[string]chan []*ProtoPackage),
|
|
||||||
queueSignal: make(chan struct{}),
|
|
||||||
alpmMutex: new(sync.RWMutex),
|
|
||||||
building: []*ProtoPackage{},
|
|
||||||
buildingLock: new(sync.RWMutex),
|
|
||||||
repoWG: new(sync.WaitGroup),
|
|
||||||
}
|
|
||||||
|
|
||||||
buildManager.setupMetrics(conf.Metrics.Port)
|
|
||||||
|
|
||||||
err = setupChroot(ctx)
|
|
||||||
if err != nil {
|
|
||||||
log.Panicf("unable to setup chroot: %v", err)
|
|
||||||
}
|
|
||||||
err = syncMarchs(ctx)
|
|
||||||
if err != nil {
|
|
||||||
log.Panicf("error syncing marchs: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
alpmHandle, err = initALPM(filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot),
|
|
||||||
filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot, "/var/lib/pacman"))
|
|
||||||
if err != nil {
|
|
||||||
log.Panicf("error while ALPM-init: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
_ = buildManager.syncWorker(ctx)
|
|
||||||
}()
|
|
||||||
|
|
||||||
killLoop:
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-killSignals:
|
|
||||||
break killLoop
|
|
||||||
case <-reloadSignals:
|
|
||||||
confStr, err := os.ReadFile(*configFile)
|
|
||||||
if err != nil {
|
|
||||||
log.Panicf("unable to open config: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = yaml.Unmarshal(confStr, &conf)
|
|
||||||
if err != nil {
|
|
||||||
log.Panicf("unable to parse config: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
lvl, err := log.ParseLevel(conf.Logging.Level)
|
|
||||||
if err != nil {
|
|
||||||
log.Panicf("failure setting logging level: %v", err)
|
|
||||||
}
|
|
||||||
log.SetLevel(lvl)
|
|
||||||
log.Infof("config reloaded")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
cancel()
|
|
||||||
buildManager.repoWG.Wait()
|
|
||||||
_ = alpmHandle.Release()
|
|
||||||
}
|
}
|
||||||
|
26
metrics.go
26
metrics.go
@@ -1,26 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
|
||||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
|
||||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
|
||||||
log "github.com/sirupsen/logrus"
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
// setupMetrics registers the Prometheus build-queue gauge and starts an HTTP
// server on the given port that exposes the default Prometheus handler at "/".
// The server runs in a background goroutine for the lifetime of the process.
func (b *BuildManager) setupMetrics(port uint32) {
	// Gauge labelled by repository and status, updated elsewhere.
	b.metrics.queueSize = promauto.NewGaugeVec(prometheus.GaugeOpts{
		Name: "build_queue_size",
		Help: "Build queue size",
	}, []string{"repository", "status"})

	mux := http.NewServeMux()
	mux.Handle("/", promhttp.Handler())
	go func() {
		// Blocks until the server fails; a startup error is only logged.
		err := http.ListenAndServe(fmt.Sprintf(":%d", port), mux) //nolint:gosec
		if err != nil {
			log.Errorf("failed to start metrics server: %v", err)
		}
	}()
}
|
|
94
package.go
94
package.go
@@ -1,94 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"entgo.io/ent/dialect/sql"
|
|
||||||
"entgo.io/ent/dialect/sql/sqljson"
|
|
||||||
"fmt"
|
|
||||||
log "github.com/sirupsen/logrus"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"somegit.dev/ALHP/ALHP.GO/ent"
|
|
||||||
"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Package string
|
|
||||||
|
|
||||||
// Name returns package's name
|
|
||||||
func (pkg Package) Name() string {
|
|
||||||
fNameSplit := strings.Split(filepath.Base(string(pkg)), "-")
|
|
||||||
return strings.Join(fNameSplit[:len(fNameSplit)-3], "-")
|
|
||||||
}
|
|
||||||
|
|
||||||
// MArch returns package's march
// (derived from the path: the fourth-from-last path element is
// "<repo>-<march>", and everything after the first dash is the march).
func (pkg Package) MArch() *string {
	splitPath := strings.Split(string(pkg), string(filepath.Separator))
	res := strings.Join(strings.Split(splitPath[len(splitPath)-4], "-")[1:], "-")
	return &res
}
|
|
||||||
|
|
||||||
// Repo returns package's dbpackage.Repository
|
|
||||||
func (pkg Package) Repo() dbpackage.Repository {
|
|
||||||
splitPath := strings.Split(string(pkg), string(filepath.Separator))
|
|
||||||
return dbpackage.Repository(strings.Split(splitPath[len(splitPath)-4], "-")[0])
|
|
||||||
}
|
|
||||||
|
|
||||||
// FullRepo returns package's dbpackage.Repository-march
// (the fourth-from-last path element, e.g. "extra-x86-64-v3").
func (pkg Package) FullRepo() *string {
	splitPath := strings.Split(string(pkg), string(filepath.Separator))
	return &splitPath[len(splitPath)-4]
}
|
|
||||||
|
|
||||||
// Version returns version extracted from package
|
|
||||||
func (pkg Package) Version() string {
|
|
||||||
fNameSplit := strings.Split(filepath.Base(string(pkg)), "-")
|
|
||||||
return strings.Join(fNameSplit[len(fNameSplit)-3:len(fNameSplit)-1], "-")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Arch returns package's Architecture
|
|
||||||
func (pkg Package) Arch() *string {
|
|
||||||
fNameSplit := strings.Split(filepath.Base(string(pkg)), "-")
|
|
||||||
fNameSplit = strings.Split(fNameSplit[len(fNameSplit)-1], ".")
|
|
||||||
return &fNameSplit[0]
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasValidSignature returns if package has valid detached signature file
|
|
||||||
func (pkg Package) HasValidSignature() (bool, error) {
|
|
||||||
cmd := exec.Command("gpg", "--verify", string(pkg)+".sig") //nolint:gosec
|
|
||||||
res, err := cmd.CombinedOutput()
|
|
||||||
switch {
|
|
||||||
case cmd.ProcessState.ExitCode() == 2 || cmd.ProcessState.ExitCode() == 1:
|
|
||||||
return false, nil
|
|
||||||
case cmd.ProcessState.ExitCode() == 0:
|
|
||||||
return true, nil
|
|
||||||
case err != nil:
|
|
||||||
return false, fmt.Errorf("error checking signature: %w (%s)", err, res)
|
|
||||||
}
|
|
||||||
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DBPackage returns ent.DBPackage for package
// (convenience wrapper that derives march and repo from the package's path
// and delegates to DBPackageIsolated).
func (pkg Package) DBPackage(ctx context.Context, db *ent.Client) (*ent.DBPackage, error) {
	return pkg.DBPackageIsolated(ctx, *pkg.MArch(), pkg.Repo(), db)
}
|
|
||||||
|
|
||||||
// DBPackageIsolated returns ent.DBPackage like DBPackage, but not relying on the path for march and repo
// It queries for the unique row whose JSON packages column contains this
// package's name and whose march and repository columns match the arguments.
// A not-found result is logged at debug level; the error is returned either way.
func (pkg Package) DBPackageIsolated(ctx context.Context, march string, repo dbpackage.Repository, db *ent.Client) (*ent.DBPackage, error) {
	dbPkg, err := db.DBPackage.Query().Where(func(s *sql.Selector) {
		s.Where(
			sql.And(
				// Match against the JSON-encoded list of split-package names.
				sqljson.ValueContains(dbpackage.FieldPackages, pkg.Name()),
				sql.EQ(dbpackage.FieldMarch, march),
				sql.EQ(dbpackage.FieldRepository, repo)),
		)
	}).Only(ctx)
	if ent.IsNotFound(err) {
		log.Debugf("not found in database: %s", pkg.Name())
		return nil, err
	} else if err != nil {
		return nil, err
	}
	return dbPkg, nil
}
|
|
780
proto_package.go
780
proto_package.go
@@ -1,780 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"github.com/Jguer/go-alpm/v2"
|
|
||||||
"github.com/Morganamilo/go-srcinfo"
|
|
||||||
"github.com/c2h5oh/datasize"
|
|
||||||
"github.com/google/uuid"
|
|
||||||
"github.com/otiai10/copy"
|
|
||||||
"github.com/sethvargo/go-retry"
|
|
||||||
log "github.com/sirupsen/logrus"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"somegit.dev/ALHP/ALHP.GO/ent"
|
|
||||||
"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
type ProtoPackage struct {
|
|
||||||
Pkgbase string
|
|
||||||
Srcinfo *srcinfo.Srcinfo
|
|
||||||
Arch string
|
|
||||||
PkgFiles []string
|
|
||||||
Repo dbpackage.Repository
|
|
||||||
March string
|
|
||||||
FullRepo string
|
|
||||||
Version string
|
|
||||||
DBPackage *ent.DBPackage
|
|
||||||
Pkgbuild string
|
|
||||||
State *StateInfo
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
ErrorNotEligible = errors.New("package is not eligible")
|
|
||||||
)
|
|
||||||
|
|
||||||
// isEligible decides whether p should be built. It evaluates the skip
// conditions below (any-arch, blacklist, memory limit, prior failure,
// haskell dependency), persists the resulting status/skip-reason to the
// database, and compares against the version already in the repo. Returns
// true only when the package should proceed to a build.
func (p *ProtoPackage) isEligible(ctx context.Context) bool {
	skipping := false
	switch {
	case p.Arch == "any":
		log.Debugf("skipped %s: any-package", p.Pkgbase)
		p.DBPackage.SkipReason = "arch = any"
		p.DBPackage.Status = dbpackage.StatusSkipped
		skipping = true
	case MatchGlobList(p.Pkgbase, conf.Blacklist.Packages):
		log.Debugf("skipped %s: package on no-build list", p.Pkgbase)
		p.DBPackage.SkipReason = "blacklisted"
		p.DBPackage.Status = dbpackage.StatusSkipped
		skipping = true
	case p.DBPackage.MaxRss != nil && datasize.ByteSize(*p.DBPackage.MaxRss)*datasize.KB > conf.Build.MemoryLimit: //nolint:gosec
		log.Debugf("skipped %s: memory limit exceeded (%s)", p.Pkgbase, datasize.ByteSize(*p.DBPackage.MaxRss)*datasize.KB) //nolint:gosec
		p.DBPackage.SkipReason = "memory limit exceeded"
		p.DBPackage.Status = dbpackage.StatusSkipped
		skipping = true
	case p.isPkgFailed():
		// Note: unlike the other branches, this one does not set
		// Status/SkipReason on the DBPackage before the skipping update.
		log.Debugf("skipped %s: failed build", p.Pkgbase)
		skipping = true
	case p.Srcinfo != nil:
		// skip haskell packages, since they cannot be optimized currently (no -O3 & march has no effect as far as I know)
		if Contains(p.Srcinfo.MakeDepends, "ghc") || Contains(p.Srcinfo.MakeDepends, "haskell-ghc") ||
			Contains(p.Srcinfo.Depends, "ghc") || Contains(p.Srcinfo.Depends, "haskell-ghc") {
			log.Debugf("skipped %s: haskell", p.Pkgbase)
			p.DBPackage.SkipReason = "haskell"
			p.DBPackage.Status = dbpackage.StatusSkipped
			skipping = true
		}
	}

	if skipping {
		// Persist the skip decision (status, reason, version, tag revision).
		p.DBPackage = p.DBPackage.Update().SetUpdated(time.Now()).SetVersion(p.Version).SetStatus(p.DBPackage.Status).
			SetSkipReason(p.DBPackage.SkipReason).SetTagRev(p.State.TagRev).SaveX(ctx)
		return false
	}
	p.DBPackage = p.DBPackage.Update().SetUpdated(time.Now()).SetVersion(p.Version).SaveX(ctx)

	// Packages on the LTO blacklist get LTO turned off persistently.
	if Contains(conf.Blacklist.LTO, p.Pkgbase) && p.DBPackage.Lto != dbpackage.LtoDisabled {
		p.DBPackage = p.DBPackage.Update().SetLto(dbpackage.LtoDisabled).SaveX(ctx)
	}

	// Compare against the version already present in the repo; a repoVersion
	// error clears the stored repo version instead of failing eligibility.
	repoVer, err := p.repoVersion()
	if err != nil {
		p.DBPackage = p.DBPackage.Update().ClearRepoVersion().SaveX(ctx)
	} else if alpm.VerCmp(repoVer, p.Version) > 0 {
		log.Debugf("skipped %s: version in repo higher than in PKGBUILD (%s < %s)", p.Pkgbase, p.Version, repoVer)
		p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusLatest).ClearSkipReason().SetTagRev(p.State.TagRev).SaveX(ctx)
		return false
	}

	return true
}
|
|
||||||
|
|
||||||
func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {
|
|
||||||
start := time.Now().UTC()
|
|
||||||
chroot := "build_" + uuid.New().String()
|
|
||||||
|
|
||||||
buildFolder, err := p.setupBuildDir(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return time.Since(start), fmt.Errorf("error setting up build folder: %w", err)
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
chroot := chroot
|
|
||||||
log.Debugf("removing chroot %s", chroot)
|
|
||||||
err := cleanBuildDir(buildFolder, filepath.Join(conf.Basedir.Work, chrootDir, chroot))
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("error removing builddir/chroot %s/%s: %v", buildDir, chroot, err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
err = p.genSrcinfo()
|
|
||||||
if err != nil {
|
|
||||||
return time.Since(start), fmt.Errorf("error generating srcinfo: %w", err)
|
|
||||||
}
|
|
||||||
p.Version = constructVersion(p.Srcinfo.Pkgver, p.Srcinfo.Pkgrel, p.Srcinfo.Epoch)
|
|
||||||
p.DBPackage = p.DBPackage.Update().SetPackages(packages2slice(p.Srcinfo.Packages)).SaveX(ctx)
|
|
||||||
|
|
||||||
// skip haskell packages, since they cannot be optimized currently (no -O3 & march has no effect as far as I know)
|
|
||||||
if Contains(p.Srcinfo.MakeDepends, "ghc") || Contains(p.Srcinfo.MakeDepends, "haskell-ghc") ||
|
|
||||||
Contains(p.Srcinfo.Depends, "ghc") || Contains(p.Srcinfo.Depends, "haskell-ghc") {
|
|
||||||
p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusSkipped).SetSkipReason("haskell").SetTagRev(p.State.TagRev).SaveX(ctx)
|
|
||||||
buildManager.repoPurge[p.FullRepo] <- []*ProtoPackage{p}
|
|
||||||
return time.Since(start), ErrorNotEligible
|
|
||||||
}
|
|
||||||
|
|
||||||
isLatest, local, syncVersion, err := p.isMirrorLatest(alpmHandle)
|
|
||||||
if err != nil {
|
|
||||||
var multipleStateFilesError MultipleStateFilesError
|
|
||||||
var unableToSatisfyError UnableToSatisfyError
|
|
||||||
switch {
|
|
||||||
default:
|
|
||||||
return time.Since(start), fmt.Errorf("error solving deps: %w", err)
|
|
||||||
case errors.As(err, &multipleStateFilesError):
|
|
||||||
log.Infof("skipped %s: multiple PKGBUILDs for dependency found: %v", p.Srcinfo.Pkgbase, err)
|
|
||||||
p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusSkipped).SetSkipReason("multiple PKGBUILD for dep. found").SaveX(ctx)
|
|
||||||
return time.Since(start), err
|
|
||||||
case errors.As(err, &unableToSatisfyError):
|
|
||||||
log.Infof("skipped %s: unable to resolve dependencies: %v", p.Srcinfo.Pkgbase, err)
|
|
||||||
p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusSkipped).SetSkipReason("unable to resolve dependencies").SaveX(ctx)
|
|
||||||
return time.Since(start), ErrorNotEligible
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !isLatest {
|
|
||||||
if local != nil {
|
|
||||||
log.Infof("delayed %s: not all dependencies are up to date (local: %s==%s, sync: %s==%s)",
|
|
||||||
p.Srcinfo.Pkgbase, local.Name(), local.Version(), local.Name(), syncVersion)
|
|
||||||
p.DBPackage.Update().SetStatus(dbpackage.StatusDelayed).
|
|
||||||
SetSkipReason(fmt.Sprintf("waiting for %s==%s", local.Name(), syncVersion)).ExecX(ctx)
|
|
||||||
|
|
||||||
// Returning an error here causes the package to be purged.
|
|
||||||
// Purge delayed packages in case delay is caused by inconsistencies in state.
|
|
||||||
// Worst case would be clients downloading a package update twice, once from their official mirror,
|
|
||||||
// and then after build from ALHP. Best case we prevent a not buildable package from staying in the repos
|
|
||||||
// in an outdated version.
|
|
||||||
if time.Since(local.BuildDate()).Hours() >= 48 && p.DBPackage.RepoVersion != "" {
|
|
||||||
return time.Since(start), errors.New("overdue package waiting")
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
log.Infof("delayed %s: not all dependencies are up to date or resolvable", p.Srcinfo.Pkgbase)
|
|
||||||
p.DBPackage.Update().SetStatus(dbpackage.StatusDelayed).SetSkipReason("waiting for mirror").ExecX(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
return time.Since(start), ErrorNotEligible
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Infof("[P] build starting: %s->%s->%s", p.FullRepo, p.Pkgbase, p.Version)
|
|
||||||
|
|
||||||
p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusBuilding).ClearSkipReason().SaveX(ctx)
|
|
||||||
|
|
||||||
err = p.importKeys()
|
|
||||||
if err != nil {
|
|
||||||
log.Warningf("[P] failed to import pgp keys for %s->%s->%s: %v", p.FullRepo, p.Pkgbase, p.Version, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
buildNo := 1
|
|
||||||
versionSlice := strings.Split(p.DBPackage.LastVersionBuild, ".")
|
|
||||||
if strings.Join(versionSlice[:len(versionSlice)-1], ".") == p.Version {
|
|
||||||
buildNo, err = strconv.Atoi(versionSlice[len(versionSlice)-1])
|
|
||||||
if err != nil {
|
|
||||||
return time.Since(start), fmt.Errorf("error while reading buildNo from pkgrel: %w", err)
|
|
||||||
}
|
|
||||||
buildNo++
|
|
||||||
}
|
|
||||||
|
|
||||||
err = p.increasePkgRel(buildNo)
|
|
||||||
if err != nil {
|
|
||||||
return time.Since(start), fmt.Errorf("error while increasing pkgrel: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
p.PkgFiles = []string{}
|
|
||||||
|
|
||||||
// default to LTO
|
|
||||||
makepkgFile := makepkg
|
|
||||||
if p.DBPackage.Lto == dbpackage.LtoDisabled || p.DBPackage.Lto == dbpackage.LtoAutoDisabled {
|
|
||||||
// use non-lto makepkg.conf if LTO is blacklisted for this package
|
|
||||||
makepkgFile = makepkgLTO
|
|
||||||
}
|
|
||||||
cmd := exec.CommandContext(ctx, "makechrootpkg", "-c", "-D", filepath.Join(conf.Basedir.Work, makepkgDir), //nolint:gosec
|
|
||||||
"-l", chroot, "-r", filepath.Join(conf.Basedir.Work, chrootDir), "--", "-m", "--noprogressbar", "--config",
|
|
||||||
filepath.Join(conf.Basedir.Work, makepkgDir, fmt.Sprintf(makepkgFile, p.March)))
|
|
||||||
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
|
|
||||||
cmd.Dir = filepath.Dir(p.Pkgbuild)
|
|
||||||
var out bytes.Buffer
|
|
||||||
cmd.Stdout = &out
|
|
||||||
cmd.Stderr = &out
|
|
||||||
|
|
||||||
if err = cmd.Start(); err != nil {
|
|
||||||
return time.Since(start), fmt.Errorf("error starting build: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
pgid, err := syscall.Getpgid(cmd.Process.Pid)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("error getting PGID: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
done := make(chan bool)
|
|
||||||
result := make(chan int64)
|
|
||||||
go pollMemoryUsage(pgid, 1*time.Second, done, result)
|
|
||||||
|
|
||||||
err = cmd.Wait()
|
|
||||||
close(done)
|
|
||||||
peakMem := <-result
|
|
||||||
close(result)
|
|
||||||
|
|
||||||
Rusage, ok := cmd.ProcessState.SysUsage().(*syscall.Rusage)
|
|
||||||
if !ok {
|
|
||||||
log.Panicf("rusage is not of type *syscall.Rusage, are we running on unix-like?")
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
if ctx.Err() != nil {
|
|
||||||
return time.Since(start), ctx.Err()
|
|
||||||
}
|
|
||||||
|
|
||||||
if p.DBPackage.Lto != dbpackage.LtoAutoDisabled && p.DBPackage.Lto != dbpackage.LtoDisabled &&
|
|
||||||
(reLdError.MatchString(out.String()) || reRustLTOError.MatchString(out.String())) {
|
|
||||||
p.DBPackage.Update().SetStatus(dbpackage.StatusQueued).SetSkipReason("non-LTO rebuild").SetLto(dbpackage.LtoAutoDisabled).ExecX(ctx)
|
|
||||||
return time.Since(start), errors.New("ld/lto-incompatibility error detected, LTO disabled")
|
|
||||||
}
|
|
||||||
|
|
||||||
if reDownloadError.MatchString(out.String()) || reDownloadError2.MatchString(out.String()) ||
|
|
||||||
rePortError.MatchString(out.String()) || reSigError.MatchString(out.String()) {
|
|
||||||
p.DBPackage.Update().SetStatus(dbpackage.StatusQueued).ExecX(ctx)
|
|
||||||
return time.Since(start), errors.New("known build error detected")
|
|
||||||
}
|
|
||||||
|
|
||||||
err = os.MkdirAll(filepath.Join(conf.Basedir.Repo, logDir, p.March), 0o755)
|
|
||||||
if err != nil {
|
|
||||||
return time.Since(start), fmt.Errorf("error creating logdir: %w", err)
|
|
||||||
}
|
|
||||||
err = os.WriteFile(filepath.Join(conf.Basedir.Repo, logDir, p.March, p.Pkgbase+".log"), //nolint:gosec
|
|
||||||
[]byte(strings.ToValidUTF8(out.String(), "")), 0o644)
|
|
||||||
if err != nil {
|
|
||||||
return time.Since(start), fmt.Errorf("error warting to logdir: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
p.DBPackage.Update().
|
|
||||||
SetStatus(dbpackage.StatusFailed).
|
|
||||||
ClearSkipReason().
|
|
||||||
SetBuildTimeStart(start).
|
|
||||||
ClearMaxRss().
|
|
||||||
ClearLastVersionBuild().
|
|
||||||
ClearIoOut().
|
|
||||||
ClearIoIn().
|
|
||||||
ClearUTime().
|
|
||||||
ClearSTime().
|
|
||||||
SetTagRev(p.State.TagRev).
|
|
||||||
ExecX(ctx)
|
|
||||||
return time.Since(start), fmt.Errorf("build failed: exit code %d", cmd.ProcessState.ExitCode())
|
|
||||||
}
|
|
||||||
|
|
||||||
pkgFiles, err := filepath.Glob(filepath.Join(filepath.Dir(p.Pkgbuild), "*.pkg.tar.zst"))
|
|
||||||
if err != nil {
|
|
||||||
return time.Since(start), fmt.Errorf("error scanning builddir for artifacts: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(pkgFiles) == 0 {
|
|
||||||
return time.Since(start), errors.New("no build-artifacts found")
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, file := range pkgFiles {
|
|
||||||
cmd = exec.Command("gpg", "--batch", "--detach-sign", file)
|
|
||||||
res, err := cmd.CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
return time.Since(start), fmt.Errorf("error while signing artifact: %w (%s)", err, string(res))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
copyFiles, err := filepath.Glob(filepath.Join(filepath.Dir(p.Pkgbuild), "*.pkg.tar.zst*"))
|
|
||||||
if err != nil {
|
|
||||||
return time.Since(start), fmt.Errorf("error scanning builddir for artifacts: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
holdingDir := filepath.Join(conf.Basedir.Work, waitingDir, p.FullRepo)
|
|
||||||
for _, file := range copyFiles {
|
|
||||||
err = os.MkdirAll(holdingDir, 0o755)
|
|
||||||
if err != nil {
|
|
||||||
return time.Since(start), fmt.Errorf("error creating %s: %w", holdingDir, err)
|
|
||||||
}
|
|
||||||
err = copy.Copy(file, filepath.Join(holdingDir, filepath.Base(file)))
|
|
||||||
if err != nil {
|
|
||||||
return time.Since(start), fmt.Errorf("error while copying file to %s: %w", filepath.Join(holdingDir, filepath.Base(file)), err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if filepath.Ext(file) != ".sig" {
|
|
||||||
p.PkgFiles = append(p.PkgFiles, filepath.Join(holdingDir, filepath.Base(file)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := os.Stat(filepath.Join(conf.Basedir.Repo, logDir, p.March, p.Pkgbase+".log")); err == nil {
|
|
||||||
err := os.Remove(filepath.Join(conf.Basedir.Repo, logDir, p.March, p.Pkgbase+".log"))
|
|
||||||
if err != nil {
|
|
||||||
return time.Since(start), fmt.Errorf("error removing log: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
updatePkg := p.DBPackage.Update().
|
|
||||||
SetStatus(dbpackage.StatusBuilt).
|
|
||||||
SetLto(dbpackage.LtoEnabled).
|
|
||||||
SetBuildTimeStart(start).
|
|
||||||
SetLastVersionBuild(p.Version).
|
|
||||||
SetTagRev(p.State.TagRev).
|
|
||||||
SetMaxRss(peakMem).
|
|
||||||
SetIoOut(Rusage.Oublock).
|
|
||||||
SetIoIn(Rusage.Inblock).
|
|
||||||
SetUTime(Rusage.Utime.Sec).
|
|
||||||
SetSTime(Rusage.Stime.Sec)
|
|
||||||
|
|
||||||
if p.DBPackage.Lto != dbpackage.LtoDisabled && p.DBPackage.Lto != dbpackage.LtoAutoDisabled {
|
|
||||||
updatePkg.SetLto(dbpackage.LtoEnabled)
|
|
||||||
}
|
|
||||||
|
|
||||||
updatePkg.ExecX(ctx)
|
|
||||||
|
|
||||||
return time.Since(start), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *ProtoPackage) setupBuildDir(ctx context.Context) (string, error) {
|
|
||||||
buildDir := filepath.Join(conf.Basedir.Work, buildDir, p.March, p.Pkgbase+"-"+p.Version)
|
|
||||||
|
|
||||||
err := cleanBuildDir(buildDir, "")
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf("removing old builddir failed: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = os.MkdirAll(buildDir, 0o755)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
gitlabPath := reReplaceSinglePlus.ReplaceAllString(p.Pkgbase, "$1-$2")
|
|
||||||
gitlabPath = reReplaceRemainingPlus.ReplaceAllString(gitlabPath, "plus")
|
|
||||||
gitlabPath = reReplaceSpecialChars.ReplaceAllString(gitlabPath, "-")
|
|
||||||
gitlabPath = reReplaceUnderscore.ReplaceAllString(gitlabPath, "-")
|
|
||||||
gitlabPath = reReplaceTree.ReplaceAllString(gitlabPath, "unix-tree")
|
|
||||||
|
|
||||||
gr := retry.NewFibonacci(10 * time.Second)
|
|
||||||
gr = retry.WithMaxRetries(conf.MaxCloneRetries, gr)
|
|
||||||
|
|
||||||
if err := retry.Do(ctx, gr, func(ctx context.Context) error {
|
|
||||||
cmd := exec.CommandContext(ctx, "git", "clone", "--depth", "1", "--branch", p.State.TagVer, //nolint:gosec
|
|
||||||
fmt.Sprintf("https://gitlab.archlinux.org/archlinux/packaging/packages/%s.git", gitlabPath), buildDir)
|
|
||||||
res, err := cmd.CombinedOutput()
|
|
||||||
log.Debug(string(res))
|
|
||||||
if err != nil {
|
|
||||||
return retry.RetryableError(err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
p.Pkgbuild = filepath.Join(buildDir, "PKGBUILD")
|
|
||||||
|
|
||||||
return buildDir, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *ProtoPackage) repoVersion() (string, error) {
|
|
||||||
if err := p.findPkgFiles(); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(p.PkgFiles) == 0 {
|
|
||||||
return "", errors.New("not found")
|
|
||||||
}
|
|
||||||
|
|
||||||
fNameSplit := strings.Split(p.PkgFiles[0], "-")
|
|
||||||
return fNameSplit[len(fNameSplit)-3] + "-" + fNameSplit[len(fNameSplit)-2], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// increasePkgRel rewrites the pkgrel in the PKGBUILD (and updates p.Version
// to match) so that a rebuild of the same upstream version gets a strictly
// newer version number. If pkgrel already carries a sub-release ("X.Y"),
// buildNo is added to Y; otherwise ".buildNo" is appended.
func (p *ProtoPackage) increasePkgRel(buildNo int) error {
	// Srcinfo is needed for the current pkgver/pkgrel; generate lazily.
	if p.Srcinfo == nil {
		err := p.genSrcinfo()
		if err != nil {
			return fmt.Errorf("error generating srcinfo: %w", err)
		}
	}

	if p.Version == "" {
		p.Version = constructVersion(p.Srcinfo.Pkgver, p.Srcinfo.Pkgrel, p.Srcinfo.Epoch)
	}

	// Open read-write: the file is read fully, then truncated and rewritten.
	f, err := os.OpenFile(p.Pkgbuild, os.O_RDWR, 0o644)
	if err != nil {
		return err
	}

	defer func(f *os.File) {
		err := f.Close()
		if err != nil {
			panic(err)
		}
	}(f)

	fStr, err := io.ReadAll(f)
	if err != nil {
		return err
	}

	// increase buildno if already existing
	var nStr string
	if strings.Contains(p.Srcinfo.Pkgrel, ".") {
		// pkgrel is already of the form "X.Y": add buildNo onto Y.
		pkgRelSplit := strings.Split(p.Srcinfo.Pkgrel, ".")
		pkgRelBuildNo, err := strconv.Atoi(pkgRelSplit[len(pkgRelSplit)-1])
		if err != nil {
			return err
		}

		// Replace the pkgrel= line in the PKGBUILD text…
		nStr = rePkgRel.ReplaceAllLiteralString(string(fStr), "pkgrel="+pkgRelSplit[0]+"."+strconv.Itoa(buildNo+pkgRelBuildNo))
		// …and keep p.Version (epoch:pkgver-pkgrel) in sync.
		versionSplit := strings.Split(p.Version, "-")
		versionSplit[len(versionSplit)-1] = pkgRelSplit[0] + "." + strconv.Itoa(buildNo+pkgRelBuildNo)
		p.Version = strings.Join(versionSplit, "-")
	} else {
		// Plain pkgrel "X": append ".buildNo" to both file and version.
		nStr = rePkgRel.ReplaceAllLiteralString(string(fStr), "pkgrel="+p.Srcinfo.Pkgrel+"."+strconv.Itoa(buildNo))
		p.Version += "." + strconv.Itoa(buildNo)
	}

	// Rewind and truncate before writing the modified PKGBUILD in place.
	_, err = f.Seek(0, 0)
	if err != nil {
		return err
	}
	err = f.Truncate(0)
	if err != nil {
		return err
	}

	_, err = f.WriteString(nStr)
	if err != nil {
		return err
	}

	return nil
}
|
|
||||||
|
|
||||||
func (p *ProtoPackage) importKeys() error {
|
|
||||||
if p.Srcinfo == nil {
|
|
||||||
err := p.genSrcinfo()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("error generating srcinfo: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if p.Srcinfo.ValidPGPKeys != nil {
|
|
||||||
args := []string{"--keyserver", "keyserver.ubuntu.com", "--recv-keys"}
|
|
||||||
args = append(args, p.Srcinfo.ValidPGPKeys...)
|
|
||||||
cmd := exec.Command("gpg", args...)
|
|
||||||
_, err := cmd.CombinedOutput()
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *ProtoPackage) isAvailable(ctx context.Context, h *alpm.Handle) bool {
|
|
||||||
dbs, err := h.SyncDBs()
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
buildManager.alpmMutex.Lock()
|
|
||||||
defer buildManager.alpmMutex.Unlock()
|
|
||||||
|
|
||||||
var pkg alpm.IPackage
|
|
||||||
switch {
|
|
||||||
case p.Srcinfo != nil:
|
|
||||||
pkg, err = dbs.FindSatisfier(p.Srcinfo.Packages[0].Pkgname)
|
|
||||||
case p.DBPackage != nil && len(p.DBPackage.Packages) > 0:
|
|
||||||
pkg, err = dbs.FindSatisfier(p.DBPackage.Packages[0])
|
|
||||||
default:
|
|
||||||
cmd := exec.CommandContext(ctx, "unbuffer", "pacsift", "--exact", "--base="+p.Pkgbase, "--repo="+p.Repo.String(), //nolint:gosec
|
|
||||||
"--sysroot="+filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot))
|
|
||||||
var res []byte
|
|
||||||
res, err = cmd.Output()
|
|
||||||
if err != nil {
|
|
||||||
log.Warningf("error getting packages from pacsift for %s: %v", p.Pkgbase, err)
|
|
||||||
return false
|
|
||||||
} else if len(res) == 0 {
|
|
||||||
log.Warningf("error getting packages from pacsift for %s", p.Pkgbase)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(strings.Split(strings.TrimSpace(string(res)), "\n")) > 0 {
|
|
||||||
pacsiftLines := strings.Split(strings.TrimSpace(string(res)), "\n")
|
|
||||||
|
|
||||||
var splitPkgs []string
|
|
||||||
for _, line := range pacsiftLines {
|
|
||||||
splitPkgs = append(splitPkgs, strings.Split(line, "/")[1])
|
|
||||||
}
|
|
||||||
|
|
||||||
if p.DBPackage != nil {
|
|
||||||
p.DBPackage, err = p.DBPackage.Update().SetPackages(splitPkgs).Save(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
pkg, err = dbs.FindSatisfier(splitPkgs[0])
|
|
||||||
} else {
|
|
||||||
log.Warningf("error getting packages from pacsift for %s", p.Pkgbase)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
log.Debugf("error resolving %s: %v", p.Pkgbase, err)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if pkg.DB().Name() != p.Repo.String() || pkg.Base() != p.Pkgbase {
|
|
||||||
log.Debugf("%s: repo (%s!=%s) or pkgbase (%s!=%s) does not match", p.Pkgbase, pkg.DB().Name(), p.Repo.String(), pkg.Base(), p.Pkgbase)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if p.Srcinfo != nil && (!Contains(p.Srcinfo.Arch, pkg.Architecture()) || p.Srcinfo.Pkgbase != pkg.Base()) {
|
|
||||||
log.Debugf("%s: arch (%s!=%s) or pkgbase (%s!=%s) does not match", p.Pkgbase, p.Srcinfo.Arch[0],
|
|
||||||
pkg.Architecture(), pkg.Base(), p.Pkgbase)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *ProtoPackage) GitVersion(h *alpm.Handle) (string, error) {
|
|
||||||
if p.Pkgbase == "" {
|
|
||||||
return "", errors.New("invalid arguments")
|
|
||||||
}
|
|
||||||
|
|
||||||
stateFiles, _ := Glob(filepath.Join(conf.Basedir.Work, stateDir, "**/"+p.Pkgbase))
|
|
||||||
|
|
||||||
var fStateFiles []string
|
|
||||||
for _, stateFile := range stateFiles {
|
|
||||||
_, subRepo, _, err := stateFileMeta(stateFile)
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if subRepo != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if !Contains(fStateFiles, stateFile) {
|
|
||||||
fStateFiles = append(fStateFiles, stateFile)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(fStateFiles) > 1 {
|
|
||||||
log.Infof("%s: multiple statefiles found, try resolving from mirror", p.Pkgbase)
|
|
||||||
dbs, err := h.SyncDBs()
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
buildManager.alpmMutex.Lock()
|
|
||||||
iPackage, err := dbs.FindSatisfier(p.Pkgbase)
|
|
||||||
buildManager.alpmMutex.Unlock()
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, stateFile := range fStateFiles {
|
|
||||||
repo, _, _, err := stateFileMeta(stateFile)
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if iPackage.DB().Name() == repo {
|
|
||||||
fStateFiles = []string{stateFile}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(fStateFiles) > 1 {
|
|
||||||
return "", MultipleStateFilesError{fmt.Errorf("%s: multiple statefiles found: %s", p.Pkgbase, fStateFiles)}
|
|
||||||
}
|
|
||||||
log.Infof("%s: resolving successful: MirrorRepo=%s; statefile chosen: %s", p.Pkgbase, iPackage.DB().Name(), fStateFiles[0])
|
|
||||||
} else if len(fStateFiles) == 0 {
|
|
||||||
return "", fmt.Errorf("%s: no matching statefile found (searched: %s, canidates: %s)", p.Pkgbase,
|
|
||||||
filepath.Join(conf.Basedir.Work, stateDir, "**/"+p.Pkgbase), stateFiles)
|
|
||||||
}
|
|
||||||
|
|
||||||
rawState, err := os.ReadFile(fStateFiles[0])
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf("error reading statefile %s: %w", fStateFiles[0], err)
|
|
||||||
}
|
|
||||||
state, err := parseState(string(rawState))
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf("error parsing statefile: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return state.PkgVer, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *ProtoPackage) isPkgFailed() bool {
|
|
||||||
if p.DBPackage.Version == "" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if alpm.VerCmp(p.DBPackage.Version, p.Version) < 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return p.DBPackage.Status == dbpackage.StatusFailed
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *ProtoPackage) genSrcinfo() error {
|
|
||||||
if p.Srcinfo != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
cmd := exec.Command("makepkg", "--printsrcinfo", "-p", filepath.Base(p.Pkgbuild)) //nolint:gosec
|
|
||||||
cmd.Dir = filepath.Dir(p.Pkgbuild)
|
|
||||||
res, err := cmd.CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("makepkg exit non-zero (PKGBUILD: %s): %w (%s)", p.Pkgbuild, err, string(res))
|
|
||||||
}
|
|
||||||
|
|
||||||
info, err := srcinfo.Parse(string(res))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
p.Srcinfo = info
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *ProtoPackage) findPkgFiles() error {
|
|
||||||
pkgs, err := os.ReadDir(filepath.Join(conf.Basedir.Repo, p.FullRepo, "os", conf.Arch))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if p.DBPackage == nil && p.Srcinfo == nil {
|
|
||||||
return errors.New("unable to find pkgfiles without dbpkg or srcinfo present")
|
|
||||||
}
|
|
||||||
|
|
||||||
var realPkgs []string
|
|
||||||
if p.DBPackage != nil {
|
|
||||||
realPkgs = append(realPkgs, p.DBPackage.Packages...)
|
|
||||||
} else {
|
|
||||||
for i := range p.Srcinfo.Packages {
|
|
||||||
realPkgs = append(realPkgs, p.Srcinfo.Packages[i].Pkgname)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var fPkg []string
|
|
||||||
for _, file := range pkgs {
|
|
||||||
if !file.IsDir() && !strings.HasSuffix(file.Name(), ".sig") {
|
|
||||||
matches := rePkgFile.FindStringSubmatch(file.Name())
|
|
||||||
|
|
||||||
if len(matches) > 1 && Contains(realPkgs, matches[1]) {
|
|
||||||
fPkg = append(fPkg, filepath.Join(conf.Basedir.Repo, p.FullRepo, "os", conf.Arch, file.Name()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
p.PkgFiles = fPkg
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *ProtoPackage) toDBPackage(ctx context.Context, create bool) error {
|
|
||||||
if p.DBPackage != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
dbPkg, err := db.DBPackage.Query().Where(
|
|
||||||
dbpackage.Pkgbase(p.Pkgbase),
|
|
||||||
dbpackage.March(p.March),
|
|
||||||
dbpackage.RepositoryEQ(p.Repo),
|
|
||||||
).Only(ctx)
|
|
||||||
if err != nil && ent.IsNotFound(err) && create {
|
|
||||||
dbPkg, err = db.DBPackage.Create().
|
|
||||||
SetPkgbase(p.Pkgbase).
|
|
||||||
SetMarch(p.March).
|
|
||||||
SetRepository(p.Repo).
|
|
||||||
Save(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else if err != nil && !ent.IsNotFound(err) {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
p.DBPackage = dbPkg
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *ProtoPackage) exists() (bool, error) {
|
|
||||||
dbPkg, err := db.DBPackage.Query().Where(dbpackage.And(dbpackage.Pkgbase(p.Pkgbase), dbpackage.March(p.March))).Exist(context.Background())
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return dbPkg, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *ProtoPackage) isMirrorLatest(h *alpm.Handle) (latest bool, foundPkg *alpm.Package, version string, err error) {
|
|
||||||
dbs, err := h.SyncDBs()
|
|
||||||
if err != nil {
|
|
||||||
return false, nil, "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
allDepends := p.Srcinfo.Depends
|
|
||||||
allDepends = append(allDepends, p.Srcinfo.MakeDepends...)
|
|
||||||
// add gcc to dependents, since we can't know for sure if its in use
|
|
||||||
// prevents issues like #111
|
|
||||||
allDepends = append(allDepends, srcinfo.ArchString{
|
|
||||||
Arch: "x86_64",
|
|
||||||
Value: "gcc",
|
|
||||||
})
|
|
||||||
|
|
||||||
for _, dep := range allDepends {
|
|
||||||
buildManager.alpmMutex.Lock()
|
|
||||||
pkg, err := dbs.FindSatisfier(dep.Value)
|
|
||||||
buildManager.alpmMutex.Unlock()
|
|
||||||
if err != nil {
|
|
||||||
return false, nil, "", UnableToSatisfyError{err}
|
|
||||||
}
|
|
||||||
|
|
||||||
svn2gitVer, err := (&ProtoPackage{ //nolint:exhaustruct,exhaustivestruct
|
|
||||||
Pkgbase: pkg.Base(),
|
|
||||||
March: p.March,
|
|
||||||
}).GitVersion(h)
|
|
||||||
if err != nil {
|
|
||||||
return false, nil, "", err
|
|
||||||
} else if svn2gitVer == "" {
|
|
||||||
return false, nil, "", errors.New("no svn2git version")
|
|
||||||
}
|
|
||||||
|
|
||||||
if alpm.VerCmp(svn2gitVer, pkg.Version()) > 0 {
|
|
||||||
switch v := pkg.(type) {
|
|
||||||
case *alpm.Package:
|
|
||||||
return false, v, svn2gitVer, nil
|
|
||||||
default:
|
|
||||||
return false, nil, "", fmt.Errorf("invalid package type: %T", pkg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return true, nil, "", nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *ProtoPackage) PkgbaseEquals(p2 *ProtoPackage, marchSensitive bool) bool {
|
|
||||||
return (marchSensitive && (p.Pkgbase == p2.Pkgbase && p.FullRepo == p2.FullRepo)) || (!marchSensitive && p.Pkgbase == p2.Pkgbase)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *ProtoPackage) IsBuilt() (bool, error) {
|
|
||||||
if p.DBPackage == nil {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
matches, err := filepath.Glob(filepath.Join(conf.Basedir.Work, waitingDir, p.FullRepo, p.DBPackage.Packages[0]+"*-x86_64.pkg.tar.zst"))
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return len(matches) > 0, nil
|
|
||||||
}
|
|
@@ -1,187 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
// PkgbuildTest is a real-world PKGBUILD fixture (gnome-todo, plain pkgrel=1)
// used by the increasePkgRel tests.
const PkgbuildTest = `# Maintainer: Jan Alexander Steffens (heftig) <heftig@archlinux.org>

pkgname=gnome-todo
pkgver=41.0+r69+ga9a5b7cd
pkgrel=1
pkgdesc="Task manager for GNOME"
url="https://wiki.gnome.org/Apps/Todo"
arch=(x86_64)
license=(GPL)
depends=(evolution-data-server libpeas python gtk4 libportal-gtk4 libadwaita)
makedepends=(gobject-introspection appstream-glib git meson yelp-tools)
groups=(gnome-extra)
_commit=a9a5b7cdde0244331d2d49220f04018be60c018e # master
source=("git+https://gitlab.gnome.org/GNOME/gnome-todo.git#commit=$_commit")
sha256sums=('SKIP')

pkgver() {
  cd $pkgname
  git describe --tags | sed 's/^GNOME_TODO_//;s/_/./g;s/[^-]*-g/r&/;s/-/+/g'

}

prepare() {
  cd $pkgname
}

build() {
  arch-meson $pkgname build
  meson compile -C build
}

check() (
  glib-compile-schemas "${GSETTINGS_SCHEMA_DIR:=$PWD/$pkgname/data}"
  export GSETTINGS_SCHEMA_DIR

  meson test -C build --print-errorlogs
)

package() {
  meson install -C build --destdir "$pkgdir"
}

# vim:set sw=2 et:
`
|
|
||||||
|
|
||||||
// PkgbuildTestWithPkgrelSub is the same fixture as PkgbuildTest but with a
// sub-release pkgrel ("1.1"), exercising the increment path of
// increasePkgRel.
const PkgbuildTestWithPkgrelSub = `# Maintainer: Jan Alexander Steffens (heftig) <heftig@archlinux.org>

pkgname=gnome-todo
pkgver=41.0+r69+ga9a5b7cd
pkgrel=1.1
pkgdesc="Task manager for GNOME"
url="https://wiki.gnome.org/Apps/Todo"
arch=(x86_64)
license=(GPL)
depends=(evolution-data-server libpeas python gtk4 libportal-gtk4 libadwaita)
makedepends=(gobject-introspection appstream-glib git meson yelp-tools)
groups=(gnome-extra)
_commit=a9a5b7cdde0244331d2d49220f04018be60c018e # master
source=("git+https://gitlab.gnome.org/GNOME/gnome-todo.git#commit=$_commit")
sha256sums=('SKIP')

pkgver() {
  cd $pkgname
  git describe --tags | sed 's/^GNOME_TODO_//;s/_/./g;s/[^-]*-g/r&/;s/-/+/g'

}

prepare() {
  cd $pkgname
}

build() {
  arch-meson $pkgname build
  meson compile -C build
}

check() (
  glib-compile-schemas "${GSETTINGS_SCHEMA_DIR:=$PWD/$pkgname/data}"
  export GSETTINGS_SCHEMA_DIR

  meson test -C build --print-errorlogs
)

package() {
  meson install -C build --destdir "$pkgdir"
}

# vim:set sw=2 et:
`
|
|
||||||
|
|
||||||
func TestIncreasePkgRel(t *testing.T) { //nolint:paralleltest
|
|
||||||
pkgbuild, err := os.CreateTemp(t.TempDir(), "")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal("unable to setup temp. PKGBUILD")
|
|
||||||
}
|
|
||||||
defer func(name string) {
|
|
||||||
_ = os.Remove(name)
|
|
||||||
}(pkgbuild.Name())
|
|
||||||
|
|
||||||
_, err = pkgbuild.WriteString(PkgbuildTest)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal("unable to write to temp. PKGBUILD")
|
|
||||||
}
|
|
||||||
_ = pkgbuild.Close()
|
|
||||||
|
|
||||||
buildPkg := &ProtoPackage{
|
|
||||||
Pkgbase: "gnome-todo",
|
|
||||||
Pkgbuild: pkgbuild.Name(),
|
|
||||||
}
|
|
||||||
|
|
||||||
err = buildPkg.increasePkgRel(1)
|
|
||||||
if err != nil {
|
|
||||||
t.Logf("increasePkgRel: %v", err)
|
|
||||||
t.Fail()
|
|
||||||
}
|
|
||||||
|
|
||||||
versionSplit := strings.Split(buildPkg.Version, "-")
|
|
||||||
if versionSplit[len(versionSplit)-1] != "1.1" {
|
|
||||||
t.Logf("increasePkgRel: expected 1.1 pkgrel, got: %s", buildPkg.Version)
|
|
||||||
t.Fail()
|
|
||||||
}
|
|
||||||
|
|
||||||
buildPkg.Srcinfo = nil
|
|
||||||
err = buildPkg.genSrcinfo()
|
|
||||||
if err != nil {
|
|
||||||
t.Logf("increasePkgRel: %v", err)
|
|
||||||
t.Fail()
|
|
||||||
}
|
|
||||||
|
|
||||||
if buildPkg.Srcinfo.Pkgrel != "1.1" {
|
|
||||||
t.Logf("increasePkgRel: expected 1.1 pkgrel, got: %s", buildPkg.Srcinfo.Pkgrel)
|
|
||||||
t.Fail()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestIncreasePkgRelWithPkgSub(t *testing.T) { //nolint:paralleltest
|
|
||||||
pkgbuild, err := os.CreateTemp(t.TempDir(), "")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal("unable to setup temp. PKGBUILD")
|
|
||||||
}
|
|
||||||
defer func(name string) {
|
|
||||||
_ = os.Remove(name)
|
|
||||||
}(pkgbuild.Name())
|
|
||||||
|
|
||||||
_, err = pkgbuild.WriteString(PkgbuildTestWithPkgrelSub)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal("unable to write to temp. PKGBUILD")
|
|
||||||
}
|
|
||||||
_ = pkgbuild.Close()
|
|
||||||
|
|
||||||
buildPkg := &ProtoPackage{
|
|
||||||
Pkgbase: "gnome-todo",
|
|
||||||
Pkgbuild: pkgbuild.Name(),
|
|
||||||
}
|
|
||||||
|
|
||||||
err = buildPkg.increasePkgRel(1)
|
|
||||||
if err != nil {
|
|
||||||
t.Logf("increasePkgRel: %v", err)
|
|
||||||
t.Fail()
|
|
||||||
}
|
|
||||||
|
|
||||||
versionSplit := strings.Split(buildPkg.Version, "-")
|
|
||||||
if versionSplit[len(versionSplit)-1] != "1.2" {
|
|
||||||
t.Logf("increasePkgRel: expected 1.2 pkgrel, got: %s", buildPkg.Version)
|
|
||||||
t.Fail()
|
|
||||||
}
|
|
||||||
|
|
||||||
buildPkg.Srcinfo = nil
|
|
||||||
err = buildPkg.genSrcinfo()
|
|
||||||
if err != nil {
|
|
||||||
t.Logf("increasePkgRel: %v", err)
|
|
||||||
t.Fail()
|
|
||||||
}
|
|
||||||
|
|
||||||
if buildPkg.Srcinfo.Pkgrel != "1.2" {
|
|
||||||
t.Logf("increasePkgRel: expected 1.2 pkgrel, got: %s", buildPkg.Srcinfo.Pkgrel)
|
|
||||||
t.Fail()
|
|
||||||
}
|
|
||||||
}
|
|
18
rm_chroot.py
18
rm_chroot.py
@@ -1,18 +0,0 @@
|
|||||||
#!/usr/bin/env python3
"""Delete a chroot directory, but only if it lives inside SAVE_PATH.

Exit codes (unchanged): 1 = argument missing or path does not resolve,
2 = resolved path is outside SAVE_PATH.

Fixes vs. previous revision:
  - os.system("rm -rf " + path) allowed shell injection via crafted paths;
    replaced with shutil.rmtree (ignore_errors mirrors rm -rf's best-effort).
  - str(path).startswith(SAVE_PATH) also matched sibling directories such as
    "/path/to/workdir2"; replaced with a path-component-aware check.
  - bare "except:" narrowed to the exceptions resolve() can raise.
"""

import shutil
import sys
from pathlib import Path

# Only paths strictly inside this directory may be deleted.
SAVE_PATH = "/path/to/workdir"

try:
    chroot_abs = Path(sys.argv[1]).resolve(True)
except (IndexError, OSError, RuntimeError):
    print("path does not resolve")
    sys.exit(1)

try:
    # relative_to() raises ValueError unless chroot_abs is under SAVE_PATH.
    chroot_abs.relative_to(SAVE_PATH)
except ValueError:
    sys.exit(2)

shutil.rmtree(chroot_abs, ignore_errors=True)
|
|
892
utils.go
892
utils.go
@@ -1,892 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"github.com/Jguer/go-alpm/v2"
|
|
||||||
paconf "github.com/Morganamilo/go-pacmanconf"
|
|
||||||
"github.com/Morganamilo/go-srcinfo"
|
|
||||||
"github.com/c2h5oh/datasize"
|
|
||||||
"github.com/gobwas/glob"
|
|
||||||
log "github.com/sirupsen/logrus"
|
|
||||||
"gopkg.in/yaml.v2"
|
|
||||||
"io"
|
|
||||||
"io/fs"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"regexp"
|
|
||||||
"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Well-known paths and filenames. Absolute paths come from devtools; the
// bare names are joined onto conf.Basedir.Work / conf.Basedir.Repo at the
// call sites.
const (
	pacmanConf     = "/usr/share/devtools/pacman.conf.d/multilib.conf" // devtools pacman config (multilib enabled)
	makepkgConf    = "/usr/share/devtools/makepkg.conf.d/x86_64.conf"  // stock makepkg config used as template
	makepkgConfExt = "/etc/makepkg.conf.d"                             // host-side makepkg config extensions
	logDir         = "logs"
	pristineChroot = "root" // name of the pristine (template) chroot
	buildDir       = "build"
	lastUpdate     = "lastupdate" // timestamp file in the repo root
	stateDir       = "state"
	chrootDir      = "chroot"
	makepkgDir     = "makepkg"
	waitingDir     = "to_be_moved"             // staging area for finished packages
	makepkgLTO     = "makepkg-%s-non-lto.conf" // fmt pattern; %s = march
	makepkg        = "makepkg-%s.conf"         // fmt pattern; %s = march
	flagConfig     = "flags.yaml"
)

var (
	// reVar matches a (possibly commented-out) VAR=(...) or VAR="..."
	// assignment in makepkg.conf; groups: name, opening delim, value,
	// closing delim.
	reVar = regexp.MustCompile(`(?mU)^#?[^\S\r\n]*(\w+)[^\S\r\n]*=[^\S\r\n]*([("])([^)"]*)([)"])[^\S\r\n]*$`)
	// reEnvClean collapses whitespace/backslash runs inside flag values.
	reEnvClean = regexp.MustCompile(`(?m) ([\s\\]+) `)
	// rePkgRel extracts the pkgrel line from a PKGBUILD.
	rePkgRel = regexp.MustCompile(`(?m)^pkgrel\s*=\s*(.+)$`)
	// rePkgFile matches built package (or signature) filenames.
	rePkgFile = regexp.MustCompile(`^(.+)(?:-.+){2}-(?:x86_64|any)\.pkg\.tar\.zst(?:\.sig)*$`)
	// Build-log error signatures, presumably used to classify build
	// failures elsewhere in the project (usage sites not visible here).
	reLdError        = regexp.MustCompile(`(?mi).*collect2: error: ld returned (\d+) exit status.*`)
	reDownloadError  = regexp.MustCompile(`(?m)^error: could not rename .+$`)
	reDownloadError2 = regexp.MustCompile(`(?m)^error: failed retrieving file '.+' from .*: The requested URL returned error: .+$`)
	rePortError      = regexp.MustCompile(`(?m)^OSError: \x5bErrno 98\x5d Address already in use$`)
	reSigError       = regexp.MustCompile(`(?m)^error: .*: signature from .* is invalid$`)
	reRustLTOError   = regexp.MustCompile(`(?m)^error: options \x60-C (.+)\x60 and \x60-C lto\x60 are incompatible$`)
	// Name/path sanitizers.
	reReplaceSinglePlus     = regexp.MustCompile(`(?m)([a-zA-Z0-9]+)\+([a-zA-Z]+)`)
	reReplaceRemainingPlus  = regexp.MustCompile(`(?m)\+`)
	reReplaceSpecialChars   = regexp.MustCompile(`(?m)[^a-zA-Z0-9_\-.]`)
	reReplaceUnderscore     = regexp.MustCompile(`(?m)[_\-]{2,}`)
	reReplaceTree           = regexp.MustCompile(`(?m)^tree$`)
	reReplacePacsiftWarning = regexp.MustCompile(`(?m)^warning:.*\n*`)
)

// Conf mirrors the application's YAML configuration file.
type Conf struct {
	Arch         string   // target architecture (e.g. "x86_64")
	Repos, March []string // upstream repos and march variants to build
	StateRepo    string   `yaml:"state_repo"`
	Basedir      struct {
		Repo, Work, Debug string // repo output, scratch/work, debug-package dirs
	}
	DB struct {
		Driver    string
		ConnectTo string `yaml:"connect_to"`
	} `yaml:"db"`
	Build struct {
		Makej       int  // parallel make jobs ($buildproc$ substitution)
		Checks      bool // whether to run check() during builds
		MemoryLimit datasize.ByteSize `yaml:"memory_limit"`
	}
	Logging struct {
		Level string
	}
	Blacklist struct {
		Packages, Repo []string
		LTO            []string `yaml:"lto"` // packages built without LTO
	}
	Housekeeping struct {
		Interval string
	}
	MaxCloneRetries uint64 `yaml:"max_clone_retries"`
	Metrics         struct {
		Port uint32
	}
}

// Globs is a list of glob-pattern fragments produced by splitting a pattern
// on "**"; see Glob and Globs.Expand.
type Globs []string

// MultipleStateFilesError wraps an error signalling that more than one state
// file matched; exact semantics are defined at the (not visible) usage sites.
type MultipleStateFilesError struct {
	error
}

// UnableToSatisfyError wraps an error signalling unsatisfiable dependencies;
// exact semantics are defined at the (not visible) usage sites.
type UnableToSatisfyError struct {
	error
}

// StateInfo is the parsed content of a single state-repo file
// ("<pkgbase> <pkgver> <tagver> <tagrev>"); see parseState.
type StateInfo struct {
	Pkgbase string
	PkgVer  string
	TagVer  string
	TagRev  string
}
|
|
||||||
|
|
||||||
func updateLastUpdated() error {
|
|
||||||
err := os.WriteFile(filepath.Join(conf.Basedir.Repo, lastUpdate), []byte(strconv.FormatInt(time.Now().Unix(), 10)), 0o644) //nolint:gosec
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// removeViaSudo deletes path with the privileged rm_chroot.py helper, which
// only removes paths inside the configured workdir. The helper's output is
// included in the returned error for diagnosis.
func removeViaSudo(path string) error {
	out, err := exec.Command("sudo", "rm_chroot.py", path).CombinedOutput()
	if err != nil {
		return fmt.Errorf("rm_chroot.py %s: %w: %s", path, err, string(out))
	}
	return nil
}

// cleanBuildDir removes the given build directory and, if chrootDir is
// non-empty, the accompanying build chroot plus its ".lock" file. Deletion is
// delegated to rm_chroot.py via sudo (the dirs contain root-owned files).
// A stat failure on dir is ignored (nothing to clean); a stat failure on
// chrootDir other than "not exist" is reported.
func cleanBuildDir(dir, chrootDir string) error {
	if stat, err := os.Stat(dir); err == nil && stat.IsDir() {
		if err := removeViaSudo(dir); err != nil {
			return err
		}
	}

	if chrootDir == "" {
		return nil
	}

	stat, err := os.Stat(chrootDir)
	switch {
	case err == nil && stat.IsDir():
		if err := removeViaSudo(chrootDir); err != nil {
			return err
		}
		// best-effort: the lock file is owned by us, plain Remove suffices
		_ = os.Remove(chrootDir + ".lock")
	case !os.IsNotExist(err):
		return fmt.Errorf("chroot dir was not a directory or failed to stat: %w", err)
	}

	return nil
}
|
|
||||||
|
|
||||||
func pkgList2MaxMem(pkgList []*ProtoPackage) datasize.ByteSize {
|
|
||||||
var sum uint64
|
|
||||||
for _, pkg := range pkgList {
|
|
||||||
if pkg.DBPackage.MaxRss != nil {
|
|
||||||
sum += uint64(*pkg.DBPackage.MaxRss) //nolint:gosec
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// multiply by Kibibyte here, since rusage is in kb
|
|
||||||
// https://man.archlinux.org/man/core/man-pages/getrusage.2.en#ru_maxrss
|
|
||||||
return datasize.ByteSize(sum) * datasize.KB
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateFileMeta derives repository metadata from a state-file path. The name
// of the file's parent directory is expected to be "<repo>-<arch>" or
// "<repo>-<subRepo>-<arch>"; subRepo is nil when absent. Fewer than two
// dash-separated parts is an error.
func stateFileMeta(stateFile string) (repo string, subRepo *string, arch string, err error) {
	parts := strings.Split(filepath.Base(filepath.Dir(stateFile)), "-")
	switch {
	case len(parts) < 2:
		return "", nil, "", errors.New("error getting metainfo")
	case len(parts) == 3:
		return parts[0], &parts[1], parts[2], nil
	default:
		// two parts, or more than three: treat the second as arch
		return parts[0], nil, parts[1], nil
	}
}
|
|
||||||
|
|
||||||
// movePackagesLive moves finished packages for fullRepo (e.g. "extra-x86-64-v3")
// from the staging dir (waitingDir) into the live repository tree and queues
// them on buildManager.repoAdd for repo-add. "-debug" split packages are
// diverted to conf.Basedir.Debug; packages without a DB entry or a readable
// state file are deleted. Returns nil if nothing is staged.
func movePackagesLive(ctx context.Context, fullRepo string) error {
	if _, err := os.Stat(filepath.Join(conf.Basedir.Work, waitingDir, fullRepo)); os.IsNotExist(err) {
		// nothing staged for this repo
		return nil
	} else if err != nil {
		return err
	}

	// fullRepo is "<repo>-<march...>"; march itself may contain dashes
	march := strings.Join(strings.Split(fullRepo, "-")[1:], "-")
	repo := strings.Split(fullRepo, "-")[0]

	pkgFiles, err := filepath.Glob(filepath.Join(conf.Basedir.Work, waitingDir, fullRepo, "*.pkg.tar.zst"))
	if err != nil {
		return err
	}

	toAdd := make([]*ProtoPackage, 0)
	debugPkgs := 0

	for _, file := range pkgFiles {
		pkg := Package(file)
		dbPkg, err := pkg.DBPackageIsolated(ctx, march, dbpackage.Repository(repo), db)
		if err != nil {
			// no DB entry: either a -debug split package (kept separately
			// under Basedir.Debug) or an orphan (deleted)
			if strings.HasSuffix(pkg.Name(), "-debug") {
				mkErr := os.MkdirAll(filepath.Join(conf.Basedir.Debug, march), 0o755)
				if mkErr != nil {
					return fmt.Errorf("unable to create folder for debug-packages: %w", mkErr)
				}
				forPackage := strings.TrimSuffix(pkg.Name(), "-debug")
				log.Debugf("[MOVE] found debug package for package %s: %s", forPackage, pkg.Name())
				debugPkgs++

				if _, err := os.Stat(filepath.Join(conf.Basedir.Debug, march, filepath.Base(file))); err == nil {
					log.Warningf("[MOVE] overwrite existing debug infos for %s: %s", forPackage,
						filepath.Join(conf.Basedir.Debug, march, filepath.Base(file)))
				}

				err = Copy(file, filepath.Join(conf.Basedir.Debug, march, filepath.Base(file)))
				if err != nil {
					return err
				}
				_ = os.Remove(file)
				_ = os.Remove(file + ".sig")
				continue
			}

			log.Warningf("[MOVE] deleting package %s: %v", pkg.Name(), err)
			_ = os.Remove(file)
			_ = os.Remove(file + ".sig")
			continue
		}

		// the state file ties the package to an upstream tag; without it the
		// package cannot be tracked and is dropped from staging
		rawState, err := os.ReadFile(filepath.Join(conf.Basedir.Work, stateDir, dbPkg.Repository.String()+"-"+conf.Arch, dbPkg.Pkgbase))
		if err != nil {
			log.Warningf("[MOVE] state not found for %s->%s: %v", fullRepo, dbPkg.Pkgbase, err)
			_ = os.Remove(file)
			_ = os.Remove(file + ".sig")
			continue
		}

		state, err := parseState(string(rawState))
		if err != nil {
			log.Warningf("[MOVE] error parsing state file for %s->%s: %v", fullRepo, dbPkg.Pkgbase, err)
			_ = os.Remove(file)
			_ = os.Remove(file + ".sig")
			continue
		}

		// copy package + signature into the live tree, then remove the
		// staged copies (copy-then-delete; staging and repo may be on
		// different filesystems)
		err = Copy(file, filepath.Join(conf.Basedir.Repo, fullRepo, "os", conf.Arch, filepath.Base(file)))
		if err != nil {
			return err
		}
		_ = os.Remove(file)
		err = Copy(file+".sig", filepath.Join(conf.Basedir.Repo, fullRepo, "os", conf.Arch, filepath.Base(file)+".sig"))
		if err != nil {
			return err
		}
		_ = os.Remove(file + ".sig")

		toAdd = append(toAdd, &ProtoPackage{
			DBPackage: dbPkg,
			PkgFiles:  []string{filepath.Join(conf.Basedir.Repo, fullRepo, "os", conf.Arch, filepath.Base(file))},
			Version:   pkg.Version(),
			March:     march,
			State:     state,
		})
	}

	if len(toAdd) > 0 {
		log.Infof("[%s] adding %d (%d with debug) packages", fullRepo, len(toAdd), debugPkgs)
		buildManager.repoAdd[fullRepo] <- toAdd
	}
	return nil
}
|
|
||||||
|
|
||||||
func packages2slice(pkgs any) []string {
|
|
||||||
switch v := pkgs.(type) {
|
|
||||||
case []srcinfo.Package:
|
|
||||||
var sPkgs []string
|
|
||||||
for i := range v {
|
|
||||||
sPkgs = append(sPkgs, v[i].Pkgname)
|
|
||||||
}
|
|
||||||
|
|
||||||
return sPkgs
|
|
||||||
case []srcinfo.ArchString:
|
|
||||||
var sPkgs []string
|
|
||||||
for _, p := range v {
|
|
||||||
sPkgs = append(sPkgs, p.Value)
|
|
||||||
}
|
|
||||||
|
|
||||||
return sPkgs
|
|
||||||
default:
|
|
||||||
return []string{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// constructVersion assembles a full pacman version string:
// "pkgver-pkgrel", prefixed with "epoch:" when epoch is non-empty.
func constructVersion(pkgver, pkgrel, epoch string) string {
	version := pkgver + "-" + pkgrel
	if epoch != "" {
		version = epoch + ":" + version
	}
	return version
}
|
|
||||||
|
|
||||||
// initALPM creates an alpm handle rooted at root with the given sync-db path
// and registers every repository from the devtools pacman.conf, mirroring its
// Server and Usage settings. The caller owns (and must Release) the handle.
func initALPM(root, dbpath string) (*alpm.Handle, error) {
	h, err := alpm.Initialize(root, dbpath)
	if err != nil {
		return nil, err
	}

	pacmanConfig, _, err := paconf.ParseFile(pacmanConf)
	if err != nil {
		return nil, err
	}

	for _, repo := range pacmanConfig.Repos {
		db, err := h.RegisterSyncDB(repo.Name, 0)
		if err != nil {
			return nil, err
		}
		db.SetServers(repo.Servers)

		// no explicit Usage in pacman.conf means "everything"
		if len(repo.Usage) == 0 {
			db.SetUsage(alpm.UsageAll)
		}
		// map pacman.conf Usage keywords onto alpm usage flags; unknown
		// keywords are silently ignored
		for _, usage := range repo.Usage {
			switch usage {
			case "Sync":
				db.SetUsage(alpm.UsageSync)
			case "Search":
				db.SetUsage(alpm.UsageSearch)
			case "Install":
				db.SetUsage(alpm.UsageInstall)
			case "Upgrade":
				db.SetUsage(alpm.UsageUpgrade)
			case "All":
				db.SetUsage(alpm.UsageAll)
			}
		}
	}

	return h, nil
}
|
|
||||||
|
|
||||||
// setupChroot ensures the pristine build chroot exists and is up to date.
// If it exists it is updated in place (pacman -Syuu); otherwise it is created
// with mkarchroot, given the multilib pacman.conf, and stripped of the stock
// makepkg.conf.d extensions (which are replaced by our per-march configs).
func setupChroot(ctx context.Context) error {
	_, err := os.Stat(filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot))
	switch {
	case err == nil:
		// chroot already exists -> just bring it up to date
		cmd := exec.CommandContext(ctx, "arch-nspawn", "-C", pacmanConf, //nolint:gosec
			filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot), "pacman", "-Syuu", "--noconfirm")
		res, err := cmd.CombinedOutput()
		log.Debug(string(res))
		if err != nil {
			return fmt.Errorf("error updating chroot: %w: %s", err, string(res))
		}
	case os.IsNotExist(err):
		// chroot missing -> create it from scratch
		err = os.MkdirAll(filepath.Join(conf.Basedir.Work, chrootDir), 0o755)
		if err != nil {
			return err
		}
		cmd := exec.CommandContext(ctx, "mkarchroot", "-C", pacmanConf, "-M", makepkgConf, //nolint:gosec
			filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot), "base-devel", "multilib-devel")
		res, err := cmd.CombinedOutput()
		log.Debug(string(res))
		if err != nil {
			return fmt.Errorf("error creating chroot: %w: %s", err, string(res))
		}

		// copy pacman.conf into pristine chroot to enable multilib
		cmd = exec.CommandContext(ctx, "sudo", "cp", pacmanConf, //nolint:gosec
			filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot, "etc/pacman.conf"))
		res, err = cmd.CombinedOutput()
		log.Debug(string(res))
		if err != nil {
			return fmt.Errorf("error copying pacman.conf to chroot: %w: %s", err, string(res))
		}

		// remove makepkg conf extension, they are covered by our custom makepkg
		cmd = exec.CommandContext(ctx, "sudo", "rm_chroot.py", //nolint:gosec
			filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot, "etc/makepkg.conf.d"))
		res, err = cmd.CombinedOutput()
		log.Debug(string(res))
		if err != nil {
			return fmt.Errorf("error removing makepkg.conf.d from chroot: %w: %s", err, string(res))
		}
	default:
		// stat failed for a reason other than "missing" (permissions, I/O)
		return err
	}
	return nil
}
|
|
||||||
|
|
||||||
// syncMarchs prepares the repository layout for every configured march: it
// generates the per-march makepkg configs, starts one repoWorker goroutine
// per <repo>-<march> combination (with buffered add/purge channels), creates
// missing repo directories, and removes directories of repos that are no
// longer configured. Appends to the package-level `repos` slice.
func syncMarchs(ctx context.Context) error {
	files, err := os.ReadDir(conf.Basedir.Repo)
	if err != nil {
		return err
	}

	// collect existing repo dirs; whatever is still in eRepos after the main
	// loop below is stale and gets removed at the end
	var eRepos []string
	for _, file := range files {
		if file.Name() != "." && file.Name() != logDir && file.Name() != makepkgDir && file.IsDir() {
			eRepos = append(eRepos, file.Name())
		}
	}

	flagConfigRaw, err := os.ReadFile(flagConfig)
	if err != nil {
		return err
	}
	var flagCfg map[string]any
	err = yaml.Unmarshal(flagConfigRaw, &flagCfg)
	if err != nil {
		return err
	}

	for _, march := range conf.March {
		err := setupMakepkg(march, flagCfg)
		if err != nil {
			// a broken makepkg config makes all builds for this march
			// useless; treat as fatal
			log.Fatalf("error generating makepkg for %s: %v", march, err)
		}

		for _, repo := range conf.Repos {
			fRepo := fmt.Sprintf("%s-%s", repo, march)
			repos = append(repos, fRepo)
			// large buffers so builders don't block on the repo worker
			buildManager.repoAdd[fRepo] = make(chan []*ProtoPackage, 1000)
			buildManager.repoPurge[fRepo] = make(chan []*ProtoPackage, 1000)
			go buildManager.repoWorker(ctx, fRepo)

			if _, err := os.Stat(filepath.Join(conf.Basedir.Repo, fRepo, "os", conf.Arch)); os.IsNotExist(err) {
				log.Debugf("creating path %s", filepath.Join(conf.Basedir.Repo, fRepo, "os", conf.Arch))
				err = os.MkdirAll(filepath.Join(conf.Basedir.Repo, fRepo, "os", conf.Arch), 0o755)
				if err != nil {
					return err
				}
			}

			// still configured -> not stale; drop from the removal list
			if i := Find(eRepos, fRepo); i != -1 {
				eRepos = append(eRepos[:i], eRepos[i+1:]...)
			}
		}
	}

	log.Infof("repos: %s", repos)

	// remove repo dirs that no longer correspond to a configured repo/march
	for _, repo := range eRepos {
		log.Infof("removing old repo %s", repo)
		err = os.RemoveAll(filepath.Join(conf.Basedir.Repo, repo))
		if err != nil {
			return err
		}
	}
	return nil
}
|
|
||||||
|
|
||||||
// replaceStringsFromMap applies every key->value substitution in replace to
// str and returns the result. Iteration order over the map is unspecified.
func replaceStringsFromMap(str string, replace map[string]string) string {
	result := str
	for old, replacement := range replace {
		result = strings.ReplaceAll(result, old, replacement)
	}
	return result
}
|
|
||||||
|
|
||||||
func parseFlagSubSection(list any, res []string, replaceMap map[string]string) []string {
|
|
||||||
for _, cEntry := range list.([]any) {
|
|
||||||
switch ce := cEntry.(type) {
|
|
||||||
case map[any]any:
|
|
||||||
for k, v := range ce {
|
|
||||||
if v == nil {
|
|
||||||
res = append(res[:Find(res, k.(string))], res[Find(res, k.(string))+1:]...)
|
|
||||||
} else if s, ok := v.(string); ok {
|
|
||||||
Replace(res, k.(string), replaceStringsFromMap(s, replaceMap))
|
|
||||||
} else {
|
|
||||||
log.Warningf("malformated flag-config: unable to handle %v:%v", replaceStringsFromMap(k.(string), replaceMap), v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case string:
|
|
||||||
res = append(res, replaceStringsFromMap(ce, replaceMap))
|
|
||||||
default:
|
|
||||||
log.Warningf("malformated flag-config: unable to handle %v (%T)", cEntry, cEntry)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseFlagSection applies one section of flags.yaml ("common" or "lto") to
// the textual makepkg config and returns the rewritten config. Each sub-entry
// either rewrites an existing VAR=(...)/VAR="..." assignment matched by reVar
// (cflags, ldflags, ...) or, when no assignment exists, appends an
// "export VAR=..." line. $level$, $march$ and $buildproc$ are substituted.
// NOTE(review): $level$ takes the last two characters of march, i.e. it
// assumes march ends in a two-character level suffix such as "v3" — confirm.
func parseFlagSection(section any, makepkgConf, march string) (string, error) {
	replaceMap := map[string]string{"$level$": march[len(march)-2:], "$march$": march, "$buildproc$": strconv.Itoa(conf.Build.Makej)}

	if ct, ok := section.(map[any]any); ok {
		for subSec, subMap := range ct {
			// re-scan on every iteration: earlier entries may already have
			// rewritten the config text
			varsReg := reVar.FindAllStringSubmatch(makepkgConf, -1)
			if varsReg == nil {
				return "", errors.New("no match in config found")
			}

			// locate the existing assignment (if any) for this sub-section
			// and split its current value into individual flags
			var flags []string
			var orgMatch []string
			for _, match := range varsReg {
				if strings.ToLower(match[1]) == subSec.(string) {
					if subSec.(string) == "ldflags" {
						// ldflags are comma-separated in makepkg.conf
						flags = strings.Split(reEnvClean.ReplaceAllString(match[3], ","), ",")
					} else {
						flags = strings.Split(reEnvClean.ReplaceAllString(match[3], " "), " ")
					}
					orgMatch = match
				}
			}

			// a plain string value replaces the whole existing assignment
			if _, ok := subMap.(string); ok && len(orgMatch) > 0 {
				log.Debugf("replace %s with %s", orgMatch[0], fmt.Sprintf("\n%s=%s%s%s",
					strings.ToUpper(subSec.(string)), orgMatch[2], replaceStringsFromMap(subMap.(string), replaceMap), orgMatch[4]))
				makepkgConf = strings.ReplaceAll(makepkgConf, orgMatch[0], fmt.Sprintf("\n%s=%s%s%s",
					strings.ToUpper(subSec.(string)), orgMatch[2], replaceStringsFromMap(subMap.(string), replaceMap), orgMatch[4]))
				continue
			}

			if len(orgMatch) == 0 {
				// no match found, assume env var and append it
				log.Debugf("no match found for %s:%v, appending", subSec, subMap)
				switch sm := subMap.(type) {
				case string:
					if strings.Contains(sm, " ") {
						// quote values containing spaces
						makepkgConf += fmt.Sprintf("\nexport %s=%q", strings.ToUpper(subSec.(string)), replaceStringsFromMap(sm, replaceMap))
						continue
					}
					makepkgConf += fmt.Sprintf("\nexport %s=%s", strings.ToUpper(subSec.(string)), replaceStringsFromMap(sm, replaceMap))
					continue
				case []string:
					makepkgConf += fmt.Sprintf("\nexport %s=%q", strings.ToUpper(subSec.(string)), replaceStringsFromMap(strings.Join(sm, " "), replaceMap)) //nolint:lll
					continue
				}
			}

			// list value: merge add/remove/replace entries into the flags
			log.Debugf("original %s: %v (%d)", subSec, flags, len(flags))
			flags = parseFlagSubSection(subMap, flags, replaceMap)
			log.Debugf("new %s: %v (%d)", subSec, flags, len(flags))

			// write the merged flags back with the variable's original
			// name/delimiters, keeping the ldflags comma convention
			if subSec.(string) == "ldflags" {
				makepkgConf = strings.ReplaceAll(makepkgConf, orgMatch[0], fmt.Sprintf(`%s=%s%s%s`, orgMatch[1],
					orgMatch[2], strings.Join(flags, ","), orgMatch[4]))
			} else {
				makepkgConf = strings.ReplaceAll(makepkgConf, orgMatch[0], fmt.Sprintf(`%s=%s%s%s`, orgMatch[1],
					orgMatch[2], strings.Join(flags, " "), orgMatch[4]))
			}
		}
	}

	return makepkgConf, nil
}
|
|
||||||
|
|
||||||
//goland:noinspection SpellCheckingInspection

// setupMakepkg generates the two per-march makepkg configs under
// <work>/makepkg: the non-LTO variant gets only the "common" flags section,
// the default variant additionally gets the "lto" section layered on top.
// The template is the stock devtools makepkg.conf plus any host extensions
// from /etc/makepkg.conf.d.
func setupMakepkg(march string, flags map[string]any) error {
	lMakepkg := filepath.Join(conf.Basedir.Work, makepkgDir, fmt.Sprintf(makepkg, march))
	lMakepkgLTO := filepath.Join(conf.Basedir.Work, makepkgDir, fmt.Sprintf(makepkgLTO, march))

	err := os.MkdirAll(filepath.Join(conf.Basedir.Work, makepkgDir), 0o755)
	if err != nil {
		return err
	}
	t, err := os.ReadFile(makepkgConf)
	if err != nil {
		return err
	}
	makepkgStrBuilder := new(strings.Builder)
	makepkgStrBuilder.Write(t)

	// read makepkg conf.d
	// (the local makepkgConfExt deliberately shadows the package-level
	// path constant of the same name from here on)
	makepkgConfExt, err := Glob(filepath.Join(makepkgConfExt, "*.conf"))
	if err != nil {
		return err
	}

	for _, makepkgExt := range makepkgConfExt {
		ext, err := os.ReadFile(makepkgExt)
		if err != nil {
			return err
		}
		makepkgStrBuilder.Write(ext)
	}

	makepkgStr := makepkgStrBuilder.String()

	// "common" flags apply to both variants
	makepkgStr, err = parseFlagSection(flags["common"], makepkgStr, march)
	if err != nil {
		return err
	}

	// write non-lto makepkg
	err = os.WriteFile(lMakepkgLTO, []byte(makepkgStr), 0o644) //nolint:gosec
	if err != nil {
		return err
	}

	// "lto" flags are layered on top for the default variant
	makepkgStr, err = parseFlagSection(flags["lto"], makepkgStr, march)
	if err != nil {
		return err
	}

	// write makepkg
	err = os.WriteFile(lMakepkg, []byte(makepkgStr), 0o644) //nolint:gosec
	if err != nil {
		return err
	}

	return nil
}
|
|
||||||
|
|
||||||
func parseState(state string) (*StateInfo, error) {
|
|
||||||
ss := strings.Split(state, " ")
|
|
||||||
if len(ss) != 4 {
|
|
||||||
return nil, errors.New("invalid state file")
|
|
||||||
}
|
|
||||||
|
|
||||||
return &StateInfo{
|
|
||||||
Pkgbase: ss[0],
|
|
||||||
PkgVer: ss[1],
|
|
||||||
TagVer: ss[2],
|
|
||||||
TagRev: strings.Trim(ss[3], "\n"),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func ContainsPkg(pkgs []*ProtoPackage, pkg *ProtoPackage, repoSensitive bool) bool {
|
|
||||||
for _, tPkg := range pkgs {
|
|
||||||
if tPkg.PkgbaseEquals(pkg, repoSensitive) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func Contains(s any, str string) bool {
|
|
||||||
switch v := s.(type) {
|
|
||||||
case []string:
|
|
||||||
if i := Find(v, str); i != -1 {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
case []srcinfo.ArchString:
|
|
||||||
var n []string
|
|
||||||
for _, as := range v {
|
|
||||||
n = append(n, as.Value)
|
|
||||||
}
|
|
||||||
|
|
||||||
if i := Find(n, str); i != -1 {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Find returns the index of the first element of arr equal to match,
// or -1 when no element matches.
func Find[T comparable](arr []T, match T) int {
	for idx := 0; idx < len(arr); idx++ {
		if arr[idx] == match {
			return idx
		}
	}
	return -1
}
|
|
||||||
|
|
||||||
// Replace substitutes, in place, every element of arr equal to replace with
// with, and returns the same slice for convenience.
func Replace[T comparable](arr []T, replace, with T) []T {
	for idx := range arr {
		if arr[idx] == replace {
			arr[idx] = with
		}
	}
	return arr
}
|
|
||||||
|
|
||||||
func Glob(pattern string) ([]string, error) {
|
|
||||||
if !strings.Contains(pattern, "**") {
|
|
||||||
return filepath.Glob(pattern)
|
|
||||||
}
|
|
||||||
return Globs(strings.Split(pattern, "**")).Expand()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Expand resolves the "**"-separated pattern fragments into matching paths.
// Each fragment is globbed relative to every match of the previous fragment,
// and every hit is then recursively walked so that "**" matches arbitrarily
// deep subtrees. Results are deduplicated via hitMap.
func (globs Globs) Expand() ([]string, error) {
	// seed: the empty prefix for the first fragment
	var matches = []string{""}
	for _, g := range globs {
		var hits []string
		var hitMap = map[string]bool{}
		for _, match := range matches {
			paths, err := filepath.Glob(match + g)
			if err != nil {
				return nil, err
			}
			for _, path := range paths {
				// walk each glob hit to realize the "**" between this
				// fragment and the next; unreadable entries are skipped
				// (SkipDir), not treated as fatal
				err = filepath.WalkDir(path, func(path string, _ os.DirEntry, err error) error {
					if err != nil {
						return fs.SkipDir
					}
					if _, ok := hitMap[path]; !ok {
						hits = append(hits, path)
						hitMap[path] = true
					}
					return nil
				})
				if err != nil {
					return nil, err
				}
			}
		}
		matches = hits
	}

	// a nil receiver never entered the loop, leaving the seed entry behind;
	// drop it so callers get an empty result
	if globs == nil && len(matches) > 0 && matches[0] == "" {
		matches = matches[1:]
	}

	return matches, nil
}
|
|
||||||
|
|
||||||
func MatchGlobList(target string, globs []string) bool {
|
|
||||||
for _, lGlob := range globs {
|
|
||||||
tGlob, err := glob.Compile(lGlob)
|
|
||||||
if err != nil {
|
|
||||||
log.Warningf("failed to compile glob %s: %v", lGlob, err)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if tGlob.Match(target) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func Copy(srcPath, dstPath string) (err error) {
|
|
||||||
r, err := os.Open(srcPath)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer func(r *os.File) {
|
|
||||||
_ = r.Close()
|
|
||||||
}(r)
|
|
||||||
|
|
||||||
w, err := os.Create(dstPath)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
if c := w.Close(); err == nil {
|
|
||||||
err = c
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
_, err = io.Copy(w, r)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func downloadSRCINFO(pkg, tag string) (*srcinfo.Srcinfo, error) {
|
|
||||||
resp, err := http.Get(fmt.Sprintf(
|
|
||||||
"https://gitlab.archlinux.org/archlinux/packaging/packages/%s/-/raw/%s/.SRCINFO", pkg, tag),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if resp.StatusCode != http.StatusOK {
|
|
||||||
return nil, errors.New(resp.Status)
|
|
||||||
}
|
|
||||||
|
|
||||||
bResp, err := io.ReadAll(resp.Body)
|
|
||||||
defer resp.Body.Close()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
nSrcInfo, err := srcinfo.Parse(string(bResp))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return nSrcInfo, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// getDescendantPIDs returns the PIDs of all transitive children of rootPID,
// computed from a one-shot snapshot of the PPid entries in
// /proc/<pid>/status. Linux-only; processes that vanish between the directory
// listing and the status read are silently skipped.
func getDescendantPIDs(rootPID int) ([]int, error) {
	pidToPpid := map[int]int{}
	var descendants []int

	procEntries, err := os.ReadDir("/proc")
	if err != nil {
		return nil, err
	}

	for _, entry := range procEntries {
		// only numeric directory names in /proc are processes
		if !entry.IsDir() || entry.Name()[0] < '0' || entry.Name()[0] > '9' {
			continue
		}
		pidStr := entry.Name()
		pid, err := strconv.Atoi(pidStr)
		if err != nil {
			continue
		}

		statusPath := filepath.Join("/proc", pidStr, "status")
		data, err := os.ReadFile(statusPath)
		if err != nil {
			// process exited between ReadDir and ReadFile
			continue
		}

		// record the parent PID from the "PPid:\t<n>" line
		for _, line := range strings.Split(string(data), "\n") {
			if strings.HasPrefix(line, "PPid:") {
				fields := strings.Fields(line)
				if len(fields) == 2 {
					ppid, _ := strconv.Atoi(fields[1])
					pidToPpid[pid] = ppid
				}
			}
		}
	}

	// depth-first walk over the snapshot's parent->child relation
	var walk func(int)
	walk = func(current int) {
		for pid, ppid := range pidToPpid {
			if ppid == current {
				descendants = append(descendants, pid)
				walk(pid)
			}
		}
	}
	walk(rootPID)
	return descendants, nil
}
|
|
||||||
|
|
||||||
// MemStats holds per-process memory counters read from /proc/<pid>/status by
// getMemoryStats; both values are in kilobytes, as reported by the kernel.
type MemStats struct {
	RSS  int64 // VmRSS (resident set size), kB
	Swap int64 // VmSwap (swapped-out size), kB
}
|
|
||||||
|
|
||||||
func getMemoryStats(pid int) (MemStats, error) {
|
|
||||||
statusPath := fmt.Sprintf("/proc/%d/status", pid)
|
|
||||||
data, err := os.ReadFile(statusPath)
|
|
||||||
if err != nil {
|
|
||||||
return MemStats{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
stats := MemStats{}
|
|
||||||
for _, line := range strings.Split(string(data), "\n") {
|
|
||||||
if strings.HasPrefix(line, "VmRSS:") {
|
|
||||||
fields := strings.Fields(line)
|
|
||||||
if len(fields) >= 2 {
|
|
||||||
kb, _ := strconv.ParseInt(fields[1], 10, 64)
|
|
||||||
stats.RSS = kb
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(line, "VmSwap:") {
|
|
||||||
fields := strings.Fields(line)
|
|
||||||
if len(fields) >= 2 {
|
|
||||||
kb, _ := strconv.ParseInt(fields[1], 10, 64)
|
|
||||||
stats.Swap = kb
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return stats, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// pollMemoryUsage samples the combined RSS+swap (in kB) of pid and all of its
// descendants every interval until done fires, then sends the observed peak
// on result and returns. Intended to run in its own goroutine alongside a
// build process; sampling errors are logged, not fatal.
func pollMemoryUsage(pid int, interval time.Duration, done chan bool, result chan int64) {
	// running peak over the whole lifetime
	var totalMemory int64

	for {
		select {
		case <-done:
			result <- totalMemory
			return
		default:
			var totalRSS, totalSwap int64

			rootStats, err := getMemoryStats(pid)
			if err == nil {
				totalRSS += rootStats.RSS
				totalSwap += rootStats.Swap
			} else {
				log.Errorf("failed to get memory stats for root process: %v", err)
			}

			descendants, err := getDescendantPIDs(pid)
			if err != nil {
				log.Errorf("failed to get descendants: %v", err)
			}

			// children may exit between listing and sampling; per-child
			// errors are deliberately ignored
			for _, dpid := range descendants {
				stats, err := getMemoryStats(dpid)
				if err == nil {
					totalRSS += stats.RSS
					totalSwap += stats.Swap
				}
			}

			newMemory := totalRSS + totalSwap
			if newMemory > totalMemory {
				totalMemory = newMemory
			}

			time.Sleep(interval)
		}
	}
}
|
|
Reference in New Issue
Block a user