Compare commits
1 Commits
main
...
7d16607a31
| Author | SHA1 | Date | |
|---|---|---|---|
| 7d16607a31 |
@@ -14,7 +14,7 @@ linters-settings:
|
|||||||
disabled-checks:
|
disabled-checks:
|
||||||
- whyNoLint
|
- whyNoLint
|
||||||
- filepathJoin
|
- filepathJoin
|
||||||
mnd:
|
gomnd:
|
||||||
checks:
|
checks:
|
||||||
- argument
|
- argument
|
||||||
- case
|
- case
|
||||||
@@ -32,8 +32,6 @@ linters-settings:
|
|||||||
- '8'
|
- '8'
|
||||||
- '9'
|
- '9'
|
||||||
- '10'
|
- '10'
|
||||||
- '100'
|
|
||||||
- '1000'
|
|
||||||
ignored-functions:
|
ignored-functions:
|
||||||
- strings.SplitN
|
- strings.SplitN
|
||||||
- os.OpenFile
|
- os.OpenFile
|
||||||
@@ -49,46 +47,31 @@ linters-settings:
|
|||||||
allow-unused: false # report any unused nolint directives
|
allow-unused: false # report any unused nolint directives
|
||||||
require-explanation: false # don't require an explanation for nolint directives
|
require-explanation: false # don't require an explanation for nolint directives
|
||||||
require-specific: false # don't require nolint directives to be specific about which linter is being skipped
|
require-specific: false # don't require nolint directives to be specific about which linter is being skipped
|
||||||
tagliatelle:
|
|
||||||
case:
|
|
||||||
use-field-name: true
|
|
||||||
rules:
|
|
||||||
# Any struct tag type can be used.
|
|
||||||
# Support string case: `camel`, `pascal`, `kebab`, `snake`, `upperSnake`, `goCamel`, `goPascal`, `goKebab`, `goSnake`, `upper`, `lower`, `header`.
|
|
||||||
json: snake
|
|
||||||
yaml: snake
|
|
||||||
xml: camel
|
|
||||||
|
|
||||||
linters:
|
linters:
|
||||||
enable-all: true
|
disable-all: true
|
||||||
disable:
|
enable:
|
||||||
- gochecknoglobals
|
- bodyclose
|
||||||
- depguard
|
- dogsled
|
||||||
- gci
|
- dupl
|
||||||
- gofumpt
|
- errcheck
|
||||||
- goimports
|
- exportloopref
|
||||||
- varnamelen
|
- gochecknoinits
|
||||||
- funlen
|
- gocritic
|
||||||
- cyclop
|
- gofmt
|
||||||
- wsl
|
- gomnd
|
||||||
- nosnakecase
|
- goprintffuncname
|
||||||
- nlreturn
|
- gosec
|
||||||
- godot
|
- gosimple
|
||||||
- nestif
|
- govet
|
||||||
- wrapcheck
|
- lll
|
||||||
- gocognit
|
- misspell
|
||||||
- gocyclo
|
|
||||||
- maintidx
|
|
||||||
- nonamedreturns
|
|
||||||
- exhaustivestruct
|
|
||||||
- exhaustruct
|
|
||||||
- forcetypeassert
|
|
||||||
- godox
|
|
||||||
- nakedret
|
|
||||||
- tagalign
|
|
||||||
- maligned
|
|
||||||
# remove for new projects
|
|
||||||
- errname
|
|
||||||
- goerr113
|
|
||||||
- depguard
|
|
||||||
- noctx
|
- noctx
|
||||||
|
- nolintlint
|
||||||
|
- staticcheck
|
||||||
|
- stylecheck
|
||||||
|
- typecheck
|
||||||
|
- unconvert
|
||||||
|
- unparam
|
||||||
|
- unused
|
||||||
|
- whitespace
|
||||||
|
|||||||
116
README.md
116
README.md
@@ -1,41 +1,49 @@
|
|||||||
# ALHP
|
# ALHP
|
||||||
|
|
||||||
[](https://status.alhp.dev)
|
[](https://somegit.dev/anonfunc/ALHP.GO/src/branch/master/LICENSE)
|
||||||
|
[](https://alhp.dev/packages.html)
|
||||||
[](https://goreportcard.com/report/somegit.dev/ALHP/ALHP.GO)
|
[](https://goreportcard.com/report/somegit.dev/ALHP/ALHP.GO)
|
||||||
[](https://pkg.go.dev/somegit.dev/ALHP/ALHP.GO)
|
[](https://pkg.go.dev/somegit.dev/ALHP/ALHP.GO)
|
||||||
[](https://somegit.dev/anonfunc/ALHP.GO/src/branch/master/LICENSE)
|
|
||||||
[](https://liberapay.com/anonfunc/)
|
[](https://liberapay.com/anonfunc/)
|
||||||
|
|
||||||
Buildbot for Archlinux based repos with different
|
Buildbot for Archlinux-based repos build with different
|
||||||
[x86-64 feature levels](https://www.phoronix.com/scan.php?page=news_item&px=GCC-11-x86-64-Feature-Levels), `-O3` and
|
[x86-64 feature levels](https://www.phoronix.com/scan.php?page=news_item&px=GCC-11-x86-64-Feature-Levels), `-O3` and
|
||||||
[LTO](https://en.wikipedia.org/wiki/Interprocedural_optimization).
|
[LTO](https://en.wikipedia.org/wiki/Interprocedural_optimization).
|
||||||
|
|
||||||
> [!WARNING]
|
> ⚠️ NVIDIA graphic users using the **proprietary driver** is highly recommended reading the
|
||||||
> NVIDIA graphics users using the **proprietary driver** are strongly encouraged to read the
|
> [FAQ about Linux kernel modules](#directly-linked-kernel-modules) ⚠️
|
||||||
> [FAQ about Linux kernel modules](#directly-linked-kernel-modules) before enabling any repos.
|
|
||||||
|
|
||||||
---
|
---
|
||||||
<!-- TOC -->
|
<!-- TOC -->
|
||||||
* [Quick Start](#quick-start)
|
|
||||||
|
* [Quickstart](#quickstart)
|
||||||
|
* [Remove ALHP packages](#remove-alhp-packages)
|
||||||
* [FAQ](#faq)
|
* [FAQ](#faq)
|
||||||
|
* [LTO](#lto)
|
||||||
|
* [Linux Kernel packages](#linux-kernel-packages)
|
||||||
|
* [Directly linked kernel modules](#directly-linked-kernel-modules)
|
||||||
|
* [Mirrors](#mirrors)
|
||||||
|
* [What packages are built](#what-packages-are-built)
|
||||||
|
* [Debug symbols](#debug-symbols)
|
||||||
* [Matrix](#matrix)
|
* [Matrix](#matrix)
|
||||||
* [Donations](#donations)
|
* [Donations](#donations)
|
||||||
* [License and Legal](#license-and-legal)
|
* [License and Legal](#license-and-legal)
|
||||||
|
|
||||||
<!-- TOC -->
|
<!-- TOC -->
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Quick Start
|
## Quickstart
|
||||||
|
|
||||||
### 1. Check your system for support
|
### 1. Check your system for support
|
||||||
|
|
||||||
> [!CAUTION]
|
> **Important**: Before you enable any of these repos, check if your system supports the feature level you want to
|
||||||
> Before enabling any of these repos, make sure that your system supports the level of functionality you want to
|
> enable
|
||||||
> enable (e.g. `x86-64-v3`).
|
(e.g. `x86-64-v3`).
|
||||||
> **If you don't check first, you may not be able to boot your system and will have to downgrade any packages you may
|
> **If you don't check beforehand, you might be unable to boot your system anymore and need to downgrade any package
|
||||||
have upgraded.**
|
that you may have upgraded.**
|
||||||
|
|
||||||
Check which feature levels your CPU supports with
|
Check which feature-levels your CPU supports with
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
/lib/ld-linux-x86-64.so.2 --help
|
/lib/ld-linux-x86-64.so.2 --help
|
||||||
@@ -50,14 +58,10 @@ Subdirectories of glibc-hwcaps directories, in priority order:
|
|||||||
x86-64-v2 (supported, searched)
|
x86-64-v2 (supported, searched)
|
||||||
```
|
```
|
||||||
|
|
||||||
> [!NOTE]
|
|
||||||
> ALHP repos for `x86-64-v2`, `x86-64-v3` and `x86-64-v4` are currently available. You can see all available
|
|
||||||
> repositories [here](https://alhp.dev/).
|
|
||||||
|
|
||||||
### 2. Install keyring & mirrorlist
|
### 2. Install keyring & mirrorlist
|
||||||
|
|
||||||
Install [alhp-keyring](https://aur.archlinux.org/packages/alhp-keyring/)
|
Install [alhp-keyring](https://aur.archlinux.org/packages/alhp-keyring/)
|
||||||
and [alhp-mirrorlist](https://aur.archlinux.org/packages/alhp-mirrorlist/) from the **AUR**.
|
and [alhp-mirrorlist](https://aur.archlinux.org/packages/alhp-mirrorlist/) from **AUR**.
|
||||||
|
|
||||||
Example with `yay`:
|
Example with `yay`:
|
||||||
|
|
||||||
@@ -69,16 +73,15 @@ yay -S alhp-keyring alhp-mirrorlist
|
|||||||
|
|
||||||
### 3. Choose a mirror (optional)
|
### 3. Choose a mirror (optional)
|
||||||
|
|
||||||
Edit `/etc/pacman.d/alhp-mirrorlist` and comment in/out the mirrors you want to enable/disable.
|
Edit `/etc/pacman.d/alhp-mirrorlist` and comment out/in mirrors you want to have enabled/disabled. Per default selected
|
||||||
By default, a CDN mirror provided by ALHP is selected.
|
is a cloudflare-based mirror which
|
||||||
> [!NOTE]
|
[*should* provide decent speed worldwide](https://somegit.dev/ALHP/ALHP.GO/issues/38#issuecomment-891).
|
||||||
> `cdn.alhp.dev` and `alhp.dev` are provided directly by ALHP. If you have problems with a mirror,
|
> Note: Only `alhp.dev` is hosted by ALHP directly. If you have problems with a mirror,
|
||||||
> open an issue at [the mirrorlist repo](https://somegit.dev/ALHP/alhp-mirrorlist).
|
> open an issue at [the mirrorlist repo](https://somegit.dev/ALHP/alhp-mirrorlist).
|
||||||
|
|
||||||
### 4. Modify pacman.conf
|
### 4. Modify /etc/pacman.conf
|
||||||
|
|
||||||
Add the ALHP repos to your `/etc/pacman.conf`. Make sure the appropriate ALHP repository is **above** the Archlinux
|
Add the appropriate repos **above** your regular Archlinux repos.
|
||||||
repo.
|
|
||||||
|
|
||||||
Example for `x86-64-v3`:
|
Example for `x86-64-v3`:
|
||||||
|
|
||||||
@@ -86,12 +89,12 @@ Example for `x86-64-v3`:
|
|||||||
[core-x86-64-v3]
|
[core-x86-64-v3]
|
||||||
Include = /etc/pacman.d/alhp-mirrorlist
|
Include = /etc/pacman.d/alhp-mirrorlist
|
||||||
|
|
||||||
[core]
|
|
||||||
Include = /etc/pacman.d/mirrorlist
|
|
||||||
|
|
||||||
[extra-x86-64-v3]
|
[extra-x86-64-v3]
|
||||||
Include = /etc/pacman.d/alhp-mirrorlist
|
Include = /etc/pacman.d/alhp-mirrorlist
|
||||||
|
|
||||||
|
[core]
|
||||||
|
Include = /etc/pacman.d/mirrorlist
|
||||||
|
|
||||||
[extra]
|
[extra]
|
||||||
Include = /etc/pacman.d/mirrorlist
|
Include = /etc/pacman.d/mirrorlist
|
||||||
|
|
||||||
@@ -104,31 +107,31 @@ Include = /etc/pacman.d/mirrorlist
|
|||||||
```
|
```
|
||||||
|
|
||||||
Replace `x86-64-v3` with the x86-64 feature level you want to enable.
|
Replace `x86-64-v3` with the x86-64 feature level you want to enable.
|
||||||
|
> ALHP only builds for `x86-64-v3` and `x86-64-v2` at the moment (list is subject to change). You can see all available
|
||||||
|
> repositories
|
||||||
|
> [here](https://alhp.dev/).
|
||||||
|
|
||||||
> [!TIP]
|
### 5. Update package database and upgrade:
|
||||||
> Multiple layers can be stacked as described in https://somegit.dev/ALHP/ALHP.GO/issues/255#issuecomment-3335.
|
|
||||||
|
|
||||||
### 5. Update package database and upgrade
|
|
||||||
|
|
||||||
```
|
```
|
||||||
pacman -Suy
|
pacman -Suy
|
||||||
```
|
```
|
||||||
|
|
||||||
## FAQ
|
## Remove ALHP packages
|
||||||
|
|
||||||
### Remove ALHP packages
|
To disable ALHP remove all *x86-64-vX* entries in `/etc/pacman.conf` and remove `alhp-keyring` and `alhp-mirrorlist`.
|
||||||
|
|
||||||
To disable ALHP, remove all *x86-64-vX* entries in `/etc/pacman.conf` and remove `alhp-keyring` and `alhp-mirrorlist`.
|
After that you can refresh pacmans databases and downgrade all packages like:
|
||||||
|
|
||||||
After that, you can update pacman's databases and downgrade all packages, like
|
|
||||||
|
|
||||||
```
|
```
|
||||||
pacman -Suuy
|
pacman -Suuy
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## FAQ
|
||||||
|
|
||||||
### LTO
|
### LTO
|
||||||
|
|
||||||
Enabled for all packages built after 04 Nov 2021 12:07:00
|
Enabled for all packages build after 04 Nov 2021 12:07:00
|
||||||
UTC. [More details.](https://somegit.dev/ALHP/ALHP.GO/issues/52)
|
UTC. [More details.](https://somegit.dev/ALHP/ALHP.GO/issues/52)
|
||||||
LTO status is visible per package on the package status page.
|
LTO status is visible per package on the package status page.
|
||||||
|
|
||||||
@@ -138,7 +141,7 @@ LTO status is visible per package on the package status page.
|
|||||||
|
|
||||||
### Directly linked kernel modules
|
### Directly linked kernel modules
|
||||||
|
|
||||||
Due to our increase in pkgrel, building the kernel packages **will break any directly linked modules** such as `nvidia`
|
Due to our increase in pkgrel, building the kernel packages **breaks all directly linked modules** like `nvidia`
|
||||||
(not `nvidia-dkms`) or `virtualbox-host-modules-arch` (not `virtualbox-host-dkms`). **Their respective `dkms`-variant is
|
(not `nvidia-dkms`) or `virtualbox-host-modules-arch` (not `virtualbox-host-dkms`). **Their respective `dkms`-variant is
|
||||||
not affected**. This issue is being tracked in #68, a solution is being worked on.
|
not affected**. This issue is being tracked in #68, a solution is being worked on.
|
||||||
|
|
||||||
@@ -150,21 +153,8 @@ so, [see alhp-mirrorlist for how to become one](https://somegit.dev/ALHP/alhp-mi
|
|||||||
### What packages are built
|
### What packages are built
|
||||||
|
|
||||||
Packages [excluded](https://www.reddit.com/r/archlinux/comments/oflged/alhp_archlinux_recompiled_for_x8664v3_experimental/h4fkinu?utm_source=share&utm_medium=web2x&context=3)
|
Packages [excluded](https://www.reddit.com/r/archlinux/comments/oflged/alhp_archlinux_recompiled_for_x8664v3_experimental/h4fkinu?utm_source=share&utm_medium=web2x&context=3)
|
||||||
from building (besides all `any` architecture packages) are being listed in issue #16.
|
from building (besides all 'any' architecture packages) are being listed in issue #16.
|
||||||
See also [package status page](https://status.alhp.dev) (search for `blacklisted`).
|
Also [package status page](https://alhp.dev/packages.html) (search for `blacklisted`).
|
||||||
|
|
||||||
### Why is package X not up-to-date
|
|
||||||
|
|
||||||
Also relevant for: **I can't find package X / Application X fails to start because it links to an old/newer lib**
|
|
||||||
|
|
||||||
ALHP builds packages **after** they are released in the official Archlinux repos (excluding `[*-testing]`).
|
|
||||||
This will cause packages to be delayed if the current batch contains many packages, or packages that take a while to
|
|
||||||
build (e.g. `chromium`).
|
|
||||||
|
|
||||||
You can always check on the progress of the current build cycle on the [package status page](https://status.alhp.dev).
|
|
||||||
Please refrain from opening issues caused by packages currently in queue/not yet build/not yet moved to the repo.
|
|
||||||
Please keep in mind that large rebuilds such as `openssl` or `python` can take days to complete on our current build
|
|
||||||
hardware.
|
|
||||||
|
|
||||||
### Debug symbols
|
### Debug symbols
|
||||||
|
|
||||||
@@ -176,19 +166,9 @@ To use it, have `debuginfod` installed on your system and add it to your `DEBUGI
|
|||||||
echo "https://debuginfod.alhp.dev" > /etc/debuginfod/alhp.urls
|
echo "https://debuginfod.alhp.dev" > /etc/debuginfod/alhp.urls
|
||||||
```
|
```
|
||||||
|
|
||||||
### Switch between levels
|
|
||||||
|
|
||||||
If you want to switch between levels, e.g. from `x86-64-v3` to `x86-64-v4`, you need to revert to official packages
|
|
||||||
first, and then enable your desired repos again.
|
|
||||||
|
|
||||||
1. Comment out or remove the ALHP repo entries in `/etc/pacman.conf`.
|
|
||||||
2. Downgrade packages with `pacman -Suuy`.
|
|
||||||
3. Clear pacman's package cache with `pacman -Scc`.
|
|
||||||
4. Uncomment/add your desired repos to `/etc/pacman.conf` and update with `pacman -Suy`.
|
|
||||||
|
|
||||||
## Matrix
|
## Matrix
|
||||||
|
|
||||||
For any non-issue questions, or if you just want to chat, ALHP has a Matrix
|
For any non-issue questions or if you just want to chat, ALHP has a matrix
|
||||||
room [here](https://matrix.to/#/#alhp:ofsg.eu) (`#alhp@ofsg.eu`). You can also find me (@idlegandalf)
|
room [here](https://matrix.to/#/#alhp:ofsg.eu) (`#alhp@ofsg.eu`). You can also find me (@idlegandalf)
|
||||||
in `#archlinux:archlinux.org`.
|
in `#archlinux:archlinux.org`.
|
||||||
|
|
||||||
@@ -202,5 +182,5 @@ work ALHP would not be possible.
|
|||||||
|
|
||||||
## License and Legal
|
## License and Legal
|
||||||
|
|
||||||
This project and all of its source code is released under the terms of the GNU General Public License, version 2
|
This project including all of its source files is released under the terms of the GNU General Public License version 2
|
||||||
or any later version. See [LICENSE](https://somegit.dev/ALHP/ALHP.GO/src/branch/master/LICENSE) for details.
|
(or any later version). See [LICENSE](https://somegit.dev/ALHP/ALHP.GO/src/branch/master/LICENSE) for details.
|
||||||
|
|||||||
300
buildmanager.go
300
buildmanager.go
@@ -5,12 +5,13 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/c2h5oh/datasize"
|
"github.com/c2h5oh/datasize"
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
|
||||||
"github.com/sethvargo/go-retry"
|
"github.com/sethvargo/go-retry"
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
|
"html/template"
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"somegit.dev/ALHP/ALHP.GO/ent"
|
||||||
"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
|
"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
@@ -27,12 +28,9 @@ type BuildManager struct {
|
|||||||
building []*ProtoPackage
|
building []*ProtoPackage
|
||||||
buildingLock *sync.RWMutex
|
buildingLock *sync.RWMutex
|
||||||
queueSignal chan struct{}
|
queueSignal chan struct{}
|
||||||
metrics struct {
|
|
||||||
queueSize *prometheus.GaugeVec
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *BuildManager) buildQueue(ctx context.Context, queue []*ProtoPackage) error {
|
func (b *BuildManager) buildQueue(queue []*ProtoPackage, ctx context.Context) error {
|
||||||
var (
|
var (
|
||||||
doneQ []*ProtoPackage
|
doneQ []*ProtoPackage
|
||||||
doneQLock = new(sync.RWMutex)
|
doneQLock = new(sync.RWMutex)
|
||||||
@@ -78,13 +76,12 @@ func (b *BuildManager) buildQueue(ctx context.Context, queue []*ProtoPackage) er
|
|||||||
}
|
}
|
||||||
|
|
||||||
// check if package can be built with current memory limit
|
// check if package can be built with current memory limit
|
||||||
if datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB > conf.Build.MemoryLimit { //nolint:gosec
|
if datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB > conf.Build.MemoryLimit {
|
||||||
log.Warningf("[Q] %s->%s exeeds memory limit: %s->%s", pkg.FullRepo, pkg.Pkgbase,
|
log.Warningf("[Q] %s->%s exeeds memory limit: %s->%s", pkg.FullRepo, pkg.Pkgbase,
|
||||||
datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB, conf.Build.MemoryLimit) //nolint:gosec
|
datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB, conf.Build.MemoryLimit)
|
||||||
doneQLock.Lock()
|
doneQLock.Lock()
|
||||||
doneQ = append(doneQ, pkg)
|
doneQ = append(doneQ, pkg)
|
||||||
doneQLock.Unlock()
|
doneQLock.Unlock()
|
||||||
b.metrics.queueSize.WithLabelValues(pkg.FullRepo, "queued").Dec()
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -93,9 +90,9 @@ func (b *BuildManager) buildQueue(ctx context.Context, queue []*ProtoPackage) er
|
|||||||
b.buildingLock.RUnlock()
|
b.buildingLock.RUnlock()
|
||||||
|
|
||||||
// check if package can be build right now
|
// check if package can be build right now
|
||||||
if currentMemLoad+(datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB) > conf.Build.MemoryLimit { //nolint:gosec
|
if !unknownBuilds && currentMemLoad+(datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB) > conf.Build.MemoryLimit {
|
||||||
log.Debugf("[Q] skipped package with max_rss %s while load %s: %s->%s",
|
log.Debugf("[Q] skipped package with max_rss %s while load %s: %s->%s",
|
||||||
datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB, currentMemLoad, pkg.Pkgbase, pkg.March) //nolint:gosec
|
datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB, currentMemLoad, pkg.Pkgbase, pkg.March)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
@@ -111,18 +108,14 @@ func (b *BuildManager) buildQueue(ctx context.Context, queue []*ProtoPackage) er
|
|||||||
b.building = append(b.building, pkg)
|
b.building = append(b.building, pkg)
|
||||||
b.buildingLock.Unlock()
|
b.buildingLock.Unlock()
|
||||||
queueNoMatch = false
|
queueNoMatch = false
|
||||||
b.metrics.queueSize.WithLabelValues(pkg.FullRepo, "queued").Dec()
|
|
||||||
b.metrics.queueSize.WithLabelValues(pkg.FullRepo, "building").Inc()
|
|
||||||
|
|
||||||
go func(pkg *ProtoPackage) {
|
go func(pkg *ProtoPackage) {
|
||||||
dur, err := pkg.build(ctx)
|
dur, err := pkg.build(ctx)
|
||||||
b.metrics.queueSize.WithLabelValues(pkg.FullRepo, "building").Dec()
|
|
||||||
if err != nil && !errors.Is(err, ErrorNotEligible) {
|
if err != nil && !errors.Is(err, ErrorNotEligible) {
|
||||||
log.Warningf("[Q] error building package %s->%s in %s: %s", pkg.FullRepo, pkg.Pkgbase, dur, err)
|
log.Warningf("[Q] error building package %s->%s in %s: %s", pkg.FullRepo, pkg.Pkgbase, dur, err)
|
||||||
b.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
|
b.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
|
||||||
} else if err == nil {
|
} else if err == nil {
|
||||||
log.Infof("[Q] build successful: %s->%s (%s)", pkg.FullRepo, pkg.Pkgbase, dur)
|
log.Infof("[Q] build successful: %s->%s (%s)", pkg.FullRepo, pkg.Pkgbase, dur)
|
||||||
b.metrics.queueSize.WithLabelValues(pkg.FullRepo, "built").Inc()
|
|
||||||
}
|
}
|
||||||
doneQLock.Lock()
|
doneQLock.Lock()
|
||||||
b.buildingLock.Lock()
|
b.buildingLock.Lock()
|
||||||
@@ -156,7 +149,191 @@ func (b *BuildManager) buildQueue(ctx context.Context, queue []*ProtoPackage) er
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *BuildManager) repoWorker(ctx context.Context, repo string) {
|
func (b *BuildManager) htmlWorker(ctx context.Context) {
|
||||||
|
type Pkg struct {
|
||||||
|
Pkgbase string
|
||||||
|
Status string
|
||||||
|
Class string
|
||||||
|
Skip string
|
||||||
|
Version string
|
||||||
|
Svn2GitVersion string
|
||||||
|
BuildDate string
|
||||||
|
BuildDuration time.Duration
|
||||||
|
BuildMemory *string
|
||||||
|
Checked string
|
||||||
|
Log string
|
||||||
|
LTO bool
|
||||||
|
LTOUnknown bool
|
||||||
|
LTODisabled bool
|
||||||
|
LTOAutoDisabled bool
|
||||||
|
DebugSym bool
|
||||||
|
DebugSymNotAvailable bool
|
||||||
|
DebugSymUnknown bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type Repo struct {
|
||||||
|
Name string
|
||||||
|
Packages []Pkg
|
||||||
|
}
|
||||||
|
|
||||||
|
type March struct {
|
||||||
|
Name string
|
||||||
|
Repos []Repo
|
||||||
|
}
|
||||||
|
|
||||||
|
type tpl struct {
|
||||||
|
March []March
|
||||||
|
Generated string
|
||||||
|
Latest int
|
||||||
|
Failed int
|
||||||
|
Skipped int
|
||||||
|
Queued int
|
||||||
|
LTOEnabled int
|
||||||
|
LTOUnknown int
|
||||||
|
LTODisabled int
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
gen := &tpl{}
|
||||||
|
|
||||||
|
for _, march := range conf.March {
|
||||||
|
addMarch := March{
|
||||||
|
Name: march,
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, repo := range conf.Repos {
|
||||||
|
addRepo := Repo{
|
||||||
|
Name: repo,
|
||||||
|
}
|
||||||
|
|
||||||
|
pkgs := db.DBPackage.Query().Order(ent.Asc(dbpackage.FieldPkgbase)).
|
||||||
|
Where(dbpackage.MarchEQ(march), dbpackage.RepositoryEQ(dbpackage.Repository(repo))).AllX(ctx)
|
||||||
|
|
||||||
|
for _, pkg := range pkgs {
|
||||||
|
addPkg := Pkg{
|
||||||
|
Pkgbase: pkg.Pkgbase,
|
||||||
|
Status: strings.ToUpper(pkg.Status.String()),
|
||||||
|
Class: statusID2string(pkg.Status),
|
||||||
|
Skip: pkg.SkipReason,
|
||||||
|
Version: pkg.RepoVersion,
|
||||||
|
Svn2GitVersion: pkg.Version,
|
||||||
|
}
|
||||||
|
|
||||||
|
if pkg.STime != nil && pkg.UTime != nil {
|
||||||
|
addPkg.BuildDuration = time.Duration(*pkg.STime+*pkg.UTime) * time.Second
|
||||||
|
}
|
||||||
|
|
||||||
|
if !pkg.BuildTimeStart.IsZero() {
|
||||||
|
addPkg.BuildDate = pkg.BuildTimeStart.UTC().Format(time.RFC1123)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !pkg.Updated.IsZero() {
|
||||||
|
addPkg.Checked = pkg.Updated.UTC().Format(time.RFC1123)
|
||||||
|
}
|
||||||
|
|
||||||
|
if pkg.Status == dbpackage.StatusFailed {
|
||||||
|
addPkg.Log = fmt.Sprintf("%s/%s/%s.log", logDir, pkg.March, pkg.Pkgbase)
|
||||||
|
}
|
||||||
|
|
||||||
|
if pkg.MaxRss != nil {
|
||||||
|
hrSize := (datasize.ByteSize(*pkg.MaxRss) * datasize.KB).HumanReadable()
|
||||||
|
addPkg.BuildMemory = &hrSize
|
||||||
|
}
|
||||||
|
|
||||||
|
switch pkg.Lto {
|
||||||
|
case dbpackage.LtoUnknown:
|
||||||
|
if pkg.Status != dbpackage.StatusSkipped && pkg.Status != dbpackage.StatusFailed {
|
||||||
|
addPkg.LTOUnknown = true
|
||||||
|
}
|
||||||
|
case dbpackage.LtoEnabled:
|
||||||
|
addPkg.LTO = true
|
||||||
|
case dbpackage.LtoDisabled:
|
||||||
|
addPkg.LTODisabled = true
|
||||||
|
case dbpackage.LtoAutoDisabled:
|
||||||
|
addPkg.LTOAutoDisabled = true
|
||||||
|
}
|
||||||
|
|
||||||
|
switch pkg.DebugSymbols {
|
||||||
|
case dbpackage.DebugSymbolsUnknown:
|
||||||
|
if pkg.Status != dbpackage.StatusSkipped && pkg.Status != dbpackage.StatusFailed {
|
||||||
|
addPkg.DebugSymUnknown = true
|
||||||
|
}
|
||||||
|
case dbpackage.DebugSymbolsAvailable:
|
||||||
|
addPkg.DebugSym = true
|
||||||
|
case dbpackage.DebugSymbolsNotAvailable:
|
||||||
|
addPkg.DebugSymNotAvailable = true
|
||||||
|
}
|
||||||
|
|
||||||
|
addRepo.Packages = append(addRepo.Packages, addPkg)
|
||||||
|
}
|
||||||
|
addMarch.Repos = append(addMarch.Repos, addRepo)
|
||||||
|
}
|
||||||
|
gen.March = append(gen.March, addMarch)
|
||||||
|
}
|
||||||
|
|
||||||
|
gen.Generated = time.Now().UTC().Format(time.RFC1123)
|
||||||
|
|
||||||
|
var v []struct {
|
||||||
|
Status dbpackage.Status `json:"status"`
|
||||||
|
Count int `json:"count"`
|
||||||
|
}
|
||||||
|
|
||||||
|
db.DBPackage.Query().GroupBy(dbpackage.FieldStatus).Aggregate(ent.Count()).ScanX(ctx, &v)
|
||||||
|
|
||||||
|
for _, c := range v {
|
||||||
|
switch c.Status {
|
||||||
|
case dbpackage.StatusFailed:
|
||||||
|
gen.Failed = c.Count
|
||||||
|
case dbpackage.StatusSkipped:
|
||||||
|
gen.Skipped = c.Count
|
||||||
|
case dbpackage.StatusLatest:
|
||||||
|
gen.Latest = c.Count
|
||||||
|
case dbpackage.StatusQueued:
|
||||||
|
gen.Queued = c.Count
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var v2 []struct {
|
||||||
|
Status dbpackage.Lto `json:"lto"`
|
||||||
|
Count int `json:"count"`
|
||||||
|
}
|
||||||
|
|
||||||
|
db.DBPackage.Query().Where(dbpackage.StatusNEQ(dbpackage.StatusSkipped)).
|
||||||
|
GroupBy(dbpackage.FieldLto).Aggregate(ent.Count()).ScanX(ctx, &v2)
|
||||||
|
|
||||||
|
for _, c := range v2 {
|
||||||
|
switch c.Status {
|
||||||
|
case dbpackage.LtoUnknown:
|
||||||
|
gen.LTOUnknown = c.Count
|
||||||
|
case dbpackage.LtoDisabled, dbpackage.LtoAutoDisabled:
|
||||||
|
gen.LTODisabled += c.Count
|
||||||
|
case dbpackage.LtoEnabled:
|
||||||
|
gen.LTOEnabled = c.Count
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
statusTpl, err := template.ParseFiles("tpl/packages.html")
|
||||||
|
if err != nil {
|
||||||
|
log.Warningf("[HTML] Error parsing template file: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
f, err := os.OpenFile(filepath.Join(conf.Basedir.Repo, "packages.html"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644)
|
||||||
|
if err != nil {
|
||||||
|
log.Warningf("[HTML] Erro ropening output file: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
err = statusTpl.Execute(f, gen)
|
||||||
|
if err != nil {
|
||||||
|
log.Warningf("[HTML] Error filling template: %v", err)
|
||||||
|
}
|
||||||
|
_ = f.Close()
|
||||||
|
|
||||||
|
time.Sleep(time.Minute * 5)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *BuildManager) repoWorker(repo string) {
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case pkgL := <-b.repoAdd[repo]:
|
case pkgL := <-b.repoAdd[repo]:
|
||||||
@@ -168,7 +345,7 @@ func (b *BuildManager) repoWorker(ctx context.Context, repo string) {
|
|||||||
|
|
||||||
args := []string{"-s", "-v", "-p", "-n", filepath.Join(conf.Basedir.Repo, repo, "os", conf.Arch, repo) + ".db.tar.xz"}
|
args := []string{"-s", "-v", "-p", "-n", filepath.Join(conf.Basedir.Repo, repo, "os", conf.Arch, repo) + ".db.tar.xz"}
|
||||||
args = append(args, toAdd...)
|
args = append(args, toAdd...)
|
||||||
cmd := exec.CommandContext(ctx, "repo-add", args...)
|
cmd := exec.Command("repo-add", args...)
|
||||||
res, err := cmd.CombinedOutput()
|
res, err := cmd.CombinedOutput()
|
||||||
log.Debug(string(res))
|
log.Debug(string(res))
|
||||||
if err != nil && cmd.ProcessState.ExitCode() != 1 {
|
if err != nil && cmd.ProcessState.ExitCode() != 1 {
|
||||||
@@ -176,7 +353,7 @@ func (b *BuildManager) repoWorker(ctx context.Context, repo string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, pkg := range pkgL {
|
for _, pkg := range pkgL {
|
||||||
err = pkg.toDBPackage(ctx, true)
|
err = pkg.toDBPackage(true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warningf("error getting db entry for %s: %v", pkg.Pkgbase, err)
|
log.Warningf("error getting db entry for %s: %v", pkg.Pkgbase, err)
|
||||||
continue
|
continue
|
||||||
@@ -194,12 +371,10 @@ func (b *BuildManager) repoWorker(ctx context.Context, repo string) {
|
|||||||
} else {
|
} else {
|
||||||
pkgUpd = pkgUpd.SetDebugSymbols(dbpackage.DebugSymbolsNotAvailable)
|
pkgUpd = pkgUpd.SetDebugSymbols(dbpackage.DebugSymbolsNotAvailable)
|
||||||
}
|
}
|
||||||
if pkg.DBPackage, err = pkgUpd.Save(ctx); err != nil {
|
pkg.DBPackage = pkgUpd.SaveX(context.Background())
|
||||||
log.Error(err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
cmd = exec.CommandContext(ctx, "paccache", "-rc", filepath.Join(conf.Basedir.Repo, repo, "os", conf.Arch), "-k", "1") //nolint:gosec
|
cmd = exec.Command("paccache", "-rc", filepath.Join(conf.Basedir.Repo, repo, "os", conf.Arch), "-k", "1") //nolint:gosec
|
||||||
res, err = cmd.CombinedOutput()
|
res, err = cmd.CombinedOutput()
|
||||||
log.Debug(string(res))
|
log.Debug(string(res))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -221,12 +396,6 @@ func (b *BuildManager) repoWorker(ctx context.Context, repo string) {
|
|||||||
log.Warningf("[%s/%s] unable to find files: %v", pkg.FullRepo, pkg.Pkgbase, err)
|
log.Warningf("[%s/%s] unable to find files: %v", pkg.FullRepo, pkg.Pkgbase, err)
|
||||||
continue
|
continue
|
||||||
} else if len(pkg.PkgFiles) == 0 {
|
} else if len(pkg.PkgFiles) == 0 {
|
||||||
if pkg.DBPackage != nil {
|
|
||||||
err = pkg.DBPackage.Update().ClearRepoVersion().ClearTagRev().Exec(ctx)
|
|
||||||
if err != nil {
|
|
||||||
log.Error(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -245,7 +414,7 @@ func (b *BuildManager) repoWorker(ctx context.Context, repo string) {
|
|||||||
b.repoWG.Add(1)
|
b.repoWG.Add(1)
|
||||||
args := []string{"-s", "-v", filepath.Join(conf.Basedir.Repo, pkg.FullRepo, "os", conf.Arch, pkg.FullRepo) + ".db.tar.xz"}
|
args := []string{"-s", "-v", filepath.Join(conf.Basedir.Repo, pkg.FullRepo, "os", conf.Arch, pkg.FullRepo) + ".db.tar.xz"}
|
||||||
args = append(args, realPkgs...)
|
args = append(args, realPkgs...)
|
||||||
cmd := exec.CommandContext(ctx, "repo-remove", args...)
|
cmd := exec.Command("repo-remove", args...)
|
||||||
res, err := cmd.CombinedOutput()
|
res, err := cmd.CombinedOutput()
|
||||||
log.Debug(string(res))
|
log.Debug(string(res))
|
||||||
if err != nil && cmd.ProcessState.ExitCode() == 1 {
|
if err != nil && cmd.ProcessState.ExitCode() == 1 {
|
||||||
@@ -253,10 +422,7 @@ func (b *BuildManager) repoWorker(ctx context.Context, repo string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if pkg.DBPackage != nil {
|
if pkg.DBPackage != nil {
|
||||||
err = pkg.DBPackage.Update().ClearRepoVersion().ClearTagRev().Exec(ctx)
|
_ = pkg.DBPackage.Update().ClearRepoVersion().ClearTagRev().Exec(context.Background())
|
||||||
if err != nil {
|
|
||||||
log.Error(err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, file := range pkg.PkgFiles {
|
for _, file := range pkg.PkgFiles {
|
||||||
@@ -279,17 +445,18 @@ func (b *BuildManager) syncWorker(ctx context.Context) error {
|
|||||||
log.Fatalf("error creating work dir %s: %v", conf.Basedir.Work, err)
|
log.Fatalf("error creating work dir %s: %v", conf.Basedir.Work, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
gitPath := filepath.Join(conf.Basedir.Work, stateDir)
|
|
||||||
for {
|
for {
|
||||||
|
gitPath := filepath.Join(conf.Basedir.Work, stateDir)
|
||||||
|
|
||||||
if _, err := os.Stat(gitPath); os.IsNotExist(err) {
|
if _, err := os.Stat(gitPath); os.IsNotExist(err) {
|
||||||
cmd := exec.CommandContext(ctx, "git", "clone", "--depth=1", conf.StateRepo, gitPath) //nolint:gosec
|
cmd := exec.Command("git", "clone", "--depth=1", conf.StateRepo, gitPath) //nolint:gosec
|
||||||
res, err := cmd.CombinedOutput()
|
res, err := cmd.CombinedOutput()
|
||||||
log.Debug(string(res))
|
log.Debug(string(res))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("error cloning state repo: %v", err)
|
log.Fatalf("error cloning state repo: %v", err)
|
||||||
}
|
}
|
||||||
} else if err == nil {
|
} else if err == nil {
|
||||||
cmd := exec.CommandContext(ctx, "git", "reset", "--hard")
|
cmd := exec.Command("git", "reset", "--hard")
|
||||||
cmd.Dir = gitPath
|
cmd.Dir = gitPath
|
||||||
res, err := cmd.CombinedOutput()
|
res, err := cmd.CombinedOutput()
|
||||||
log.Debug(string(res))
|
log.Debug(string(res))
|
||||||
@@ -297,7 +464,7 @@ func (b *BuildManager) syncWorker(ctx context.Context) error {
|
|||||||
log.Fatalf("error reseting state repo: %v", err)
|
log.Fatalf("error reseting state repo: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
cmd = exec.CommandContext(ctx, "git", "pull")
|
cmd = exec.Command("git", "pull")
|
||||||
cmd.Dir = gitPath
|
cmd.Dir = gitPath
|
||||||
res, err = cmd.CombinedOutput()
|
res, err = cmd.CombinedOutput()
|
||||||
log.Debug(string(res))
|
log.Debug(string(res))
|
||||||
@@ -311,8 +478,9 @@ func (b *BuildManager) syncWorker(ctx context.Context) error {
|
|||||||
for _, repo := range repos {
|
for _, repo := range repos {
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
splitRepo := strings.Split(repo, "-")
|
splitRepo := strings.Split(repo, "-")
|
||||||
|
repo := repo
|
||||||
go func() {
|
go func() {
|
||||||
err := housekeeping(ctx, splitRepo[0], strings.Join(splitRepo[1:], "-"), wg)
|
err := housekeeping(splitRepo[0], strings.Join(splitRepo[1:], "-"), wg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warningf("[%s] housekeeping failed: %v", repo, err)
|
log.Warningf("[%s] housekeeping failed: %v", repo, err)
|
||||||
}
|
}
|
||||||
@@ -320,11 +488,10 @@ func (b *BuildManager) syncWorker(ctx context.Context) error {
|
|||||||
}
|
}
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
|
|
||||||
err := logHK(ctx)
|
err := logHK()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warningf("log-housekeeping failed: %v", err)
|
log.Warningf("log-housekeeping failed: %v", err)
|
||||||
}
|
}
|
||||||
debugHK()
|
|
||||||
|
|
||||||
// fetch updates between sync runs
|
// fetch updates between sync runs
|
||||||
b.alpmMutex.Lock()
|
b.alpmMutex.Lock()
|
||||||
@@ -333,8 +500,8 @@ func (b *BuildManager) syncWorker(ctx context.Context) error {
|
|||||||
log.Fatalf("error releasing ALPM handle: %v", err)
|
log.Fatalf("error releasing ALPM handle: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := retry.Fibonacci(ctx, 1*time.Second, func(_ context.Context) error {
|
if err := retry.Fibonacci(ctx, 1*time.Second, func(ctx context.Context) error {
|
||||||
if err := setupChroot(ctx); err != nil {
|
if err := setupChroot(); err != nil {
|
||||||
log.Warningf("unable to upgrade chroot, trying again later")
|
log.Warningf("unable to upgrade chroot, trying again later")
|
||||||
return retry.RetryableError(err)
|
return retry.RetryableError(err)
|
||||||
}
|
}
|
||||||
@@ -350,21 +517,20 @@ func (b *BuildManager) syncWorker(ctx context.Context) error {
|
|||||||
}
|
}
|
||||||
b.alpmMutex.Unlock()
|
b.alpmMutex.Unlock()
|
||||||
|
|
||||||
queue, err := b.genQueue(ctx)
|
queue, err := b.genQueue()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("error building queue: %v", err)
|
log.Errorf("error building queue: %v", err)
|
||||||
return err
|
} else {
|
||||||
}
|
log.Debugf("build-queue with %d items", len(queue))
|
||||||
|
err = b.buildQueue(queue, ctx)
|
||||||
log.Debugf("build-queue with %d items", len(queue))
|
if err != nil {
|
||||||
err = b.buildQueue(ctx, queue)
|
return err
|
||||||
if err != nil {
|
}
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if ctx.Err() == nil {
|
if ctx.Err() == nil {
|
||||||
for _, repo := range repos {
|
for _, repo := range repos {
|
||||||
err = movePackagesLive(ctx, repo)
|
err = movePackagesLive(repo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("[%s] error moving packages live: %v", repo, err)
|
log.Errorf("[%s] error moving packages live: %v", repo, err)
|
||||||
}
|
}
|
||||||
@@ -373,13 +539,12 @@ func (b *BuildManager) syncWorker(ctx context.Context) error {
|
|||||||
return ctx.Err()
|
return ctx.Err()
|
||||||
}
|
}
|
||||||
|
|
||||||
b.metrics.queueSize.Reset()
|
|
||||||
log.Debugf("build-cycle finished")
|
log.Debugf("build-cycle finished")
|
||||||
time.Sleep(time.Duration(*checkInterval) * time.Minute)
|
time.Sleep(time.Duration(*checkInterval) * time.Minute)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *BuildManager) genQueue(ctx context.Context) ([]*ProtoPackage, error) {
|
func (b *BuildManager) genQueue() ([]*ProtoPackage, error) {
|
||||||
stateFiles, err := Glob(filepath.Join(conf.Basedir.Work, stateDir, "**/*"))
|
stateFiles, err := Glob(filepath.Join(conf.Basedir.Work, stateDir, "**/*"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("error scanning for state-files: %w", err)
|
return nil, fmt.Errorf("error scanning for state-files: %w", err)
|
||||||
@@ -388,7 +553,7 @@ func (b *BuildManager) genQueue(ctx context.Context) ([]*ProtoPackage, error) {
|
|||||||
var pkgbuilds []*ProtoPackage
|
var pkgbuilds []*ProtoPackage
|
||||||
for _, stateFile := range stateFiles {
|
for _, stateFile := range stateFiles {
|
||||||
stat, err := os.Stat(stateFile)
|
stat, err := os.Stat(stateFile)
|
||||||
if err != nil || stat.IsDir() || strings.Contains(stateFile, ".git") || strings.Contains(stateFile, "README.md") {
|
if err != nil || stat.IsDir() || strings.Contains(stateFile, ".git") {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -425,28 +590,19 @@ func (b *BuildManager) genQueue(ctx context.Context) ([]*ProtoPackage, error) {
|
|||||||
Arch: arch,
|
Arch: arch,
|
||||||
}
|
}
|
||||||
|
|
||||||
err = pkg.toDBPackage(ctx, false)
|
err = pkg.toDBPackage(false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warningf("[QG] error getting/creating dbpackage %s: %v", state.Pkgbase, err)
|
log.Warningf("[QG] error getting/creating dbpackage %s: %v", state.Pkgbase, err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if !pkg.isAvailable(ctx, alpmHandle) {
|
if !pkg.isAvailable(alpmHandle) {
|
||||||
log.Debugf("[QG] %s->%s not available on mirror, skipping build", pkg.FullRepo, pkg.Pkgbase)
|
log.Debugf("[QG] %s->%s not available on mirror, skipping build", pkg.FullRepo, pkg.Pkgbase)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
aBuild, err := pkg.IsBuilt()
|
|
||||||
if err != nil {
|
|
||||||
log.Warningf("[QG] %s->%s error determining built packages: %v", pkg.FullRepo, pkg.Pkgbase, err)
|
|
||||||
}
|
|
||||||
if aBuild {
|
|
||||||
log.Infof("[QG] %s->%s already built, skipping build", pkg.FullRepo, pkg.Pkgbase)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if pkg.DBPackage == nil {
|
if pkg.DBPackage == nil {
|
||||||
err = pkg.toDBPackage(ctx, true)
|
err = pkg.toDBPackage(true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warningf("[QG] error getting/creating dbpackage %s: %v", state.Pkgbase, err)
|
log.Warningf("[QG] error getting/creating dbpackage %s: %v", state.Pkgbase, err)
|
||||||
continue
|
continue
|
||||||
@@ -457,22 +613,12 @@ func (b *BuildManager) genQueue(ctx context.Context) ([]*ProtoPackage, error) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// try download .SRCINFO from repo
|
if !pkg.isEligible(context.Background()) {
|
||||||
srcInfo, err := downloadSRCINFO(pkg.DBPackage.Pkgbase, state.TagRev)
|
|
||||||
if err == nil {
|
|
||||||
pkg.Srcinfo = srcInfo
|
|
||||||
}
|
|
||||||
|
|
||||||
if !pkg.isEligible(ctx) {
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
pkg.DBPackage, err = pkg.DBPackage.Update().SetStatus(dbpackage.StatusQueued).Save(ctx)
|
pkg.DBPackage = pkg.DBPackage.Update().SetStatus(dbpackage.StatusQueued).SaveX(context.Background())
|
||||||
if err != nil {
|
|
||||||
log.Warningf("[QG] error updating dbpackage %s: %v", state.Pkgbase, err)
|
|
||||||
}
|
|
||||||
pkgbuilds = append(pkgbuilds, pkg)
|
pkgbuilds = append(pkgbuilds, pkg)
|
||||||
b.metrics.queueSize.WithLabelValues(pkg.FullRepo, "queued").Inc()
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -33,6 +33,16 @@ blacklist:
|
|||||||
- llvm
|
- llvm
|
||||||
- rust
|
- rust
|
||||||
|
|
||||||
|
status:
|
||||||
|
class:
|
||||||
|
skipped: "secondary"
|
||||||
|
queued: "warning"
|
||||||
|
latest: "primary"
|
||||||
|
failed: "danger"
|
||||||
|
signing: "success"
|
||||||
|
building: "info"
|
||||||
|
unknown: "dark"
|
||||||
|
|
||||||
build:
|
build:
|
||||||
# number of workers total
|
# number of workers total
|
||||||
worker: 4
|
worker: 4
|
||||||
@@ -44,7 +54,4 @@ build:
|
|||||||
memory_limit: "16gb"
|
memory_limit: "16gb"
|
||||||
|
|
||||||
logging:
|
logging:
|
||||||
level: INFO
|
level: INFO
|
||||||
|
|
||||||
metrics:
|
|
||||||
port: 9568
|
|
||||||
@@ -7,7 +7,6 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
"reflect"
|
|
||||||
|
|
||||||
"somegit.dev/ALHP/ALHP.GO/ent/migrate"
|
"somegit.dev/ALHP/ALHP.GO/ent/migrate"
|
||||||
|
|
||||||
@@ -28,7 +27,9 @@ type Client struct {
|
|||||||
|
|
||||||
// NewClient creates a new client configured with the given options.
|
// NewClient creates a new client configured with the given options.
|
||||||
func NewClient(opts ...Option) *Client {
|
func NewClient(opts ...Option) *Client {
|
||||||
client := &Client{config: newConfig(opts...)}
|
cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}}
|
||||||
|
cfg.options(opts...)
|
||||||
|
client := &Client{config: cfg}
|
||||||
client.init()
|
client.init()
|
||||||
return client
|
return client
|
||||||
}
|
}
|
||||||
@@ -56,13 +57,6 @@ type (
|
|||||||
Option func(*config)
|
Option func(*config)
|
||||||
)
|
)
|
||||||
|
|
||||||
// newConfig creates a new config for the client.
|
|
||||||
func newConfig(opts ...Option) config {
|
|
||||||
cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}}
|
|
||||||
cfg.options(opts...)
|
|
||||||
return cfg
|
|
||||||
}
|
|
||||||
|
|
||||||
// options applies the options on the config object.
|
// options applies the options on the config object.
|
||||||
func (c *config) options(opts ...Option) {
|
func (c *config) options(opts ...Option) {
|
||||||
for _, opt := range opts {
|
for _, opt := range opts {
|
||||||
@@ -110,14 +104,11 @@ func Open(driverName, dataSourceName string, options ...Option) (*Client, error)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// ErrTxStarted is returned when trying to start a new transaction from a transactional client.
|
|
||||||
var ErrTxStarted = errors.New("ent: cannot start a transaction within a transaction")
|
|
||||||
|
|
||||||
// Tx returns a new transactional client. The provided context
|
// Tx returns a new transactional client. The provided context
|
||||||
// is used until the transaction is committed or rolled back.
|
// is used until the transaction is committed or rolled back.
|
||||||
func (c *Client) Tx(ctx context.Context) (*Tx, error) {
|
func (c *Client) Tx(ctx context.Context) (*Tx, error) {
|
||||||
if _, ok := c.driver.(*txDriver); ok {
|
if _, ok := c.driver.(*txDriver); ok {
|
||||||
return nil, ErrTxStarted
|
return nil, errors.New("ent: cannot start a transaction within a transaction")
|
||||||
}
|
}
|
||||||
tx, err := newTx(ctx, c.driver)
|
tx, err := newTx(ctx, c.driver)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -229,21 +220,6 @@ func (c *DBPackageClient) CreateBulk(builders ...*DBPackageCreate) *DBPackageCre
|
|||||||
return &DBPackageCreateBulk{config: c.config, builders: builders}
|
return &DBPackageCreateBulk{config: c.config, builders: builders}
|
||||||
}
|
}
|
||||||
|
|
||||||
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
|
|
||||||
// a builder and applies setFunc on it.
|
|
||||||
func (c *DBPackageClient) MapCreateBulk(slice any, setFunc func(*DBPackageCreate, int)) *DBPackageCreateBulk {
|
|
||||||
rv := reflect.ValueOf(slice)
|
|
||||||
if rv.Kind() != reflect.Slice {
|
|
||||||
return &DBPackageCreateBulk{err: fmt.Errorf("calling to DBPackageClient.MapCreateBulk with wrong type %T, need slice", slice)}
|
|
||||||
}
|
|
||||||
builders := make([]*DBPackageCreate, rv.Len())
|
|
||||||
for i := 0; i < rv.Len(); i++ {
|
|
||||||
builders[i] = c.Create()
|
|
||||||
setFunc(builders[i], i)
|
|
||||||
}
|
|
||||||
return &DBPackageCreateBulk{config: c.config, builders: builders}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update returns an update builder for DBPackage.
|
// Update returns an update builder for DBPackage.
|
||||||
func (c *DBPackageClient) Update() *DBPackageUpdate {
|
func (c *DBPackageClient) Update() *DBPackageUpdate {
|
||||||
mutation := newDBPackageMutation(c.config, OpUpdate)
|
mutation := newDBPackageMutation(c.config, OpUpdate)
|
||||||
@@ -251,8 +227,8 @@ func (c *DBPackageClient) Update() *DBPackageUpdate {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// UpdateOne returns an update builder for the given entity.
|
// UpdateOne returns an update builder for the given entity.
|
||||||
func (c *DBPackageClient) UpdateOne(_m *DBPackage) *DBPackageUpdateOne {
|
func (c *DBPackageClient) UpdateOne(dp *DBPackage) *DBPackageUpdateOne {
|
||||||
mutation := newDBPackageMutation(c.config, OpUpdateOne, withDBPackage(_m))
|
mutation := newDBPackageMutation(c.config, OpUpdateOne, withDBPackage(dp))
|
||||||
return &DBPackageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
return &DBPackageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -269,8 +245,8 @@ func (c *DBPackageClient) Delete() *DBPackageDelete {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// DeleteOne returns a builder for deleting the given entity.
|
// DeleteOne returns a builder for deleting the given entity.
|
||||||
func (c *DBPackageClient) DeleteOne(_m *DBPackage) *DBPackageDeleteOne {
|
func (c *DBPackageClient) DeleteOne(dp *DBPackage) *DBPackageDeleteOne {
|
||||||
return c.DeleteOneID(_m.ID)
|
return c.DeleteOneID(dp.ID)
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteOneID returns a builder for deleting the given entity by its id.
|
// DeleteOneID returns a builder for deleting the given entity by its id.
|
||||||
|
|||||||
118
ent/dbpackage.go
118
ent/dbpackage.go
@@ -83,7 +83,7 @@ func (*DBPackage) scanValues(columns []string) ([]any, error) {
|
|||||||
|
|
||||||
// assignValues assigns the values that were returned from sql.Rows (after scanning)
|
// assignValues assigns the values that were returned from sql.Rows (after scanning)
|
||||||
// to the DBPackage fields.
|
// to the DBPackage fields.
|
||||||
func (_m *DBPackage) assignValues(columns []string, values []any) error {
|
func (dp *DBPackage) assignValues(columns []string, values []any) error {
|
||||||
if m, n := len(values), len(columns); m < n {
|
if m, n := len(values), len(columns); m < n {
|
||||||
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
|
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
|
||||||
}
|
}
|
||||||
@@ -94,18 +94,18 @@ func (_m *DBPackage) assignValues(columns []string, values []any) error {
|
|||||||
if !ok {
|
if !ok {
|
||||||
return fmt.Errorf("unexpected type %T for field id", value)
|
return fmt.Errorf("unexpected type %T for field id", value)
|
||||||
}
|
}
|
||||||
_m.ID = int(value.Int64)
|
dp.ID = int(value.Int64)
|
||||||
case dbpackage.FieldPkgbase:
|
case dbpackage.FieldPkgbase:
|
||||||
if value, ok := values[i].(*sql.NullString); !ok {
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
return fmt.Errorf("unexpected type %T for field pkgbase", values[i])
|
return fmt.Errorf("unexpected type %T for field pkgbase", values[i])
|
||||||
} else if value.Valid {
|
} else if value.Valid {
|
||||||
_m.Pkgbase = value.String
|
dp.Pkgbase = value.String
|
||||||
}
|
}
|
||||||
case dbpackage.FieldPackages:
|
case dbpackage.FieldPackages:
|
||||||
if value, ok := values[i].(*[]byte); !ok {
|
if value, ok := values[i].(*[]byte); !ok {
|
||||||
return fmt.Errorf("unexpected type %T for field packages", values[i])
|
return fmt.Errorf("unexpected type %T for field packages", values[i])
|
||||||
} else if value != nil && len(*value) > 0 {
|
} else if value != nil && len(*value) > 0 {
|
||||||
if err := json.Unmarshal(*value, &_m.Packages); err != nil {
|
if err := json.Unmarshal(*value, &dp.Packages); err != nil {
|
||||||
return fmt.Errorf("unmarshal field packages: %w", err)
|
return fmt.Errorf("unmarshal field packages: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -113,118 +113,118 @@ func (_m *DBPackage) assignValues(columns []string, values []any) error {
|
|||||||
if value, ok := values[i].(*sql.NullString); !ok {
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
return fmt.Errorf("unexpected type %T for field status", values[i])
|
return fmt.Errorf("unexpected type %T for field status", values[i])
|
||||||
} else if value.Valid {
|
} else if value.Valid {
|
||||||
_m.Status = dbpackage.Status(value.String)
|
dp.Status = dbpackage.Status(value.String)
|
||||||
}
|
}
|
||||||
case dbpackage.FieldSkipReason:
|
case dbpackage.FieldSkipReason:
|
||||||
if value, ok := values[i].(*sql.NullString); !ok {
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
return fmt.Errorf("unexpected type %T for field skip_reason", values[i])
|
return fmt.Errorf("unexpected type %T for field skip_reason", values[i])
|
||||||
} else if value.Valid {
|
} else if value.Valid {
|
||||||
_m.SkipReason = value.String
|
dp.SkipReason = value.String
|
||||||
}
|
}
|
||||||
case dbpackage.FieldRepository:
|
case dbpackage.FieldRepository:
|
||||||
if value, ok := values[i].(*sql.NullString); !ok {
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
return fmt.Errorf("unexpected type %T for field repository", values[i])
|
return fmt.Errorf("unexpected type %T for field repository", values[i])
|
||||||
} else if value.Valid {
|
} else if value.Valid {
|
||||||
_m.Repository = dbpackage.Repository(value.String)
|
dp.Repository = dbpackage.Repository(value.String)
|
||||||
}
|
}
|
||||||
case dbpackage.FieldMarch:
|
case dbpackage.FieldMarch:
|
||||||
if value, ok := values[i].(*sql.NullString); !ok {
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
return fmt.Errorf("unexpected type %T for field march", values[i])
|
return fmt.Errorf("unexpected type %T for field march", values[i])
|
||||||
} else if value.Valid {
|
} else if value.Valid {
|
||||||
_m.March = value.String
|
dp.March = value.String
|
||||||
}
|
}
|
||||||
case dbpackage.FieldVersion:
|
case dbpackage.FieldVersion:
|
||||||
if value, ok := values[i].(*sql.NullString); !ok {
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
return fmt.Errorf("unexpected type %T for field version", values[i])
|
return fmt.Errorf("unexpected type %T for field version", values[i])
|
||||||
} else if value.Valid {
|
} else if value.Valid {
|
||||||
_m.Version = value.String
|
dp.Version = value.String
|
||||||
}
|
}
|
||||||
case dbpackage.FieldRepoVersion:
|
case dbpackage.FieldRepoVersion:
|
||||||
if value, ok := values[i].(*sql.NullString); !ok {
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
return fmt.Errorf("unexpected type %T for field repo_version", values[i])
|
return fmt.Errorf("unexpected type %T for field repo_version", values[i])
|
||||||
} else if value.Valid {
|
} else if value.Valid {
|
||||||
_m.RepoVersion = value.String
|
dp.RepoVersion = value.String
|
||||||
}
|
}
|
||||||
case dbpackage.FieldBuildTimeStart:
|
case dbpackage.FieldBuildTimeStart:
|
||||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
return fmt.Errorf("unexpected type %T for field build_time_start", values[i])
|
return fmt.Errorf("unexpected type %T for field build_time_start", values[i])
|
||||||
} else if value.Valid {
|
} else if value.Valid {
|
||||||
_m.BuildTimeStart = value.Time
|
dp.BuildTimeStart = value.Time
|
||||||
}
|
}
|
||||||
case dbpackage.FieldUpdated:
|
case dbpackage.FieldUpdated:
|
||||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
return fmt.Errorf("unexpected type %T for field updated", values[i])
|
return fmt.Errorf("unexpected type %T for field updated", values[i])
|
||||||
} else if value.Valid {
|
} else if value.Valid {
|
||||||
_m.Updated = value.Time
|
dp.Updated = value.Time
|
||||||
}
|
}
|
||||||
case dbpackage.FieldLto:
|
case dbpackage.FieldLto:
|
||||||
if value, ok := values[i].(*sql.NullString); !ok {
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
return fmt.Errorf("unexpected type %T for field lto", values[i])
|
return fmt.Errorf("unexpected type %T for field lto", values[i])
|
||||||
} else if value.Valid {
|
} else if value.Valid {
|
||||||
_m.Lto = dbpackage.Lto(value.String)
|
dp.Lto = dbpackage.Lto(value.String)
|
||||||
}
|
}
|
||||||
case dbpackage.FieldLastVersionBuild:
|
case dbpackage.FieldLastVersionBuild:
|
||||||
if value, ok := values[i].(*sql.NullString); !ok {
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
return fmt.Errorf("unexpected type %T for field last_version_build", values[i])
|
return fmt.Errorf("unexpected type %T for field last_version_build", values[i])
|
||||||
} else if value.Valid {
|
} else if value.Valid {
|
||||||
_m.LastVersionBuild = value.String
|
dp.LastVersionBuild = value.String
|
||||||
}
|
}
|
||||||
case dbpackage.FieldLastVerified:
|
case dbpackage.FieldLastVerified:
|
||||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
return fmt.Errorf("unexpected type %T for field last_verified", values[i])
|
return fmt.Errorf("unexpected type %T for field last_verified", values[i])
|
||||||
} else if value.Valid {
|
} else if value.Valid {
|
||||||
_m.LastVerified = value.Time
|
dp.LastVerified = value.Time
|
||||||
}
|
}
|
||||||
case dbpackage.FieldDebugSymbols:
|
case dbpackage.FieldDebugSymbols:
|
||||||
if value, ok := values[i].(*sql.NullString); !ok {
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
return fmt.Errorf("unexpected type %T for field debug_symbols", values[i])
|
return fmt.Errorf("unexpected type %T for field debug_symbols", values[i])
|
||||||
} else if value.Valid {
|
} else if value.Valid {
|
||||||
_m.DebugSymbols = dbpackage.DebugSymbols(value.String)
|
dp.DebugSymbols = dbpackage.DebugSymbols(value.String)
|
||||||
}
|
}
|
||||||
case dbpackage.FieldMaxRss:
|
case dbpackage.FieldMaxRss:
|
||||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
return fmt.Errorf("unexpected type %T for field max_rss", values[i])
|
return fmt.Errorf("unexpected type %T for field max_rss", values[i])
|
||||||
} else if value.Valid {
|
} else if value.Valid {
|
||||||
_m.MaxRss = new(int64)
|
dp.MaxRss = new(int64)
|
||||||
*_m.MaxRss = value.Int64
|
*dp.MaxRss = value.Int64
|
||||||
}
|
}
|
||||||
case dbpackage.FieldUTime:
|
case dbpackage.FieldUTime:
|
||||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
return fmt.Errorf("unexpected type %T for field u_time", values[i])
|
return fmt.Errorf("unexpected type %T for field u_time", values[i])
|
||||||
} else if value.Valid {
|
} else if value.Valid {
|
||||||
_m.UTime = new(int64)
|
dp.UTime = new(int64)
|
||||||
*_m.UTime = value.Int64
|
*dp.UTime = value.Int64
|
||||||
}
|
}
|
||||||
case dbpackage.FieldSTime:
|
case dbpackage.FieldSTime:
|
||||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
return fmt.Errorf("unexpected type %T for field s_time", values[i])
|
return fmt.Errorf("unexpected type %T for field s_time", values[i])
|
||||||
} else if value.Valid {
|
} else if value.Valid {
|
||||||
_m.STime = new(int64)
|
dp.STime = new(int64)
|
||||||
*_m.STime = value.Int64
|
*dp.STime = value.Int64
|
||||||
}
|
}
|
||||||
case dbpackage.FieldIoIn:
|
case dbpackage.FieldIoIn:
|
||||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
return fmt.Errorf("unexpected type %T for field io_in", values[i])
|
return fmt.Errorf("unexpected type %T for field io_in", values[i])
|
||||||
} else if value.Valid {
|
} else if value.Valid {
|
||||||
_m.IoIn = new(int64)
|
dp.IoIn = new(int64)
|
||||||
*_m.IoIn = value.Int64
|
*dp.IoIn = value.Int64
|
||||||
}
|
}
|
||||||
case dbpackage.FieldIoOut:
|
case dbpackage.FieldIoOut:
|
||||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
return fmt.Errorf("unexpected type %T for field io_out", values[i])
|
return fmt.Errorf("unexpected type %T for field io_out", values[i])
|
||||||
} else if value.Valid {
|
} else if value.Valid {
|
||||||
_m.IoOut = new(int64)
|
dp.IoOut = new(int64)
|
||||||
*_m.IoOut = value.Int64
|
*dp.IoOut = value.Int64
|
||||||
}
|
}
|
||||||
case dbpackage.FieldTagRev:
|
case dbpackage.FieldTagRev:
|
||||||
if value, ok := values[i].(*sql.NullString); !ok {
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
return fmt.Errorf("unexpected type %T for field tag_rev", values[i])
|
return fmt.Errorf("unexpected type %T for field tag_rev", values[i])
|
||||||
} else if value.Valid {
|
} else if value.Valid {
|
||||||
_m.TagRev = new(string)
|
dp.TagRev = new(string)
|
||||||
*_m.TagRev = value.String
|
*dp.TagRev = value.String
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
_m.selectValues.Set(columns[i], values[i])
|
dp.selectValues.Set(columns[i], values[i])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
@@ -232,101 +232,101 @@ func (_m *DBPackage) assignValues(columns []string, values []any) error {
|
|||||||
|
|
||||||
// Value returns the ent.Value that was dynamically selected and assigned to the DBPackage.
|
// Value returns the ent.Value that was dynamically selected and assigned to the DBPackage.
|
||||||
// This includes values selected through modifiers, order, etc.
|
// This includes values selected through modifiers, order, etc.
|
||||||
func (_m *DBPackage) Value(name string) (ent.Value, error) {
|
func (dp *DBPackage) Value(name string) (ent.Value, error) {
|
||||||
return _m.selectValues.Get(name)
|
return dp.selectValues.Get(name)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update returns a builder for updating this DBPackage.
|
// Update returns a builder for updating this DBPackage.
|
||||||
// Note that you need to call DBPackage.Unwrap() before calling this method if this DBPackage
|
// Note that you need to call DBPackage.Unwrap() before calling this method if this DBPackage
|
||||||
// was returned from a transaction, and the transaction was committed or rolled back.
|
// was returned from a transaction, and the transaction was committed or rolled back.
|
||||||
func (_m *DBPackage) Update() *DBPackageUpdateOne {
|
func (dp *DBPackage) Update() *DBPackageUpdateOne {
|
||||||
return NewDBPackageClient(_m.config).UpdateOne(_m)
|
return NewDBPackageClient(dp.config).UpdateOne(dp)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Unwrap unwraps the DBPackage entity that was returned from a transaction after it was closed,
|
// Unwrap unwraps the DBPackage entity that was returned from a transaction after it was closed,
|
||||||
// so that all future queries will be executed through the driver which created the transaction.
|
// so that all future queries will be executed through the driver which created the transaction.
|
||||||
func (_m *DBPackage) Unwrap() *DBPackage {
|
func (dp *DBPackage) Unwrap() *DBPackage {
|
||||||
_tx, ok := _m.config.driver.(*txDriver)
|
_tx, ok := dp.config.driver.(*txDriver)
|
||||||
if !ok {
|
if !ok {
|
||||||
panic("ent: DBPackage is not a transactional entity")
|
panic("ent: DBPackage is not a transactional entity")
|
||||||
}
|
}
|
||||||
_m.config.driver = _tx.drv
|
dp.config.driver = _tx.drv
|
||||||
return _m
|
return dp
|
||||||
}
|
}
|
||||||
|
|
||||||
// String implements the fmt.Stringer.
|
// String implements the fmt.Stringer.
|
||||||
func (_m *DBPackage) String() string {
|
func (dp *DBPackage) String() string {
|
||||||
var builder strings.Builder
|
var builder strings.Builder
|
||||||
builder.WriteString("DBPackage(")
|
builder.WriteString("DBPackage(")
|
||||||
builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
|
builder.WriteString(fmt.Sprintf("id=%v, ", dp.ID))
|
||||||
builder.WriteString("pkgbase=")
|
builder.WriteString("pkgbase=")
|
||||||
builder.WriteString(_m.Pkgbase)
|
builder.WriteString(dp.Pkgbase)
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
builder.WriteString("packages=")
|
builder.WriteString("packages=")
|
||||||
builder.WriteString(fmt.Sprintf("%v", _m.Packages))
|
builder.WriteString(fmt.Sprintf("%v", dp.Packages))
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
builder.WriteString("status=")
|
builder.WriteString("status=")
|
||||||
builder.WriteString(fmt.Sprintf("%v", _m.Status))
|
builder.WriteString(fmt.Sprintf("%v", dp.Status))
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
builder.WriteString("skip_reason=")
|
builder.WriteString("skip_reason=")
|
||||||
builder.WriteString(_m.SkipReason)
|
builder.WriteString(dp.SkipReason)
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
builder.WriteString("repository=")
|
builder.WriteString("repository=")
|
||||||
builder.WriteString(fmt.Sprintf("%v", _m.Repository))
|
builder.WriteString(fmt.Sprintf("%v", dp.Repository))
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
builder.WriteString("march=")
|
builder.WriteString("march=")
|
||||||
builder.WriteString(_m.March)
|
builder.WriteString(dp.March)
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
builder.WriteString("version=")
|
builder.WriteString("version=")
|
||||||
builder.WriteString(_m.Version)
|
builder.WriteString(dp.Version)
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
builder.WriteString("repo_version=")
|
builder.WriteString("repo_version=")
|
||||||
builder.WriteString(_m.RepoVersion)
|
builder.WriteString(dp.RepoVersion)
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
builder.WriteString("build_time_start=")
|
builder.WriteString("build_time_start=")
|
||||||
builder.WriteString(_m.BuildTimeStart.Format(time.ANSIC))
|
builder.WriteString(dp.BuildTimeStart.Format(time.ANSIC))
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
builder.WriteString("updated=")
|
builder.WriteString("updated=")
|
||||||
builder.WriteString(_m.Updated.Format(time.ANSIC))
|
builder.WriteString(dp.Updated.Format(time.ANSIC))
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
builder.WriteString("lto=")
|
builder.WriteString("lto=")
|
||||||
builder.WriteString(fmt.Sprintf("%v", _m.Lto))
|
builder.WriteString(fmt.Sprintf("%v", dp.Lto))
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
builder.WriteString("last_version_build=")
|
builder.WriteString("last_version_build=")
|
||||||
builder.WriteString(_m.LastVersionBuild)
|
builder.WriteString(dp.LastVersionBuild)
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
builder.WriteString("last_verified=")
|
builder.WriteString("last_verified=")
|
||||||
builder.WriteString(_m.LastVerified.Format(time.ANSIC))
|
builder.WriteString(dp.LastVerified.Format(time.ANSIC))
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
builder.WriteString("debug_symbols=")
|
builder.WriteString("debug_symbols=")
|
||||||
builder.WriteString(fmt.Sprintf("%v", _m.DebugSymbols))
|
builder.WriteString(fmt.Sprintf("%v", dp.DebugSymbols))
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
if v := _m.MaxRss; v != nil {
|
if v := dp.MaxRss; v != nil {
|
||||||
builder.WriteString("max_rss=")
|
builder.WriteString("max_rss=")
|
||||||
builder.WriteString(fmt.Sprintf("%v", *v))
|
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||||
}
|
}
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
if v := _m.UTime; v != nil {
|
if v := dp.UTime; v != nil {
|
||||||
builder.WriteString("u_time=")
|
builder.WriteString("u_time=")
|
||||||
builder.WriteString(fmt.Sprintf("%v", *v))
|
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||||
}
|
}
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
if v := _m.STime; v != nil {
|
if v := dp.STime; v != nil {
|
||||||
builder.WriteString("s_time=")
|
builder.WriteString("s_time=")
|
||||||
builder.WriteString(fmt.Sprintf("%v", *v))
|
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||||
}
|
}
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
if v := _m.IoIn; v != nil {
|
if v := dp.IoIn; v != nil {
|
||||||
builder.WriteString("io_in=")
|
builder.WriteString("io_in=")
|
||||||
builder.WriteString(fmt.Sprintf("%v", *v))
|
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||||
}
|
}
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
if v := _m.IoOut; v != nil {
|
if v := dp.IoOut; v != nil {
|
||||||
builder.WriteString("io_out=")
|
builder.WriteString("io_out=")
|
||||||
builder.WriteString(fmt.Sprintf("%v", *v))
|
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||||
}
|
}
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
if v := _m.TagRev; v != nil {
|
if v := dp.TagRev; v != nil {
|
||||||
builder.WriteString("tag_rev=")
|
builder.WriteString("tag_rev=")
|
||||||
builder.WriteString(*v)
|
builder.WriteString(*v)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -109,7 +109,7 @@ const DefaultStatus = StatusUnknown
|
|||||||
const (
|
const (
|
||||||
StatusSkipped Status = "skipped"
|
StatusSkipped Status = "skipped"
|
||||||
StatusFailed Status = "failed"
|
StatusFailed Status = "failed"
|
||||||
StatusBuilt Status = "built"
|
StatusBuild Status = "build"
|
||||||
StatusQueued Status = "queued"
|
StatusQueued Status = "queued"
|
||||||
StatusDelayed Status = "delayed"
|
StatusDelayed Status = "delayed"
|
||||||
StatusBuilding Status = "building"
|
StatusBuilding Status = "building"
|
||||||
@@ -125,7 +125,7 @@ func (s Status) String() string {
|
|||||||
// StatusValidator is a validator for the "status" field enum values. It is called by the builders before save.
|
// StatusValidator is a validator for the "status" field enum values. It is called by the builders before save.
|
||||||
func StatusValidator(s Status) error {
|
func StatusValidator(s Status) error {
|
||||||
switch s {
|
switch s {
|
||||||
case StatusSkipped, StatusFailed, StatusBuilt, StatusQueued, StatusDelayed, StatusBuilding, StatusLatest, StatusSigning, StatusUnknown:
|
case StatusSkipped, StatusFailed, StatusBuild, StatusQueued, StatusDelayed, StatusBuilding, StatusLatest, StatusSigning, StatusUnknown:
|
||||||
return nil
|
return nil
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf("dbpackage: invalid enum value for status field: %q", s)
|
return fmt.Errorf("dbpackage: invalid enum value for status field: %q", s)
|
||||||
|
|||||||
@@ -1156,15 +1156,32 @@ func TagRevContainsFold(v string) predicate.DBPackage {
|
|||||||
|
|
||||||
// And groups predicates with the AND operator between them.
|
// And groups predicates with the AND operator between them.
|
||||||
func And(predicates ...predicate.DBPackage) predicate.DBPackage {
|
func And(predicates ...predicate.DBPackage) predicate.DBPackage {
|
||||||
return predicate.DBPackage(sql.AndPredicates(predicates...))
|
return predicate.DBPackage(func(s *sql.Selector) {
|
||||||
|
s1 := s.Clone().SetP(nil)
|
||||||
|
for _, p := range predicates {
|
||||||
|
p(s1)
|
||||||
|
}
|
||||||
|
s.Where(s1.P())
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Or groups predicates with the OR operator between them.
|
// Or groups predicates with the OR operator between them.
|
||||||
func Or(predicates ...predicate.DBPackage) predicate.DBPackage {
|
func Or(predicates ...predicate.DBPackage) predicate.DBPackage {
|
||||||
return predicate.DBPackage(sql.OrPredicates(predicates...))
|
return predicate.DBPackage(func(s *sql.Selector) {
|
||||||
|
s1 := s.Clone().SetP(nil)
|
||||||
|
for i, p := range predicates {
|
||||||
|
if i > 0 {
|
||||||
|
s1.Or()
|
||||||
|
}
|
||||||
|
p(s1)
|
||||||
|
}
|
||||||
|
s.Where(s1.P())
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Not applies the not operator on the given predicate.
|
// Not applies the not operator on the given predicate.
|
||||||
func Not(p predicate.DBPackage) predicate.DBPackage {
|
func Not(p predicate.DBPackage) predicate.DBPackage {
|
||||||
return predicate.DBPackage(sql.NotPredicates(p))
|
return predicate.DBPackage(func(s *sql.Selector) {
|
||||||
|
p(s.Not())
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -21,267 +21,267 @@ type DBPackageCreate struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// SetPkgbase sets the "pkgbase" field.
|
// SetPkgbase sets the "pkgbase" field.
|
||||||
func (_c *DBPackageCreate) SetPkgbase(v string) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetPkgbase(s string) *DBPackageCreate {
|
||||||
_c.mutation.SetPkgbase(v)
|
dpc.mutation.SetPkgbase(s)
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetPackages sets the "packages" field.
|
// SetPackages sets the "packages" field.
|
||||||
func (_c *DBPackageCreate) SetPackages(v []string) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetPackages(s []string) *DBPackageCreate {
|
||||||
_c.mutation.SetPackages(v)
|
dpc.mutation.SetPackages(s)
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetStatus sets the "status" field.
|
// SetStatus sets the "status" field.
|
||||||
func (_c *DBPackageCreate) SetStatus(v dbpackage.Status) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetStatus(d dbpackage.Status) *DBPackageCreate {
|
||||||
_c.mutation.SetStatus(v)
|
dpc.mutation.SetStatus(d)
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetNillableStatus sets the "status" field if the given value is not nil.
|
// SetNillableStatus sets the "status" field if the given value is not nil.
|
||||||
func (_c *DBPackageCreate) SetNillableStatus(v *dbpackage.Status) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetNillableStatus(d *dbpackage.Status) *DBPackageCreate {
|
||||||
if v != nil {
|
if d != nil {
|
||||||
_c.SetStatus(*v)
|
dpc.SetStatus(*d)
|
||||||
}
|
}
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetSkipReason sets the "skip_reason" field.
|
// SetSkipReason sets the "skip_reason" field.
|
||||||
func (_c *DBPackageCreate) SetSkipReason(v string) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetSkipReason(s string) *DBPackageCreate {
|
||||||
_c.mutation.SetSkipReason(v)
|
dpc.mutation.SetSkipReason(s)
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetNillableSkipReason sets the "skip_reason" field if the given value is not nil.
|
// SetNillableSkipReason sets the "skip_reason" field if the given value is not nil.
|
||||||
func (_c *DBPackageCreate) SetNillableSkipReason(v *string) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetNillableSkipReason(s *string) *DBPackageCreate {
|
||||||
if v != nil {
|
if s != nil {
|
||||||
_c.SetSkipReason(*v)
|
dpc.SetSkipReason(*s)
|
||||||
}
|
}
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetRepository sets the "repository" field.
|
// SetRepository sets the "repository" field.
|
||||||
func (_c *DBPackageCreate) SetRepository(v dbpackage.Repository) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetRepository(d dbpackage.Repository) *DBPackageCreate {
|
||||||
_c.mutation.SetRepository(v)
|
dpc.mutation.SetRepository(d)
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetMarch sets the "march" field.
|
// SetMarch sets the "march" field.
|
||||||
func (_c *DBPackageCreate) SetMarch(v string) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetMarch(s string) *DBPackageCreate {
|
||||||
_c.mutation.SetMarch(v)
|
dpc.mutation.SetMarch(s)
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetVersion sets the "version" field.
|
// SetVersion sets the "version" field.
|
||||||
func (_c *DBPackageCreate) SetVersion(v string) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetVersion(s string) *DBPackageCreate {
|
||||||
_c.mutation.SetVersion(v)
|
dpc.mutation.SetVersion(s)
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetNillableVersion sets the "version" field if the given value is not nil.
|
// SetNillableVersion sets the "version" field if the given value is not nil.
|
||||||
func (_c *DBPackageCreate) SetNillableVersion(v *string) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetNillableVersion(s *string) *DBPackageCreate {
|
||||||
if v != nil {
|
if s != nil {
|
||||||
_c.SetVersion(*v)
|
dpc.SetVersion(*s)
|
||||||
}
|
}
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetRepoVersion sets the "repo_version" field.
|
// SetRepoVersion sets the "repo_version" field.
|
||||||
func (_c *DBPackageCreate) SetRepoVersion(v string) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetRepoVersion(s string) *DBPackageCreate {
|
||||||
_c.mutation.SetRepoVersion(v)
|
dpc.mutation.SetRepoVersion(s)
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetNillableRepoVersion sets the "repo_version" field if the given value is not nil.
|
// SetNillableRepoVersion sets the "repo_version" field if the given value is not nil.
|
||||||
func (_c *DBPackageCreate) SetNillableRepoVersion(v *string) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetNillableRepoVersion(s *string) *DBPackageCreate {
|
||||||
if v != nil {
|
if s != nil {
|
||||||
_c.SetRepoVersion(*v)
|
dpc.SetRepoVersion(*s)
|
||||||
}
|
}
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetBuildTimeStart sets the "build_time_start" field.
|
// SetBuildTimeStart sets the "build_time_start" field.
|
||||||
func (_c *DBPackageCreate) SetBuildTimeStart(v time.Time) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetBuildTimeStart(t time.Time) *DBPackageCreate {
|
||||||
_c.mutation.SetBuildTimeStart(v)
|
dpc.mutation.SetBuildTimeStart(t)
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetNillableBuildTimeStart sets the "build_time_start" field if the given value is not nil.
|
// SetNillableBuildTimeStart sets the "build_time_start" field if the given value is not nil.
|
||||||
func (_c *DBPackageCreate) SetNillableBuildTimeStart(v *time.Time) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetNillableBuildTimeStart(t *time.Time) *DBPackageCreate {
|
||||||
if v != nil {
|
if t != nil {
|
||||||
_c.SetBuildTimeStart(*v)
|
dpc.SetBuildTimeStart(*t)
|
||||||
}
|
}
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetUpdated sets the "updated" field.
|
// SetUpdated sets the "updated" field.
|
||||||
func (_c *DBPackageCreate) SetUpdated(v time.Time) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetUpdated(t time.Time) *DBPackageCreate {
|
||||||
_c.mutation.SetUpdated(v)
|
dpc.mutation.SetUpdated(t)
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetNillableUpdated sets the "updated" field if the given value is not nil.
|
// SetNillableUpdated sets the "updated" field if the given value is not nil.
|
||||||
func (_c *DBPackageCreate) SetNillableUpdated(v *time.Time) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetNillableUpdated(t *time.Time) *DBPackageCreate {
|
||||||
if v != nil {
|
if t != nil {
|
||||||
_c.SetUpdated(*v)
|
dpc.SetUpdated(*t)
|
||||||
}
|
}
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetLto sets the "lto" field.
|
// SetLto sets the "lto" field.
|
||||||
func (_c *DBPackageCreate) SetLto(v dbpackage.Lto) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetLto(d dbpackage.Lto) *DBPackageCreate {
|
||||||
_c.mutation.SetLto(v)
|
dpc.mutation.SetLto(d)
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetNillableLto sets the "lto" field if the given value is not nil.
|
// SetNillableLto sets the "lto" field if the given value is not nil.
|
||||||
func (_c *DBPackageCreate) SetNillableLto(v *dbpackage.Lto) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetNillableLto(d *dbpackage.Lto) *DBPackageCreate {
|
||||||
if v != nil {
|
if d != nil {
|
||||||
_c.SetLto(*v)
|
dpc.SetLto(*d)
|
||||||
}
|
}
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetLastVersionBuild sets the "last_version_build" field.
|
// SetLastVersionBuild sets the "last_version_build" field.
|
||||||
func (_c *DBPackageCreate) SetLastVersionBuild(v string) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetLastVersionBuild(s string) *DBPackageCreate {
|
||||||
_c.mutation.SetLastVersionBuild(v)
|
dpc.mutation.SetLastVersionBuild(s)
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetNillableLastVersionBuild sets the "last_version_build" field if the given value is not nil.
|
// SetNillableLastVersionBuild sets the "last_version_build" field if the given value is not nil.
|
||||||
func (_c *DBPackageCreate) SetNillableLastVersionBuild(v *string) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetNillableLastVersionBuild(s *string) *DBPackageCreate {
|
||||||
if v != nil {
|
if s != nil {
|
||||||
_c.SetLastVersionBuild(*v)
|
dpc.SetLastVersionBuild(*s)
|
||||||
}
|
}
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetLastVerified sets the "last_verified" field.
|
// SetLastVerified sets the "last_verified" field.
|
||||||
func (_c *DBPackageCreate) SetLastVerified(v time.Time) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetLastVerified(t time.Time) *DBPackageCreate {
|
||||||
_c.mutation.SetLastVerified(v)
|
dpc.mutation.SetLastVerified(t)
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetNillableLastVerified sets the "last_verified" field if the given value is not nil.
|
// SetNillableLastVerified sets the "last_verified" field if the given value is not nil.
|
||||||
func (_c *DBPackageCreate) SetNillableLastVerified(v *time.Time) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetNillableLastVerified(t *time.Time) *DBPackageCreate {
|
||||||
if v != nil {
|
if t != nil {
|
||||||
_c.SetLastVerified(*v)
|
dpc.SetLastVerified(*t)
|
||||||
}
|
}
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetDebugSymbols sets the "debug_symbols" field.
|
// SetDebugSymbols sets the "debug_symbols" field.
|
||||||
func (_c *DBPackageCreate) SetDebugSymbols(v dbpackage.DebugSymbols) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetDebugSymbols(ds dbpackage.DebugSymbols) *DBPackageCreate {
|
||||||
_c.mutation.SetDebugSymbols(v)
|
dpc.mutation.SetDebugSymbols(ds)
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetNillableDebugSymbols sets the "debug_symbols" field if the given value is not nil.
|
// SetNillableDebugSymbols sets the "debug_symbols" field if the given value is not nil.
|
||||||
func (_c *DBPackageCreate) SetNillableDebugSymbols(v *dbpackage.DebugSymbols) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetNillableDebugSymbols(ds *dbpackage.DebugSymbols) *DBPackageCreate {
|
||||||
if v != nil {
|
if ds != nil {
|
||||||
_c.SetDebugSymbols(*v)
|
dpc.SetDebugSymbols(*ds)
|
||||||
}
|
}
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetMaxRss sets the "max_rss" field.
|
// SetMaxRss sets the "max_rss" field.
|
||||||
func (_c *DBPackageCreate) SetMaxRss(v int64) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetMaxRss(i int64) *DBPackageCreate {
|
||||||
_c.mutation.SetMaxRss(v)
|
dpc.mutation.SetMaxRss(i)
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetNillableMaxRss sets the "max_rss" field if the given value is not nil.
|
// SetNillableMaxRss sets the "max_rss" field if the given value is not nil.
|
||||||
func (_c *DBPackageCreate) SetNillableMaxRss(v *int64) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetNillableMaxRss(i *int64) *DBPackageCreate {
|
||||||
if v != nil {
|
if i != nil {
|
||||||
_c.SetMaxRss(*v)
|
dpc.SetMaxRss(*i)
|
||||||
}
|
}
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetUTime sets the "u_time" field.
|
// SetUTime sets the "u_time" field.
|
||||||
func (_c *DBPackageCreate) SetUTime(v int64) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetUTime(i int64) *DBPackageCreate {
|
||||||
_c.mutation.SetUTime(v)
|
dpc.mutation.SetUTime(i)
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetNillableUTime sets the "u_time" field if the given value is not nil.
|
// SetNillableUTime sets the "u_time" field if the given value is not nil.
|
||||||
func (_c *DBPackageCreate) SetNillableUTime(v *int64) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetNillableUTime(i *int64) *DBPackageCreate {
|
||||||
if v != nil {
|
if i != nil {
|
||||||
_c.SetUTime(*v)
|
dpc.SetUTime(*i)
|
||||||
}
|
}
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetSTime sets the "s_time" field.
|
// SetSTime sets the "s_time" field.
|
||||||
func (_c *DBPackageCreate) SetSTime(v int64) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetSTime(i int64) *DBPackageCreate {
|
||||||
_c.mutation.SetSTime(v)
|
dpc.mutation.SetSTime(i)
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetNillableSTime sets the "s_time" field if the given value is not nil.
|
// SetNillableSTime sets the "s_time" field if the given value is not nil.
|
||||||
func (_c *DBPackageCreate) SetNillableSTime(v *int64) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetNillableSTime(i *int64) *DBPackageCreate {
|
||||||
if v != nil {
|
if i != nil {
|
||||||
_c.SetSTime(*v)
|
dpc.SetSTime(*i)
|
||||||
}
|
}
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetIoIn sets the "io_in" field.
|
// SetIoIn sets the "io_in" field.
|
||||||
func (_c *DBPackageCreate) SetIoIn(v int64) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetIoIn(i int64) *DBPackageCreate {
|
||||||
_c.mutation.SetIoIn(v)
|
dpc.mutation.SetIoIn(i)
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetNillableIoIn sets the "io_in" field if the given value is not nil.
|
// SetNillableIoIn sets the "io_in" field if the given value is not nil.
|
||||||
func (_c *DBPackageCreate) SetNillableIoIn(v *int64) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetNillableIoIn(i *int64) *DBPackageCreate {
|
||||||
if v != nil {
|
if i != nil {
|
||||||
_c.SetIoIn(*v)
|
dpc.SetIoIn(*i)
|
||||||
}
|
}
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetIoOut sets the "io_out" field.
|
// SetIoOut sets the "io_out" field.
|
||||||
func (_c *DBPackageCreate) SetIoOut(v int64) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetIoOut(i int64) *DBPackageCreate {
|
||||||
_c.mutation.SetIoOut(v)
|
dpc.mutation.SetIoOut(i)
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetNillableIoOut sets the "io_out" field if the given value is not nil.
|
// SetNillableIoOut sets the "io_out" field if the given value is not nil.
|
||||||
func (_c *DBPackageCreate) SetNillableIoOut(v *int64) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetNillableIoOut(i *int64) *DBPackageCreate {
|
||||||
if v != nil {
|
if i != nil {
|
||||||
_c.SetIoOut(*v)
|
dpc.SetIoOut(*i)
|
||||||
}
|
}
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetTagRev sets the "tag_rev" field.
|
// SetTagRev sets the "tag_rev" field.
|
||||||
func (_c *DBPackageCreate) SetTagRev(v string) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetTagRev(s string) *DBPackageCreate {
|
||||||
_c.mutation.SetTagRev(v)
|
dpc.mutation.SetTagRev(s)
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetNillableTagRev sets the "tag_rev" field if the given value is not nil.
|
// SetNillableTagRev sets the "tag_rev" field if the given value is not nil.
|
||||||
func (_c *DBPackageCreate) SetNillableTagRev(v *string) *DBPackageCreate {
|
func (dpc *DBPackageCreate) SetNillableTagRev(s *string) *DBPackageCreate {
|
||||||
if v != nil {
|
if s != nil {
|
||||||
_c.SetTagRev(*v)
|
dpc.SetTagRev(*s)
|
||||||
}
|
}
|
||||||
return _c
|
return dpc
|
||||||
}
|
}
|
||||||
|
|
||||||
// Mutation returns the DBPackageMutation object of the builder.
|
// Mutation returns the DBPackageMutation object of the builder.
|
||||||
func (_c *DBPackageCreate) Mutation() *DBPackageMutation {
|
func (dpc *DBPackageCreate) Mutation() *DBPackageMutation {
|
||||||
return _c.mutation
|
return dpc.mutation
|
||||||
}
|
}
|
||||||
|
|
||||||
// Save creates the DBPackage in the database.
|
// Save creates the DBPackage in the database.
|
||||||
func (_c *DBPackageCreate) Save(ctx context.Context) (*DBPackage, error) {
|
func (dpc *DBPackageCreate) Save(ctx context.Context) (*DBPackage, error) {
|
||||||
_c.defaults()
|
dpc.defaults()
|
||||||
return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks)
|
return withHooks(ctx, dpc.sqlSave, dpc.mutation, dpc.hooks)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SaveX calls Save and panics if Save returns an error.
|
// SaveX calls Save and panics if Save returns an error.
|
||||||
func (_c *DBPackageCreate) SaveX(ctx context.Context) *DBPackage {
|
func (dpc *DBPackageCreate) SaveX(ctx context.Context) *DBPackage {
|
||||||
v, err := _c.Save(ctx)
|
v, err := dpc.Save(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
@@ -289,71 +289,71 @@ func (_c *DBPackageCreate) SaveX(ctx context.Context) *DBPackage {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Exec executes the query.
|
// Exec executes the query.
|
||||||
func (_c *DBPackageCreate) Exec(ctx context.Context) error {
|
func (dpc *DBPackageCreate) Exec(ctx context.Context) error {
|
||||||
_, err := _c.Save(ctx)
|
_, err := dpc.Save(ctx)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// ExecX is like Exec, but panics if an error occurs.
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
func (_c *DBPackageCreate) ExecX(ctx context.Context) {
|
func (dpc *DBPackageCreate) ExecX(ctx context.Context) {
|
||||||
if err := _c.Exec(ctx); err != nil {
|
if err := dpc.Exec(ctx); err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// defaults sets the default values of the builder before save.
|
// defaults sets the default values of the builder before save.
|
||||||
func (_c *DBPackageCreate) defaults() {
|
func (dpc *DBPackageCreate) defaults() {
|
||||||
if _, ok := _c.mutation.Status(); !ok {
|
if _, ok := dpc.mutation.Status(); !ok {
|
||||||
v := dbpackage.DefaultStatus
|
v := dbpackage.DefaultStatus
|
||||||
_c.mutation.SetStatus(v)
|
dpc.mutation.SetStatus(v)
|
||||||
}
|
}
|
||||||
if _, ok := _c.mutation.Lto(); !ok {
|
if _, ok := dpc.mutation.Lto(); !ok {
|
||||||
v := dbpackage.DefaultLto
|
v := dbpackage.DefaultLto
|
||||||
_c.mutation.SetLto(v)
|
dpc.mutation.SetLto(v)
|
||||||
}
|
}
|
||||||
if _, ok := _c.mutation.DebugSymbols(); !ok {
|
if _, ok := dpc.mutation.DebugSymbols(); !ok {
|
||||||
v := dbpackage.DefaultDebugSymbols
|
v := dbpackage.DefaultDebugSymbols
|
||||||
_c.mutation.SetDebugSymbols(v)
|
dpc.mutation.SetDebugSymbols(v)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// check runs all checks and user-defined validators on the builder.
|
// check runs all checks and user-defined validators on the builder.
|
||||||
func (_c *DBPackageCreate) check() error {
|
func (dpc *DBPackageCreate) check() error {
|
||||||
if _, ok := _c.mutation.Pkgbase(); !ok {
|
if _, ok := dpc.mutation.Pkgbase(); !ok {
|
||||||
return &ValidationError{Name: "pkgbase", err: errors.New(`ent: missing required field "DBPackage.pkgbase"`)}
|
return &ValidationError{Name: "pkgbase", err: errors.New(`ent: missing required field "DBPackage.pkgbase"`)}
|
||||||
}
|
}
|
||||||
if v, ok := _c.mutation.Pkgbase(); ok {
|
if v, ok := dpc.mutation.Pkgbase(); ok {
|
||||||
if err := dbpackage.PkgbaseValidator(v); err != nil {
|
if err := dbpackage.PkgbaseValidator(v); err != nil {
|
||||||
return &ValidationError{Name: "pkgbase", err: fmt.Errorf(`ent: validator failed for field "DBPackage.pkgbase": %w`, err)}
|
return &ValidationError{Name: "pkgbase", err: fmt.Errorf(`ent: validator failed for field "DBPackage.pkgbase": %w`, err)}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if v, ok := _c.mutation.Status(); ok {
|
if v, ok := dpc.mutation.Status(); ok {
|
||||||
if err := dbpackage.StatusValidator(v); err != nil {
|
if err := dbpackage.StatusValidator(v); err != nil {
|
||||||
return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "DBPackage.status": %w`, err)}
|
return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "DBPackage.status": %w`, err)}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if _, ok := _c.mutation.Repository(); !ok {
|
if _, ok := dpc.mutation.Repository(); !ok {
|
||||||
return &ValidationError{Name: "repository", err: errors.New(`ent: missing required field "DBPackage.repository"`)}
|
return &ValidationError{Name: "repository", err: errors.New(`ent: missing required field "DBPackage.repository"`)}
|
||||||
}
|
}
|
||||||
if v, ok := _c.mutation.Repository(); ok {
|
if v, ok := dpc.mutation.Repository(); ok {
|
||||||
if err := dbpackage.RepositoryValidator(v); err != nil {
|
if err := dbpackage.RepositoryValidator(v); err != nil {
|
||||||
return &ValidationError{Name: "repository", err: fmt.Errorf(`ent: validator failed for field "DBPackage.repository": %w`, err)}
|
return &ValidationError{Name: "repository", err: fmt.Errorf(`ent: validator failed for field "DBPackage.repository": %w`, err)}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if _, ok := _c.mutation.March(); !ok {
|
if _, ok := dpc.mutation.March(); !ok {
|
||||||
return &ValidationError{Name: "march", err: errors.New(`ent: missing required field "DBPackage.march"`)}
|
return &ValidationError{Name: "march", err: errors.New(`ent: missing required field "DBPackage.march"`)}
|
||||||
}
|
}
|
||||||
if v, ok := _c.mutation.March(); ok {
|
if v, ok := dpc.mutation.March(); ok {
|
||||||
if err := dbpackage.MarchValidator(v); err != nil {
|
if err := dbpackage.MarchValidator(v); err != nil {
|
||||||
return &ValidationError{Name: "march", err: fmt.Errorf(`ent: validator failed for field "DBPackage.march": %w`, err)}
|
return &ValidationError{Name: "march", err: fmt.Errorf(`ent: validator failed for field "DBPackage.march": %w`, err)}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if v, ok := _c.mutation.Lto(); ok {
|
if v, ok := dpc.mutation.Lto(); ok {
|
||||||
if err := dbpackage.LtoValidator(v); err != nil {
|
if err := dbpackage.LtoValidator(v); err != nil {
|
||||||
return &ValidationError{Name: "lto", err: fmt.Errorf(`ent: validator failed for field "DBPackage.lto": %w`, err)}
|
return &ValidationError{Name: "lto", err: fmt.Errorf(`ent: validator failed for field "DBPackage.lto": %w`, err)}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if v, ok := _c.mutation.DebugSymbols(); ok {
|
if v, ok := dpc.mutation.DebugSymbols(); ok {
|
||||||
if err := dbpackage.DebugSymbolsValidator(v); err != nil {
|
if err := dbpackage.DebugSymbolsValidator(v); err != nil {
|
||||||
return &ValidationError{Name: "debug_symbols", err: fmt.Errorf(`ent: validator failed for field "DBPackage.debug_symbols": %w`, err)}
|
return &ValidationError{Name: "debug_symbols", err: fmt.Errorf(`ent: validator failed for field "DBPackage.debug_symbols": %w`, err)}
|
||||||
}
|
}
|
||||||
@@ -361,12 +361,12 @@ func (_c *DBPackageCreate) check() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_c *DBPackageCreate) sqlSave(ctx context.Context) (*DBPackage, error) {
|
func (dpc *DBPackageCreate) sqlSave(ctx context.Context) (*DBPackage, error) {
|
||||||
if err := _c.check(); err != nil {
|
if err := dpc.check(); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
_node, _spec := _c.createSpec()
|
_node, _spec := dpc.createSpec()
|
||||||
if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil {
|
if err := sqlgraph.CreateNode(ctx, dpc.driver, _spec); err != nil {
|
||||||
if sqlgraph.IsConstraintError(err) {
|
if sqlgraph.IsConstraintError(err) {
|
||||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
}
|
}
|
||||||
@@ -374,93 +374,93 @@ func (_c *DBPackageCreate) sqlSave(ctx context.Context) (*DBPackage, error) {
|
|||||||
}
|
}
|
||||||
id := _spec.ID.Value.(int64)
|
id := _spec.ID.Value.(int64)
|
||||||
_node.ID = int(id)
|
_node.ID = int(id)
|
||||||
_c.mutation.id = &_node.ID
|
dpc.mutation.id = &_node.ID
|
||||||
_c.mutation.done = true
|
dpc.mutation.done = true
|
||||||
return _node, nil
|
return _node, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_c *DBPackageCreate) createSpec() (*DBPackage, *sqlgraph.CreateSpec) {
|
func (dpc *DBPackageCreate) createSpec() (*DBPackage, *sqlgraph.CreateSpec) {
|
||||||
var (
|
var (
|
||||||
_node = &DBPackage{config: _c.config}
|
_node = &DBPackage{config: dpc.config}
|
||||||
_spec = sqlgraph.NewCreateSpec(dbpackage.Table, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt))
|
_spec = sqlgraph.NewCreateSpec(dbpackage.Table, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt))
|
||||||
)
|
)
|
||||||
if value, ok := _c.mutation.Pkgbase(); ok {
|
if value, ok := dpc.mutation.Pkgbase(); ok {
|
||||||
_spec.SetField(dbpackage.FieldPkgbase, field.TypeString, value)
|
_spec.SetField(dbpackage.FieldPkgbase, field.TypeString, value)
|
||||||
_node.Pkgbase = value
|
_node.Pkgbase = value
|
||||||
}
|
}
|
||||||
if value, ok := _c.mutation.Packages(); ok {
|
if value, ok := dpc.mutation.Packages(); ok {
|
||||||
_spec.SetField(dbpackage.FieldPackages, field.TypeJSON, value)
|
_spec.SetField(dbpackage.FieldPackages, field.TypeJSON, value)
|
||||||
_node.Packages = value
|
_node.Packages = value
|
||||||
}
|
}
|
||||||
if value, ok := _c.mutation.Status(); ok {
|
if value, ok := dpc.mutation.Status(); ok {
|
||||||
_spec.SetField(dbpackage.FieldStatus, field.TypeEnum, value)
|
_spec.SetField(dbpackage.FieldStatus, field.TypeEnum, value)
|
||||||
_node.Status = value
|
_node.Status = value
|
||||||
}
|
}
|
||||||
if value, ok := _c.mutation.SkipReason(); ok {
|
if value, ok := dpc.mutation.SkipReason(); ok {
|
||||||
_spec.SetField(dbpackage.FieldSkipReason, field.TypeString, value)
|
_spec.SetField(dbpackage.FieldSkipReason, field.TypeString, value)
|
||||||
_node.SkipReason = value
|
_node.SkipReason = value
|
||||||
}
|
}
|
||||||
if value, ok := _c.mutation.Repository(); ok {
|
if value, ok := dpc.mutation.Repository(); ok {
|
||||||
_spec.SetField(dbpackage.FieldRepository, field.TypeEnum, value)
|
_spec.SetField(dbpackage.FieldRepository, field.TypeEnum, value)
|
||||||
_node.Repository = value
|
_node.Repository = value
|
||||||
}
|
}
|
||||||
if value, ok := _c.mutation.March(); ok {
|
if value, ok := dpc.mutation.March(); ok {
|
||||||
_spec.SetField(dbpackage.FieldMarch, field.TypeString, value)
|
_spec.SetField(dbpackage.FieldMarch, field.TypeString, value)
|
||||||
_node.March = value
|
_node.March = value
|
||||||
}
|
}
|
||||||
if value, ok := _c.mutation.Version(); ok {
|
if value, ok := dpc.mutation.Version(); ok {
|
||||||
_spec.SetField(dbpackage.FieldVersion, field.TypeString, value)
|
_spec.SetField(dbpackage.FieldVersion, field.TypeString, value)
|
||||||
_node.Version = value
|
_node.Version = value
|
||||||
}
|
}
|
||||||
if value, ok := _c.mutation.RepoVersion(); ok {
|
if value, ok := dpc.mutation.RepoVersion(); ok {
|
||||||
_spec.SetField(dbpackage.FieldRepoVersion, field.TypeString, value)
|
_spec.SetField(dbpackage.FieldRepoVersion, field.TypeString, value)
|
||||||
_node.RepoVersion = value
|
_node.RepoVersion = value
|
||||||
}
|
}
|
||||||
if value, ok := _c.mutation.BuildTimeStart(); ok {
|
if value, ok := dpc.mutation.BuildTimeStart(); ok {
|
||||||
_spec.SetField(dbpackage.FieldBuildTimeStart, field.TypeTime, value)
|
_spec.SetField(dbpackage.FieldBuildTimeStart, field.TypeTime, value)
|
||||||
_node.BuildTimeStart = value
|
_node.BuildTimeStart = value
|
||||||
}
|
}
|
||||||
if value, ok := _c.mutation.Updated(); ok {
|
if value, ok := dpc.mutation.Updated(); ok {
|
||||||
_spec.SetField(dbpackage.FieldUpdated, field.TypeTime, value)
|
_spec.SetField(dbpackage.FieldUpdated, field.TypeTime, value)
|
||||||
_node.Updated = value
|
_node.Updated = value
|
||||||
}
|
}
|
||||||
if value, ok := _c.mutation.Lto(); ok {
|
if value, ok := dpc.mutation.Lto(); ok {
|
||||||
_spec.SetField(dbpackage.FieldLto, field.TypeEnum, value)
|
_spec.SetField(dbpackage.FieldLto, field.TypeEnum, value)
|
||||||
_node.Lto = value
|
_node.Lto = value
|
||||||
}
|
}
|
||||||
if value, ok := _c.mutation.LastVersionBuild(); ok {
|
if value, ok := dpc.mutation.LastVersionBuild(); ok {
|
||||||
_spec.SetField(dbpackage.FieldLastVersionBuild, field.TypeString, value)
|
_spec.SetField(dbpackage.FieldLastVersionBuild, field.TypeString, value)
|
||||||
_node.LastVersionBuild = value
|
_node.LastVersionBuild = value
|
||||||
}
|
}
|
||||||
if value, ok := _c.mutation.LastVerified(); ok {
|
if value, ok := dpc.mutation.LastVerified(); ok {
|
||||||
_spec.SetField(dbpackage.FieldLastVerified, field.TypeTime, value)
|
_spec.SetField(dbpackage.FieldLastVerified, field.TypeTime, value)
|
||||||
_node.LastVerified = value
|
_node.LastVerified = value
|
||||||
}
|
}
|
||||||
if value, ok := _c.mutation.DebugSymbols(); ok {
|
if value, ok := dpc.mutation.DebugSymbols(); ok {
|
||||||
_spec.SetField(dbpackage.FieldDebugSymbols, field.TypeEnum, value)
|
_spec.SetField(dbpackage.FieldDebugSymbols, field.TypeEnum, value)
|
||||||
_node.DebugSymbols = value
|
_node.DebugSymbols = value
|
||||||
}
|
}
|
||||||
if value, ok := _c.mutation.MaxRss(); ok {
|
if value, ok := dpc.mutation.MaxRss(); ok {
|
||||||
_spec.SetField(dbpackage.FieldMaxRss, field.TypeInt64, value)
|
_spec.SetField(dbpackage.FieldMaxRss, field.TypeInt64, value)
|
||||||
_node.MaxRss = &value
|
_node.MaxRss = &value
|
||||||
}
|
}
|
||||||
if value, ok := _c.mutation.UTime(); ok {
|
if value, ok := dpc.mutation.UTime(); ok {
|
||||||
_spec.SetField(dbpackage.FieldUTime, field.TypeInt64, value)
|
_spec.SetField(dbpackage.FieldUTime, field.TypeInt64, value)
|
||||||
_node.UTime = &value
|
_node.UTime = &value
|
||||||
}
|
}
|
||||||
if value, ok := _c.mutation.STime(); ok {
|
if value, ok := dpc.mutation.STime(); ok {
|
||||||
_spec.SetField(dbpackage.FieldSTime, field.TypeInt64, value)
|
_spec.SetField(dbpackage.FieldSTime, field.TypeInt64, value)
|
||||||
_node.STime = &value
|
_node.STime = &value
|
||||||
}
|
}
|
||||||
if value, ok := _c.mutation.IoIn(); ok {
|
if value, ok := dpc.mutation.IoIn(); ok {
|
||||||
_spec.SetField(dbpackage.FieldIoIn, field.TypeInt64, value)
|
_spec.SetField(dbpackage.FieldIoIn, field.TypeInt64, value)
|
||||||
_node.IoIn = &value
|
_node.IoIn = &value
|
||||||
}
|
}
|
||||||
if value, ok := _c.mutation.IoOut(); ok {
|
if value, ok := dpc.mutation.IoOut(); ok {
|
||||||
_spec.SetField(dbpackage.FieldIoOut, field.TypeInt64, value)
|
_spec.SetField(dbpackage.FieldIoOut, field.TypeInt64, value)
|
||||||
_node.IoOut = &value
|
_node.IoOut = &value
|
||||||
}
|
}
|
||||||
if value, ok := _c.mutation.TagRev(); ok {
|
if value, ok := dpc.mutation.TagRev(); ok {
|
||||||
_spec.SetField(dbpackage.FieldTagRev, field.TypeString, value)
|
_spec.SetField(dbpackage.FieldTagRev, field.TypeString, value)
|
||||||
_node.TagRev = &value
|
_node.TagRev = &value
|
||||||
}
|
}
|
||||||
@@ -470,21 +470,17 @@ func (_c *DBPackageCreate) createSpec() (*DBPackage, *sqlgraph.CreateSpec) {
|
|||||||
// DBPackageCreateBulk is the builder for creating many DBPackage entities in bulk.
|
// DBPackageCreateBulk is the builder for creating many DBPackage entities in bulk.
|
||||||
type DBPackageCreateBulk struct {
|
type DBPackageCreateBulk struct {
|
||||||
config
|
config
|
||||||
err error
|
|
||||||
builders []*DBPackageCreate
|
builders []*DBPackageCreate
|
||||||
}
|
}
|
||||||
|
|
||||||
// Save creates the DBPackage entities in the database.
|
// Save creates the DBPackage entities in the database.
|
||||||
func (_c *DBPackageCreateBulk) Save(ctx context.Context) ([]*DBPackage, error) {
|
func (dpcb *DBPackageCreateBulk) Save(ctx context.Context) ([]*DBPackage, error) {
|
||||||
if _c.err != nil {
|
specs := make([]*sqlgraph.CreateSpec, len(dpcb.builders))
|
||||||
return nil, _c.err
|
nodes := make([]*DBPackage, len(dpcb.builders))
|
||||||
}
|
mutators := make([]Mutator, len(dpcb.builders))
|
||||||
specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
|
for i := range dpcb.builders {
|
||||||
nodes := make([]*DBPackage, len(_c.builders))
|
|
||||||
mutators := make([]Mutator, len(_c.builders))
|
|
||||||
for i := range _c.builders {
|
|
||||||
func(i int, root context.Context) {
|
func(i int, root context.Context) {
|
||||||
builder := _c.builders[i]
|
builder := dpcb.builders[i]
|
||||||
builder.defaults()
|
builder.defaults()
|
||||||
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
|
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
|
||||||
mutation, ok := m.(*DBPackageMutation)
|
mutation, ok := m.(*DBPackageMutation)
|
||||||
@@ -498,11 +494,11 @@ func (_c *DBPackageCreateBulk) Save(ctx context.Context) ([]*DBPackage, error) {
|
|||||||
var err error
|
var err error
|
||||||
nodes[i], specs[i] = builder.createSpec()
|
nodes[i], specs[i] = builder.createSpec()
|
||||||
if i < len(mutators)-1 {
|
if i < len(mutators)-1 {
|
||||||
_, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
|
_, err = mutators[i+1].Mutate(root, dpcb.builders[i+1].mutation)
|
||||||
} else {
|
} else {
|
||||||
spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
|
spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
|
||||||
// Invoke the actual operation on the latest mutation in the chain.
|
// Invoke the actual operation on the latest mutation in the chain.
|
||||||
if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
|
if err = sqlgraph.BatchCreate(ctx, dpcb.driver, spec); err != nil {
|
||||||
if sqlgraph.IsConstraintError(err) {
|
if sqlgraph.IsConstraintError(err) {
|
||||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
}
|
}
|
||||||
@@ -526,7 +522,7 @@ func (_c *DBPackageCreateBulk) Save(ctx context.Context) ([]*DBPackage, error) {
|
|||||||
}(i, ctx)
|
}(i, ctx)
|
||||||
}
|
}
|
||||||
if len(mutators) > 0 {
|
if len(mutators) > 0 {
|
||||||
if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
|
if _, err := mutators[0].Mutate(ctx, dpcb.builders[0].mutation); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -534,8 +530,8 @@ func (_c *DBPackageCreateBulk) Save(ctx context.Context) ([]*DBPackage, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// SaveX is like Save, but panics if an error occurs.
|
// SaveX is like Save, but panics if an error occurs.
|
||||||
func (_c *DBPackageCreateBulk) SaveX(ctx context.Context) []*DBPackage {
|
func (dpcb *DBPackageCreateBulk) SaveX(ctx context.Context) []*DBPackage {
|
||||||
v, err := _c.Save(ctx)
|
v, err := dpcb.Save(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
@@ -543,14 +539,14 @@ func (_c *DBPackageCreateBulk) SaveX(ctx context.Context) []*DBPackage {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Exec executes the query.
|
// Exec executes the query.
|
||||||
func (_c *DBPackageCreateBulk) Exec(ctx context.Context) error {
|
func (dpcb *DBPackageCreateBulk) Exec(ctx context.Context) error {
|
||||||
_, err := _c.Save(ctx)
|
_, err := dpcb.Save(ctx)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// ExecX is like Exec, but panics if an error occurs.
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
func (_c *DBPackageCreateBulk) ExecX(ctx context.Context) {
|
func (dpcb *DBPackageCreateBulk) ExecX(ctx context.Context) {
|
||||||
if err := _c.Exec(ctx); err != nil {
|
if err := dpcb.Exec(ctx); err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -20,56 +20,56 @@ type DBPackageDelete struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Where appends a list predicates to the DBPackageDelete builder.
|
// Where appends a list predicates to the DBPackageDelete builder.
|
||||||
func (_d *DBPackageDelete) Where(ps ...predicate.DBPackage) *DBPackageDelete {
|
func (dpd *DBPackageDelete) Where(ps ...predicate.DBPackage) *DBPackageDelete {
|
||||||
_d.mutation.Where(ps...)
|
dpd.mutation.Where(ps...)
|
||||||
return _d
|
return dpd
|
||||||
}
|
}
|
||||||
|
|
||||||
// Exec executes the deletion query and returns how many vertices were deleted.
|
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||||
func (_d *DBPackageDelete) Exec(ctx context.Context) (int, error) {
|
func (dpd *DBPackageDelete) Exec(ctx context.Context) (int, error) {
|
||||||
return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
|
return withHooks(ctx, dpd.sqlExec, dpd.mutation, dpd.hooks)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ExecX is like Exec, but panics if an error occurs.
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
func (_d *DBPackageDelete) ExecX(ctx context.Context) int {
|
func (dpd *DBPackageDelete) ExecX(ctx context.Context) int {
|
||||||
n, err := _d.Exec(ctx)
|
n, err := dpd.Exec(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
return n
|
return n
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_d *DBPackageDelete) sqlExec(ctx context.Context) (int, error) {
|
func (dpd *DBPackageDelete) sqlExec(ctx context.Context) (int, error) {
|
||||||
_spec := sqlgraph.NewDeleteSpec(dbpackage.Table, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt))
|
_spec := sqlgraph.NewDeleteSpec(dbpackage.Table, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt))
|
||||||
if ps := _d.mutation.predicates; len(ps) > 0 {
|
if ps := dpd.mutation.predicates; len(ps) > 0 {
|
||||||
_spec.Predicate = func(selector *sql.Selector) {
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
for i := range ps {
|
for i := range ps {
|
||||||
ps[i](selector)
|
ps[i](selector)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
|
affected, err := sqlgraph.DeleteNodes(ctx, dpd.driver, _spec)
|
||||||
if err != nil && sqlgraph.IsConstraintError(err) {
|
if err != nil && sqlgraph.IsConstraintError(err) {
|
||||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
}
|
}
|
||||||
_d.mutation.done = true
|
dpd.mutation.done = true
|
||||||
return affected, err
|
return affected, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// DBPackageDeleteOne is the builder for deleting a single DBPackage entity.
|
// DBPackageDeleteOne is the builder for deleting a single DBPackage entity.
|
||||||
type DBPackageDeleteOne struct {
|
type DBPackageDeleteOne struct {
|
||||||
_d *DBPackageDelete
|
dpd *DBPackageDelete
|
||||||
}
|
}
|
||||||
|
|
||||||
// Where appends a list predicates to the DBPackageDelete builder.
|
// Where appends a list predicates to the DBPackageDelete builder.
|
||||||
func (_d *DBPackageDeleteOne) Where(ps ...predicate.DBPackage) *DBPackageDeleteOne {
|
func (dpdo *DBPackageDeleteOne) Where(ps ...predicate.DBPackage) *DBPackageDeleteOne {
|
||||||
_d._d.mutation.Where(ps...)
|
dpdo.dpd.mutation.Where(ps...)
|
||||||
return _d
|
return dpdo
|
||||||
}
|
}
|
||||||
|
|
||||||
// Exec executes the deletion query.
|
// Exec executes the deletion query.
|
||||||
func (_d *DBPackageDeleteOne) Exec(ctx context.Context) error {
|
func (dpdo *DBPackageDeleteOne) Exec(ctx context.Context) error {
|
||||||
n, err := _d._d.Exec(ctx)
|
n, err := dpdo.dpd.Exec(ctx)
|
||||||
switch {
|
switch {
|
||||||
case err != nil:
|
case err != nil:
|
||||||
return err
|
return err
|
||||||
@@ -81,8 +81,8 @@ func (_d *DBPackageDeleteOne) Exec(ctx context.Context) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ExecX is like Exec, but panics if an error occurs.
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
func (_d *DBPackageDeleteOne) ExecX(ctx context.Context) {
|
func (dpdo *DBPackageDeleteOne) ExecX(ctx context.Context) {
|
||||||
if err := _d.Exec(ctx); err != nil {
|
if err := dpdo.Exec(ctx); err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,7 +7,6 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"math"
|
"math"
|
||||||
|
|
||||||
"entgo.io/ent"
|
|
||||||
"entgo.io/ent/dialect/sql"
|
"entgo.io/ent/dialect/sql"
|
||||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
"entgo.io/ent/schema/field"
|
"entgo.io/ent/schema/field"
|
||||||
@@ -29,40 +28,40 @@ type DBPackageQuery struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Where adds a new predicate for the DBPackageQuery builder.
|
// Where adds a new predicate for the DBPackageQuery builder.
|
||||||
func (_q *DBPackageQuery) Where(ps ...predicate.DBPackage) *DBPackageQuery {
|
func (dpq *DBPackageQuery) Where(ps ...predicate.DBPackage) *DBPackageQuery {
|
||||||
_q.predicates = append(_q.predicates, ps...)
|
dpq.predicates = append(dpq.predicates, ps...)
|
||||||
return _q
|
return dpq
|
||||||
}
|
}
|
||||||
|
|
||||||
// Limit the number of records to be returned by this query.
|
// Limit the number of records to be returned by this query.
|
||||||
func (_q *DBPackageQuery) Limit(limit int) *DBPackageQuery {
|
func (dpq *DBPackageQuery) Limit(limit int) *DBPackageQuery {
|
||||||
_q.ctx.Limit = &limit
|
dpq.ctx.Limit = &limit
|
||||||
return _q
|
return dpq
|
||||||
}
|
}
|
||||||
|
|
||||||
// Offset to start from.
|
// Offset to start from.
|
||||||
func (_q *DBPackageQuery) Offset(offset int) *DBPackageQuery {
|
func (dpq *DBPackageQuery) Offset(offset int) *DBPackageQuery {
|
||||||
_q.ctx.Offset = &offset
|
dpq.ctx.Offset = &offset
|
||||||
return _q
|
return dpq
|
||||||
}
|
}
|
||||||
|
|
||||||
// Unique configures the query builder to filter duplicate records on query.
|
// Unique configures the query builder to filter duplicate records on query.
|
||||||
// By default, unique is set to true, and can be disabled using this method.
|
// By default, unique is set to true, and can be disabled using this method.
|
||||||
func (_q *DBPackageQuery) Unique(unique bool) *DBPackageQuery {
|
func (dpq *DBPackageQuery) Unique(unique bool) *DBPackageQuery {
|
||||||
_q.ctx.Unique = &unique
|
dpq.ctx.Unique = &unique
|
||||||
return _q
|
return dpq
|
||||||
}
|
}
|
||||||
|
|
||||||
// Order specifies how the records should be ordered.
|
// Order specifies how the records should be ordered.
|
||||||
func (_q *DBPackageQuery) Order(o ...dbpackage.OrderOption) *DBPackageQuery {
|
func (dpq *DBPackageQuery) Order(o ...dbpackage.OrderOption) *DBPackageQuery {
|
||||||
_q.order = append(_q.order, o...)
|
dpq.order = append(dpq.order, o...)
|
||||||
return _q
|
return dpq
|
||||||
}
|
}
|
||||||
|
|
||||||
// First returns the first DBPackage entity from the query.
|
// First returns the first DBPackage entity from the query.
|
||||||
// Returns a *NotFoundError when no DBPackage was found.
|
// Returns a *NotFoundError when no DBPackage was found.
|
||||||
func (_q *DBPackageQuery) First(ctx context.Context) (*DBPackage, error) {
|
func (dpq *DBPackageQuery) First(ctx context.Context) (*DBPackage, error) {
|
||||||
nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
|
nodes, err := dpq.Limit(1).All(setContextOp(ctx, dpq.ctx, "First"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -73,8 +72,8 @@ func (_q *DBPackageQuery) First(ctx context.Context) (*DBPackage, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// FirstX is like First, but panics if an error occurs.
|
// FirstX is like First, but panics if an error occurs.
|
||||||
func (_q *DBPackageQuery) FirstX(ctx context.Context) *DBPackage {
|
func (dpq *DBPackageQuery) FirstX(ctx context.Context) *DBPackage {
|
||||||
node, err := _q.First(ctx)
|
node, err := dpq.First(ctx)
|
||||||
if err != nil && !IsNotFound(err) {
|
if err != nil && !IsNotFound(err) {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
@@ -83,9 +82,9 @@ func (_q *DBPackageQuery) FirstX(ctx context.Context) *DBPackage {
|
|||||||
|
|
||||||
// FirstID returns the first DBPackage ID from the query.
|
// FirstID returns the first DBPackage ID from the query.
|
||||||
// Returns a *NotFoundError when no DBPackage ID was found.
|
// Returns a *NotFoundError when no DBPackage ID was found.
|
||||||
func (_q *DBPackageQuery) FirstID(ctx context.Context) (id int, err error) {
|
func (dpq *DBPackageQuery) FirstID(ctx context.Context) (id int, err error) {
|
||||||
var ids []int
|
var ids []int
|
||||||
if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
|
if ids, err = dpq.Limit(1).IDs(setContextOp(ctx, dpq.ctx, "FirstID")); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if len(ids) == 0 {
|
if len(ids) == 0 {
|
||||||
@@ -96,8 +95,8 @@ func (_q *DBPackageQuery) FirstID(ctx context.Context) (id int, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// FirstIDX is like FirstID, but panics if an error occurs.
|
// FirstIDX is like FirstID, but panics if an error occurs.
|
||||||
func (_q *DBPackageQuery) FirstIDX(ctx context.Context) int {
|
func (dpq *DBPackageQuery) FirstIDX(ctx context.Context) int {
|
||||||
id, err := _q.FirstID(ctx)
|
id, err := dpq.FirstID(ctx)
|
||||||
if err != nil && !IsNotFound(err) {
|
if err != nil && !IsNotFound(err) {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
@@ -107,8 +106,8 @@ func (_q *DBPackageQuery) FirstIDX(ctx context.Context) int {
|
|||||||
// Only returns a single DBPackage entity found by the query, ensuring it only returns one.
|
// Only returns a single DBPackage entity found by the query, ensuring it only returns one.
|
||||||
// Returns a *NotSingularError when more than one DBPackage entity is found.
|
// Returns a *NotSingularError when more than one DBPackage entity is found.
|
||||||
// Returns a *NotFoundError when no DBPackage entities are found.
|
// Returns a *NotFoundError when no DBPackage entities are found.
|
||||||
func (_q *DBPackageQuery) Only(ctx context.Context) (*DBPackage, error) {
|
func (dpq *DBPackageQuery) Only(ctx context.Context) (*DBPackage, error) {
|
||||||
nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
|
nodes, err := dpq.Limit(2).All(setContextOp(ctx, dpq.ctx, "Only"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -123,8 +122,8 @@ func (_q *DBPackageQuery) Only(ctx context.Context) (*DBPackage, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// OnlyX is like Only, but panics if an error occurs.
|
// OnlyX is like Only, but panics if an error occurs.
|
||||||
func (_q *DBPackageQuery) OnlyX(ctx context.Context) *DBPackage {
|
func (dpq *DBPackageQuery) OnlyX(ctx context.Context) *DBPackage {
|
||||||
node, err := _q.Only(ctx)
|
node, err := dpq.Only(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
@@ -134,9 +133,9 @@ func (_q *DBPackageQuery) OnlyX(ctx context.Context) *DBPackage {
|
|||||||
// OnlyID is like Only, but returns the only DBPackage ID in the query.
|
// OnlyID is like Only, but returns the only DBPackage ID in the query.
|
||||||
// Returns a *NotSingularError when more than one DBPackage ID is found.
|
// Returns a *NotSingularError when more than one DBPackage ID is found.
|
||||||
// Returns a *NotFoundError when no entities are found.
|
// Returns a *NotFoundError when no entities are found.
|
||||||
func (_q *DBPackageQuery) OnlyID(ctx context.Context) (id int, err error) {
|
func (dpq *DBPackageQuery) OnlyID(ctx context.Context) (id int, err error) {
|
||||||
var ids []int
|
var ids []int
|
||||||
if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
|
if ids, err = dpq.Limit(2).IDs(setContextOp(ctx, dpq.ctx, "OnlyID")); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
switch len(ids) {
|
switch len(ids) {
|
||||||
@@ -151,8 +150,8 @@ func (_q *DBPackageQuery) OnlyID(ctx context.Context) (id int, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// OnlyIDX is like OnlyID, but panics if an error occurs.
|
// OnlyIDX is like OnlyID, but panics if an error occurs.
|
||||||
func (_q *DBPackageQuery) OnlyIDX(ctx context.Context) int {
|
func (dpq *DBPackageQuery) OnlyIDX(ctx context.Context) int {
|
||||||
id, err := _q.OnlyID(ctx)
|
id, err := dpq.OnlyID(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
@@ -160,18 +159,18 @@ func (_q *DBPackageQuery) OnlyIDX(ctx context.Context) int {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// All executes the query and returns a list of DBPackages.
|
// All executes the query and returns a list of DBPackages.
|
||||||
func (_q *DBPackageQuery) All(ctx context.Context) ([]*DBPackage, error) {
|
func (dpq *DBPackageQuery) All(ctx context.Context) ([]*DBPackage, error) {
|
||||||
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
|
ctx = setContextOp(ctx, dpq.ctx, "All")
|
||||||
if err := _q.prepareQuery(ctx); err != nil {
|
if err := dpq.prepareQuery(ctx); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
qr := querierAll[[]*DBPackage, *DBPackageQuery]()
|
qr := querierAll[[]*DBPackage, *DBPackageQuery]()
|
||||||
return withInterceptors[[]*DBPackage](ctx, _q, qr, _q.inters)
|
return withInterceptors[[]*DBPackage](ctx, dpq, qr, dpq.inters)
|
||||||
}
|
}
|
||||||
|
|
||||||
// AllX is like All, but panics if an error occurs.
|
// AllX is like All, but panics if an error occurs.
|
||||||
func (_q *DBPackageQuery) AllX(ctx context.Context) []*DBPackage {
|
func (dpq *DBPackageQuery) AllX(ctx context.Context) []*DBPackage {
|
||||||
nodes, err := _q.All(ctx)
|
nodes, err := dpq.All(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
@@ -179,20 +178,20 @@ func (_q *DBPackageQuery) AllX(ctx context.Context) []*DBPackage {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// IDs executes the query and returns a list of DBPackage IDs.
|
// IDs executes the query and returns a list of DBPackage IDs.
|
||||||
func (_q *DBPackageQuery) IDs(ctx context.Context) (ids []int, err error) {
|
func (dpq *DBPackageQuery) IDs(ctx context.Context) (ids []int, err error) {
|
||||||
if _q.ctx.Unique == nil && _q.path != nil {
|
if dpq.ctx.Unique == nil && dpq.path != nil {
|
||||||
_q.Unique(true)
|
dpq.Unique(true)
|
||||||
}
|
}
|
||||||
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
|
ctx = setContextOp(ctx, dpq.ctx, "IDs")
|
||||||
if err = _q.Select(dbpackage.FieldID).Scan(ctx, &ids); err != nil {
|
if err = dpq.Select(dbpackage.FieldID).Scan(ctx, &ids); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return ids, nil
|
return ids, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// IDsX is like IDs, but panics if an error occurs.
|
// IDsX is like IDs, but panics if an error occurs.
|
||||||
func (_q *DBPackageQuery) IDsX(ctx context.Context) []int {
|
func (dpq *DBPackageQuery) IDsX(ctx context.Context) []int {
|
||||||
ids, err := _q.IDs(ctx)
|
ids, err := dpq.IDs(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
@@ -200,17 +199,17 @@ func (_q *DBPackageQuery) IDsX(ctx context.Context) []int {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Count returns the count of the given query.
|
// Count returns the count of the given query.
|
||||||
func (_q *DBPackageQuery) Count(ctx context.Context) (int, error) {
|
func (dpq *DBPackageQuery) Count(ctx context.Context) (int, error) {
|
||||||
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
|
ctx = setContextOp(ctx, dpq.ctx, "Count")
|
||||||
if err := _q.prepareQuery(ctx); err != nil {
|
if err := dpq.prepareQuery(ctx); err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
return withInterceptors[int](ctx, _q, querierCount[*DBPackageQuery](), _q.inters)
|
return withInterceptors[int](ctx, dpq, querierCount[*DBPackageQuery](), dpq.inters)
|
||||||
}
|
}
|
||||||
|
|
||||||
// CountX is like Count, but panics if an error occurs.
|
// CountX is like Count, but panics if an error occurs.
|
||||||
func (_q *DBPackageQuery) CountX(ctx context.Context) int {
|
func (dpq *DBPackageQuery) CountX(ctx context.Context) int {
|
||||||
count, err := _q.Count(ctx)
|
count, err := dpq.Count(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
@@ -218,9 +217,9 @@ func (_q *DBPackageQuery) CountX(ctx context.Context) int {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Exist returns true if the query has elements in the graph.
|
// Exist returns true if the query has elements in the graph.
|
||||||
func (_q *DBPackageQuery) Exist(ctx context.Context) (bool, error) {
|
func (dpq *DBPackageQuery) Exist(ctx context.Context) (bool, error) {
|
||||||
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
|
ctx = setContextOp(ctx, dpq.ctx, "Exist")
|
||||||
switch _, err := _q.FirstID(ctx); {
|
switch _, err := dpq.FirstID(ctx); {
|
||||||
case IsNotFound(err):
|
case IsNotFound(err):
|
||||||
return false, nil
|
return false, nil
|
||||||
case err != nil:
|
case err != nil:
|
||||||
@@ -231,8 +230,8 @@ func (_q *DBPackageQuery) Exist(ctx context.Context) (bool, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ExistX is like Exist, but panics if an error occurs.
|
// ExistX is like Exist, but panics if an error occurs.
|
||||||
func (_q *DBPackageQuery) ExistX(ctx context.Context) bool {
|
func (dpq *DBPackageQuery) ExistX(ctx context.Context) bool {
|
||||||
exist, err := _q.Exist(ctx)
|
exist, err := dpq.Exist(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
@@ -241,20 +240,19 @@ func (_q *DBPackageQuery) ExistX(ctx context.Context) bool {
|
|||||||
|
|
||||||
// Clone returns a duplicate of the DBPackageQuery builder, including all associated steps. It can be
|
// Clone returns a duplicate of the DBPackageQuery builder, including all associated steps. It can be
|
||||||
// used to prepare common query builders and use them differently after the clone is made.
|
// used to prepare common query builders and use them differently after the clone is made.
|
||||||
func (_q *DBPackageQuery) Clone() *DBPackageQuery {
|
func (dpq *DBPackageQuery) Clone() *DBPackageQuery {
|
||||||
if _q == nil {
|
if dpq == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return &DBPackageQuery{
|
return &DBPackageQuery{
|
||||||
config: _q.config,
|
config: dpq.config,
|
||||||
ctx: _q.ctx.Clone(),
|
ctx: dpq.ctx.Clone(),
|
||||||
order: append([]dbpackage.OrderOption{}, _q.order...),
|
order: append([]dbpackage.OrderOption{}, dpq.order...),
|
||||||
inters: append([]Interceptor{}, _q.inters...),
|
inters: append([]Interceptor{}, dpq.inters...),
|
||||||
predicates: append([]predicate.DBPackage{}, _q.predicates...),
|
predicates: append([]predicate.DBPackage{}, dpq.predicates...),
|
||||||
// clone intermediate query.
|
// clone intermediate query.
|
||||||
sql: _q.sql.Clone(),
|
sql: dpq.sql.Clone(),
|
||||||
path: _q.path,
|
path: dpq.path,
|
||||||
modifiers: append([]func(*sql.Selector){}, _q.modifiers...),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -272,10 +270,10 @@ func (_q *DBPackageQuery) Clone() *DBPackageQuery {
|
|||||||
// GroupBy(dbpackage.FieldPkgbase).
|
// GroupBy(dbpackage.FieldPkgbase).
|
||||||
// Aggregate(ent.Count()).
|
// Aggregate(ent.Count()).
|
||||||
// Scan(ctx, &v)
|
// Scan(ctx, &v)
|
||||||
func (_q *DBPackageQuery) GroupBy(field string, fields ...string) *DBPackageGroupBy {
|
func (dpq *DBPackageQuery) GroupBy(field string, fields ...string) *DBPackageGroupBy {
|
||||||
_q.ctx.Fields = append([]string{field}, fields...)
|
dpq.ctx.Fields = append([]string{field}, fields...)
|
||||||
grbuild := &DBPackageGroupBy{build: _q}
|
grbuild := &DBPackageGroupBy{build: dpq}
|
||||||
grbuild.flds = &_q.ctx.Fields
|
grbuild.flds = &dpq.ctx.Fields
|
||||||
grbuild.label = dbpackage.Label
|
grbuild.label = dbpackage.Label
|
||||||
grbuild.scan = grbuild.Scan
|
grbuild.scan = grbuild.Scan
|
||||||
return grbuild
|
return grbuild
|
||||||
@@ -293,65 +291,65 @@ func (_q *DBPackageQuery) GroupBy(field string, fields ...string) *DBPackageGrou
|
|||||||
// client.DBPackage.Query().
|
// client.DBPackage.Query().
|
||||||
// Select(dbpackage.FieldPkgbase).
|
// Select(dbpackage.FieldPkgbase).
|
||||||
// Scan(ctx, &v)
|
// Scan(ctx, &v)
|
||||||
func (_q *DBPackageQuery) Select(fields ...string) *DBPackageSelect {
|
func (dpq *DBPackageQuery) Select(fields ...string) *DBPackageSelect {
|
||||||
_q.ctx.Fields = append(_q.ctx.Fields, fields...)
|
dpq.ctx.Fields = append(dpq.ctx.Fields, fields...)
|
||||||
sbuild := &DBPackageSelect{DBPackageQuery: _q}
|
sbuild := &DBPackageSelect{DBPackageQuery: dpq}
|
||||||
sbuild.label = dbpackage.Label
|
sbuild.label = dbpackage.Label
|
||||||
sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
|
sbuild.flds, sbuild.scan = &dpq.ctx.Fields, sbuild.Scan
|
||||||
return sbuild
|
return sbuild
|
||||||
}
|
}
|
||||||
|
|
||||||
// Aggregate returns a DBPackageSelect configured with the given aggregations.
|
// Aggregate returns a DBPackageSelect configured with the given aggregations.
|
||||||
func (_q *DBPackageQuery) Aggregate(fns ...AggregateFunc) *DBPackageSelect {
|
func (dpq *DBPackageQuery) Aggregate(fns ...AggregateFunc) *DBPackageSelect {
|
||||||
return _q.Select().Aggregate(fns...)
|
return dpq.Select().Aggregate(fns...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_q *DBPackageQuery) prepareQuery(ctx context.Context) error {
|
func (dpq *DBPackageQuery) prepareQuery(ctx context.Context) error {
|
||||||
for _, inter := range _q.inters {
|
for _, inter := range dpq.inters {
|
||||||
if inter == nil {
|
if inter == nil {
|
||||||
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
|
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
|
||||||
}
|
}
|
||||||
if trv, ok := inter.(Traverser); ok {
|
if trv, ok := inter.(Traverser); ok {
|
||||||
if err := trv.Traverse(ctx, _q); err != nil {
|
if err := trv.Traverse(ctx, dpq); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for _, f := range _q.ctx.Fields {
|
for _, f := range dpq.ctx.Fields {
|
||||||
if !dbpackage.ValidColumn(f) {
|
if !dbpackage.ValidColumn(f) {
|
||||||
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if _q.path != nil {
|
if dpq.path != nil {
|
||||||
prev, err := _q.path(ctx)
|
prev, err := dpq.path(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
_q.sql = prev
|
dpq.sql = prev
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_q *DBPackageQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*DBPackage, error) {
|
func (dpq *DBPackageQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*DBPackage, error) {
|
||||||
var (
|
var (
|
||||||
nodes = []*DBPackage{}
|
nodes = []*DBPackage{}
|
||||||
_spec = _q.querySpec()
|
_spec = dpq.querySpec()
|
||||||
)
|
)
|
||||||
_spec.ScanValues = func(columns []string) ([]any, error) {
|
_spec.ScanValues = func(columns []string) ([]any, error) {
|
||||||
return (*DBPackage).scanValues(nil, columns)
|
return (*DBPackage).scanValues(nil, columns)
|
||||||
}
|
}
|
||||||
_spec.Assign = func(columns []string, values []any) error {
|
_spec.Assign = func(columns []string, values []any) error {
|
||||||
node := &DBPackage{config: _q.config}
|
node := &DBPackage{config: dpq.config}
|
||||||
nodes = append(nodes, node)
|
nodes = append(nodes, node)
|
||||||
return node.assignValues(columns, values)
|
return node.assignValues(columns, values)
|
||||||
}
|
}
|
||||||
if len(_q.modifiers) > 0 {
|
if len(dpq.modifiers) > 0 {
|
||||||
_spec.Modifiers = _q.modifiers
|
_spec.Modifiers = dpq.modifiers
|
||||||
}
|
}
|
||||||
for i := range hooks {
|
for i := range hooks {
|
||||||
hooks[i](ctx, _spec)
|
hooks[i](ctx, _spec)
|
||||||
}
|
}
|
||||||
if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
|
if err := sqlgraph.QueryNodes(ctx, dpq.driver, _spec); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if len(nodes) == 0 {
|
if len(nodes) == 0 {
|
||||||
@@ -360,27 +358,27 @@ func (_q *DBPackageQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*DB
|
|||||||
return nodes, nil
|
return nodes, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_q *DBPackageQuery) sqlCount(ctx context.Context) (int, error) {
|
func (dpq *DBPackageQuery) sqlCount(ctx context.Context) (int, error) {
|
||||||
_spec := _q.querySpec()
|
_spec := dpq.querySpec()
|
||||||
if len(_q.modifiers) > 0 {
|
if len(dpq.modifiers) > 0 {
|
||||||
_spec.Modifiers = _q.modifiers
|
_spec.Modifiers = dpq.modifiers
|
||||||
}
|
}
|
||||||
_spec.Node.Columns = _q.ctx.Fields
|
_spec.Node.Columns = dpq.ctx.Fields
|
||||||
if len(_q.ctx.Fields) > 0 {
|
if len(dpq.ctx.Fields) > 0 {
|
||||||
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
_spec.Unique = dpq.ctx.Unique != nil && *dpq.ctx.Unique
|
||||||
}
|
}
|
||||||
return sqlgraph.CountNodes(ctx, _q.driver, _spec)
|
return sqlgraph.CountNodes(ctx, dpq.driver, _spec)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_q *DBPackageQuery) querySpec() *sqlgraph.QuerySpec {
|
func (dpq *DBPackageQuery) querySpec() *sqlgraph.QuerySpec {
|
||||||
_spec := sqlgraph.NewQuerySpec(dbpackage.Table, dbpackage.Columns, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt))
|
_spec := sqlgraph.NewQuerySpec(dbpackage.Table, dbpackage.Columns, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt))
|
||||||
_spec.From = _q.sql
|
_spec.From = dpq.sql
|
||||||
if unique := _q.ctx.Unique; unique != nil {
|
if unique := dpq.ctx.Unique; unique != nil {
|
||||||
_spec.Unique = *unique
|
_spec.Unique = *unique
|
||||||
} else if _q.path != nil {
|
} else if dpq.path != nil {
|
||||||
_spec.Unique = true
|
_spec.Unique = true
|
||||||
}
|
}
|
||||||
if fields := _q.ctx.Fields; len(fields) > 0 {
|
if fields := dpq.ctx.Fields; len(fields) > 0 {
|
||||||
_spec.Node.Columns = make([]string, 0, len(fields))
|
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||||
_spec.Node.Columns = append(_spec.Node.Columns, dbpackage.FieldID)
|
_spec.Node.Columns = append(_spec.Node.Columns, dbpackage.FieldID)
|
||||||
for i := range fields {
|
for i := range fields {
|
||||||
@@ -389,20 +387,20 @@ func (_q *DBPackageQuery) querySpec() *sqlgraph.QuerySpec {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if ps := _q.predicates; len(ps) > 0 {
|
if ps := dpq.predicates; len(ps) > 0 {
|
||||||
_spec.Predicate = func(selector *sql.Selector) {
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
for i := range ps {
|
for i := range ps {
|
||||||
ps[i](selector)
|
ps[i](selector)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if limit := _q.ctx.Limit; limit != nil {
|
if limit := dpq.ctx.Limit; limit != nil {
|
||||||
_spec.Limit = *limit
|
_spec.Limit = *limit
|
||||||
}
|
}
|
||||||
if offset := _q.ctx.Offset; offset != nil {
|
if offset := dpq.ctx.Offset; offset != nil {
|
||||||
_spec.Offset = *offset
|
_spec.Offset = *offset
|
||||||
}
|
}
|
||||||
if ps := _q.order; len(ps) > 0 {
|
if ps := dpq.order; len(ps) > 0 {
|
||||||
_spec.Order = func(selector *sql.Selector) {
|
_spec.Order = func(selector *sql.Selector) {
|
||||||
for i := range ps {
|
for i := range ps {
|
||||||
ps[i](selector)
|
ps[i](selector)
|
||||||
@@ -412,45 +410,45 @@ func (_q *DBPackageQuery) querySpec() *sqlgraph.QuerySpec {
|
|||||||
return _spec
|
return _spec
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_q *DBPackageQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
func (dpq *DBPackageQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
||||||
builder := sql.Dialect(_q.driver.Dialect())
|
builder := sql.Dialect(dpq.driver.Dialect())
|
||||||
t1 := builder.Table(dbpackage.Table)
|
t1 := builder.Table(dbpackage.Table)
|
||||||
columns := _q.ctx.Fields
|
columns := dpq.ctx.Fields
|
||||||
if len(columns) == 0 {
|
if len(columns) == 0 {
|
||||||
columns = dbpackage.Columns
|
columns = dbpackage.Columns
|
||||||
}
|
}
|
||||||
selector := builder.Select(t1.Columns(columns...)...).From(t1)
|
selector := builder.Select(t1.Columns(columns...)...).From(t1)
|
||||||
if _q.sql != nil {
|
if dpq.sql != nil {
|
||||||
selector = _q.sql
|
selector = dpq.sql
|
||||||
selector.Select(selector.Columns(columns...)...)
|
selector.Select(selector.Columns(columns...)...)
|
||||||
}
|
}
|
||||||
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
if dpq.ctx.Unique != nil && *dpq.ctx.Unique {
|
||||||
selector.Distinct()
|
selector.Distinct()
|
||||||
}
|
}
|
||||||
for _, m := range _q.modifiers {
|
for _, m := range dpq.modifiers {
|
||||||
m(selector)
|
m(selector)
|
||||||
}
|
}
|
||||||
for _, p := range _q.predicates {
|
for _, p := range dpq.predicates {
|
||||||
p(selector)
|
p(selector)
|
||||||
}
|
}
|
||||||
for _, p := range _q.order {
|
for _, p := range dpq.order {
|
||||||
p(selector)
|
p(selector)
|
||||||
}
|
}
|
||||||
if offset := _q.ctx.Offset; offset != nil {
|
if offset := dpq.ctx.Offset; offset != nil {
|
||||||
// limit is mandatory for offset clause. We start
|
// limit is mandatory for offset clause. We start
|
||||||
// with default value, and override it below if needed.
|
// with default value, and override it below if needed.
|
||||||
selector.Offset(*offset).Limit(math.MaxInt32)
|
selector.Offset(*offset).Limit(math.MaxInt32)
|
||||||
}
|
}
|
||||||
if limit := _q.ctx.Limit; limit != nil {
|
if limit := dpq.ctx.Limit; limit != nil {
|
||||||
selector.Limit(*limit)
|
selector.Limit(*limit)
|
||||||
}
|
}
|
||||||
return selector
|
return selector
|
||||||
}
|
}
|
||||||
|
|
||||||
// Modify adds a query modifier for attaching custom logic to queries.
|
// Modify adds a query modifier for attaching custom logic to queries.
|
||||||
func (_q *DBPackageQuery) Modify(modifiers ...func(s *sql.Selector)) *DBPackageSelect {
|
func (dpq *DBPackageQuery) Modify(modifiers ...func(s *sql.Selector)) *DBPackageSelect {
|
||||||
_q.modifiers = append(_q.modifiers, modifiers...)
|
dpq.modifiers = append(dpq.modifiers, modifiers...)
|
||||||
return _q.Select()
|
return dpq.Select()
|
||||||
}
|
}
|
||||||
|
|
||||||
// DBPackageGroupBy is the group-by builder for DBPackage entities.
|
// DBPackageGroupBy is the group-by builder for DBPackage entities.
|
||||||
@@ -460,41 +458,41 @@ type DBPackageGroupBy struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Aggregate adds the given aggregation functions to the group-by query.
|
// Aggregate adds the given aggregation functions to the group-by query.
|
||||||
func (_g *DBPackageGroupBy) Aggregate(fns ...AggregateFunc) *DBPackageGroupBy {
|
func (dpgb *DBPackageGroupBy) Aggregate(fns ...AggregateFunc) *DBPackageGroupBy {
|
||||||
_g.fns = append(_g.fns, fns...)
|
dpgb.fns = append(dpgb.fns, fns...)
|
||||||
return _g
|
return dpgb
|
||||||
}
|
}
|
||||||
|
|
||||||
// Scan applies the selector query and scans the result into the given value.
|
// Scan applies the selector query and scans the result into the given value.
|
||||||
func (_g *DBPackageGroupBy) Scan(ctx context.Context, v any) error {
|
func (dpgb *DBPackageGroupBy) Scan(ctx context.Context, v any) error {
|
||||||
ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
|
ctx = setContextOp(ctx, dpgb.build.ctx, "GroupBy")
|
||||||
if err := _g.build.prepareQuery(ctx); err != nil {
|
if err := dpgb.build.prepareQuery(ctx); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return scanWithInterceptors[*DBPackageQuery, *DBPackageGroupBy](ctx, _g.build, _g, _g.build.inters, v)
|
return scanWithInterceptors[*DBPackageQuery, *DBPackageGroupBy](ctx, dpgb.build, dpgb, dpgb.build.inters, v)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_g *DBPackageGroupBy) sqlScan(ctx context.Context, root *DBPackageQuery, v any) error {
|
func (dpgb *DBPackageGroupBy) sqlScan(ctx context.Context, root *DBPackageQuery, v any) error {
|
||||||
selector := root.sqlQuery(ctx).Select()
|
selector := root.sqlQuery(ctx).Select()
|
||||||
aggregation := make([]string, 0, len(_g.fns))
|
aggregation := make([]string, 0, len(dpgb.fns))
|
||||||
for _, fn := range _g.fns {
|
for _, fn := range dpgb.fns {
|
||||||
aggregation = append(aggregation, fn(selector))
|
aggregation = append(aggregation, fn(selector))
|
||||||
}
|
}
|
||||||
if len(selector.SelectedColumns()) == 0 {
|
if len(selector.SelectedColumns()) == 0 {
|
||||||
columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
|
columns := make([]string, 0, len(*dpgb.flds)+len(dpgb.fns))
|
||||||
for _, f := range *_g.flds {
|
for _, f := range *dpgb.flds {
|
||||||
columns = append(columns, selector.C(f))
|
columns = append(columns, selector.C(f))
|
||||||
}
|
}
|
||||||
columns = append(columns, aggregation...)
|
columns = append(columns, aggregation...)
|
||||||
selector.Select(columns...)
|
selector.Select(columns...)
|
||||||
}
|
}
|
||||||
selector.GroupBy(selector.Columns(*_g.flds...)...)
|
selector.GroupBy(selector.Columns(*dpgb.flds...)...)
|
||||||
if err := selector.Err(); err != nil {
|
if err := selector.Err(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
rows := &sql.Rows{}
|
rows := &sql.Rows{}
|
||||||
query, args := selector.Query()
|
query, args := selector.Query()
|
||||||
if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
|
if err := dpgb.build.driver.Query(ctx, query, args, rows); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer rows.Close()
|
defer rows.Close()
|
||||||
@@ -508,27 +506,27 @@ type DBPackageSelect struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Aggregate adds the given aggregation functions to the selector query.
|
// Aggregate adds the given aggregation functions to the selector query.
|
||||||
func (_s *DBPackageSelect) Aggregate(fns ...AggregateFunc) *DBPackageSelect {
|
func (dps *DBPackageSelect) Aggregate(fns ...AggregateFunc) *DBPackageSelect {
|
||||||
_s.fns = append(_s.fns, fns...)
|
dps.fns = append(dps.fns, fns...)
|
||||||
return _s
|
return dps
|
||||||
}
|
}
|
||||||
|
|
||||||
// Scan applies the selector query and scans the result into the given value.
|
// Scan applies the selector query and scans the result into the given value.
|
||||||
func (_s *DBPackageSelect) Scan(ctx context.Context, v any) error {
|
func (dps *DBPackageSelect) Scan(ctx context.Context, v any) error {
|
||||||
ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
|
ctx = setContextOp(ctx, dps.ctx, "Select")
|
||||||
if err := _s.prepareQuery(ctx); err != nil {
|
if err := dps.prepareQuery(ctx); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return scanWithInterceptors[*DBPackageQuery, *DBPackageSelect](ctx, _s.DBPackageQuery, _s, _s.inters, v)
|
return scanWithInterceptors[*DBPackageQuery, *DBPackageSelect](ctx, dps.DBPackageQuery, dps, dps.inters, v)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_s *DBPackageSelect) sqlScan(ctx context.Context, root *DBPackageQuery, v any) error {
|
func (dps *DBPackageSelect) sqlScan(ctx context.Context, root *DBPackageQuery, v any) error {
|
||||||
selector := root.sqlQuery(ctx)
|
selector := root.sqlQuery(ctx)
|
||||||
aggregation := make([]string, 0, len(_s.fns))
|
aggregation := make([]string, 0, len(dps.fns))
|
||||||
for _, fn := range _s.fns {
|
for _, fn := range dps.fns {
|
||||||
aggregation = append(aggregation, fn(selector))
|
aggregation = append(aggregation, fn(selector))
|
||||||
}
|
}
|
||||||
switch n := len(*_s.selector.flds); {
|
switch n := len(*dps.selector.flds); {
|
||||||
case n == 0 && len(aggregation) > 0:
|
case n == 0 && len(aggregation) > 0:
|
||||||
selector.Select(aggregation...)
|
selector.Select(aggregation...)
|
||||||
case n != 0 && len(aggregation) > 0:
|
case n != 0 && len(aggregation) > 0:
|
||||||
@@ -536,7 +534,7 @@ func (_s *DBPackageSelect) sqlScan(ctx context.Context, root *DBPackageQuery, v
|
|||||||
}
|
}
|
||||||
rows := &sql.Rows{}
|
rows := &sql.Rows{}
|
||||||
query, args := selector.Query()
|
query, args := selector.Query()
|
||||||
if err := _s.driver.Query(ctx, query, args, rows); err != nil {
|
if err := dps.driver.Query(ctx, query, args, rows); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer rows.Close()
|
defer rows.Close()
|
||||||
@@ -544,7 +542,7 @@ func (_s *DBPackageSelect) sqlScan(ctx context.Context, root *DBPackageQuery, v
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Modify adds a query modifier for attaching custom logic to queries.
|
// Modify adds a query modifier for attaching custom logic to queries.
|
||||||
func (_s *DBPackageSelect) Modify(modifiers ...func(s *sql.Selector)) *DBPackageSelect {
|
func (dps *DBPackageSelect) Modify(modifiers ...func(s *sql.Selector)) *DBPackageSelect {
|
||||||
_s.modifiers = append(_s.modifiers, modifiers...)
|
dps.modifiers = append(dps.modifiers, modifiers...)
|
||||||
return _s
|
return dps
|
||||||
}
|
}
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -69,14 +69,14 @@ var (
|
|||||||
columnCheck sql.ColumnCheck
|
columnCheck sql.ColumnCheck
|
||||||
)
|
)
|
||||||
|
|
||||||
// checkColumn checks if the column exists in the given table.
|
// columnChecker checks if the column exists in the given table.
|
||||||
func checkColumn(t, c string) error {
|
func checkColumn(table, column string) error {
|
||||||
initCheck.Do(func() {
|
initCheck.Do(func() {
|
||||||
columnCheck = sql.NewColumnCheck(map[string]func(string) bool{
|
columnCheck = sql.NewColumnCheck(map[string]func(string) bool{
|
||||||
dbpackage.Table: dbpackage.ValidColumn,
|
dbpackage.Table: dbpackage.ValidColumn,
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
return columnCheck(t, c)
|
return columnCheck(table, column)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Asc applies the given fields in ASC order.
|
// Asc applies the given fields in ASC order.
|
||||||
|
|||||||
@@ -13,7 +13,7 @@ var (
|
|||||||
{Name: "id", Type: field.TypeInt, Increment: true},
|
{Name: "id", Type: field.TypeInt, Increment: true},
|
||||||
{Name: "pkgbase", Type: field.TypeString},
|
{Name: "pkgbase", Type: field.TypeString},
|
||||||
{Name: "packages", Type: field.TypeJSON, Nullable: true},
|
{Name: "packages", Type: field.TypeJSON, Nullable: true},
|
||||||
{Name: "status", Type: field.TypeEnum, Nullable: true, Enums: []string{"skipped", "failed", "built", "queued", "delayed", "building", "latest", "signing", "unknown"}, Default: "unknown"},
|
{Name: "status", Type: field.TypeEnum, Nullable: true, Enums: []string{"skipped", "failed", "build", "queued", "delayed", "building", "latest", "signing", "unknown"}, Default: "unknown"},
|
||||||
{Name: "skip_reason", Type: field.TypeString, Nullable: true},
|
{Name: "skip_reason", Type: field.TypeString, Nullable: true},
|
||||||
{Name: "repository", Type: field.TypeEnum, Enums: []string{"extra", "core", "multilib"}},
|
{Name: "repository", Type: field.TypeEnum, Enums: []string{"extra", "core", "multilib"}},
|
||||||
{Name: "march", Type: field.TypeString},
|
{Name: "march", Type: field.TypeString},
|
||||||
|
|||||||
@@ -5,6 +5,6 @@ package runtime
|
|||||||
// The schema-stitching logic is generated in somegit.dev/ALHP/ALHP.GO/ent/runtime.go
|
// The schema-stitching logic is generated in somegit.dev/ALHP/ALHP.GO/ent/runtime.go
|
||||||
|
|
||||||
const (
|
const (
|
||||||
Version = "v0.14.5" // Version of ent codegen.
|
Version = "v0.12.3" // Version of ent codegen.
|
||||||
Sum = "h1:Rj2WOYJtCkWyFo6a+5wB3EfBRP0rnx1fMk6gGA0UUe4=" // Sum of ent codegen.
|
Sum = "h1:N5lO2EOrHpCH5HYfiMOCHYbo+oh5M8GjT0/cx5x6xkk=" // Sum of ent codegen.
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ func (DBPackage) Fields() []ent.Field {
|
|||||||
return []ent.Field{
|
return []ent.Field{
|
||||||
field.String("pkgbase").NotEmpty().Immutable(),
|
field.String("pkgbase").NotEmpty().Immutable(),
|
||||||
field.Strings("packages").Optional(),
|
field.Strings("packages").Optional(),
|
||||||
field.Enum("status").Values("skipped", "failed", "built", "queued", "delayed", "building",
|
field.Enum("status").Values("skipped", "failed", "build", "queued", "delayed", "building",
|
||||||
"latest", "signing", "unknown").Default("unknown").Optional(),
|
"latest", "signing", "unknown").Default("unknown").Optional(),
|
||||||
field.String("skip_reason").Optional(),
|
field.String("skip_reason").Optional(),
|
||||||
field.Enum("repository").Values("extra", "core", "multilib"),
|
field.Enum("repository").Values("extra", "core", "multilib"),
|
||||||
|
|||||||
16
flags.yaml
16
flags.yaml
@@ -20,19 +20,15 @@ common:
|
|||||||
packager: "ALHP $march$ <alhp@harting.dev>"
|
packager: "ALHP $march$ <alhp@harting.dev>"
|
||||||
makeflags: "-j$buildproc$"
|
makeflags: "-j$buildproc$"
|
||||||
# https://somegit.dev/ALHP/ALHP.GO/issues/110
|
# https://somegit.dev/ALHP/ALHP.GO/issues/110
|
||||||
rustflags:
|
rustflags: "-Copt-level=3 -Ctarget-cpu=$march$ -Clink-arg=-Wl,-z,pack-relative-relocs"
|
||||||
- "-Copt-level=3"
|
|
||||||
- "-Ctarget-cpu=$march$"
|
|
||||||
- "-Clink-arg=-z"
|
|
||||||
- "-Clink-arg=pack-relative-relocs"
|
|
||||||
ltoflags:
|
ltoflags:
|
||||||
- "-falign-functions=32" # https://github.com/InBetweenNames/gentooLTO/issues/164
|
- "-falign-functions=32" # https://github.com/InBetweenNames/gentooLTO/issues/164
|
||||||
kcflags: " -march=$march$ -O3"
|
kcflags: " -march=$march$ -O3"
|
||||||
kcppflags: " -march=$march$ -O3"
|
kcppflags: " -march=$march$ -O3"
|
||||||
fcflags: "$FFLAGS"
|
fcflags: "$CFLAGS"
|
||||||
fflags:
|
fflags: "$CFLAGS"
|
||||||
- "-O2": "-O3"
|
ldflags:
|
||||||
- "-march=$march$"
|
- "-z,pack-relative-relocs"
|
||||||
|
|
||||||
lto:
|
lto:
|
||||||
rustflags:
|
rustflags:
|
||||||
@@ -41,4 +37,4 @@ lto:
|
|||||||
options:
|
options:
|
||||||
- "!lto": "lto"
|
- "!lto": "lto"
|
||||||
|
|
||||||
cargo_profile_release_lto: "fat"
|
cargo_profile_release_lto: "fat"
|
||||||
64
go.mod
64
go.mod
@@ -1,57 +1,41 @@
|
|||||||
module somegit.dev/ALHP/ALHP.GO
|
module somegit.dev/ALHP/ALHP.GO
|
||||||
|
|
||||||
go 1.24.6
|
go 1.20
|
||||||
|
|
||||||
require (
|
require (
|
||||||
entgo.io/ent v0.14.5
|
entgo.io/ent v0.12.3
|
||||||
github.com/Jguer/go-alpm/v2 v2.2.2
|
github.com/Jguer/go-alpm/v2 v2.2.0
|
||||||
github.com/Morganamilo/go-pacmanconf v0.0.0-20210502114700-cff030e927a5
|
github.com/Morganamilo/go-pacmanconf v0.0.0-20210502114700-cff030e927a5
|
||||||
github.com/Morganamilo/go-srcinfo v1.0.0
|
github.com/Morganamilo/go-srcinfo v1.0.0
|
||||||
github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500
|
github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b
|
||||||
github.com/gobwas/glob v0.2.3
|
github.com/google/uuid v1.3.0
|
||||||
github.com/google/uuid v1.6.0
|
github.com/jackc/pgx/v4 v4.18.1
|
||||||
github.com/jackc/pgx/v4 v4.18.3
|
github.com/otiai10/copy v1.11.0
|
||||||
github.com/otiai10/copy v1.14.1
|
github.com/sethvargo/go-retry v0.2.4
|
||||||
github.com/prometheus/client_golang v1.23.2
|
github.com/sirupsen/logrus v1.9.2
|
||||||
github.com/sethvargo/go-retry v0.3.0
|
github.com/wercker/journalhook v0.0.0-20180428041537-5d0a5ae867b3
|
||||||
github.com/sirupsen/logrus v1.9.3
|
|
||||||
github.com/wercker/journalhook v0.0.0-20230927020745-64542ffa4117
|
|
||||||
gopkg.in/yaml.v2 v2.4.0
|
gopkg.in/yaml.v2 v2.4.0
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
ariga.io/atlas v0.37.0 // indirect
|
ariga.io/atlas v0.11.0 // indirect
|
||||||
github.com/agext/levenshtein v1.2.3 // indirect
|
github.com/agext/levenshtein v1.2.3 // indirect
|
||||||
github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
|
github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
|
||||||
github.com/beorn7/perks v1.0.1 // indirect
|
|
||||||
github.com/bmatcuk/doublestar v1.3.4 // indirect
|
|
||||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
|
||||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect
|
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect
|
||||||
github.com/go-openapi/inflect v0.21.3 // indirect
|
github.com/go-openapi/inflect v0.19.0 // indirect
|
||||||
github.com/google/go-cmp v0.7.0 // indirect
|
github.com/google/go-cmp v0.5.9 // indirect
|
||||||
github.com/hashicorp/hcl/v2 v2.24.0 // indirect
|
github.com/hashicorp/hcl/v2 v2.16.2 // indirect
|
||||||
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
|
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
|
||||||
github.com/jackc/pgconn v1.14.3 // indirect
|
github.com/jackc/pgconn v1.14.0 // indirect
|
||||||
github.com/jackc/pgio v1.0.0 // indirect
|
github.com/jackc/pgio v1.0.0 // indirect
|
||||||
github.com/jackc/pgpassfile v1.0.0 // indirect
|
github.com/jackc/pgpassfile v1.0.0 // indirect
|
||||||
github.com/jackc/pgproto3/v2 v2.3.3 // indirect
|
github.com/jackc/pgproto3/v2 v2.3.2 // indirect
|
||||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
|
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
|
||||||
github.com/jackc/pgtype v1.14.4 // indirect
|
github.com/jackc/pgtype v1.14.0 // indirect
|
||||||
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
|
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
|
||||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
github.com/zclconf/go-cty v1.13.2 // indirect
|
||||||
github.com/otiai10/mint v1.6.3 // indirect
|
golang.org/x/crypto v0.9.0 // indirect
|
||||||
github.com/prometheus/client_model v0.6.2 // indirect
|
golang.org/x/mod v0.10.0 // indirect
|
||||||
github.com/prometheus/common v0.67.1 // indirect
|
golang.org/x/sys v0.8.0 // indirect
|
||||||
github.com/prometheus/procfs v0.17.0 // indirect
|
golang.org/x/text v0.9.0 // indirect
|
||||||
github.com/zclconf/go-cty v1.17.0 // indirect
|
|
||||||
github.com/zclconf/go-cty-yaml v1.1.0 // indirect
|
|
||||||
go.yaml.in/yaml/v2 v2.4.3 // indirect
|
|
||||||
golang.org/x/crypto v0.43.0 // indirect
|
|
||||||
golang.org/x/mod v0.29.0 // indirect
|
|
||||||
golang.org/x/sync v0.17.0 // indirect
|
|
||||||
golang.org/x/sys v0.37.0 // indirect
|
|
||||||
golang.org/x/text v0.30.0 // indirect
|
|
||||||
golang.org/x/tools v0.38.0 // indirect
|
|
||||||
google.golang.org/protobuf v1.36.10 // indirect
|
|
||||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
|
||||||
)
|
)
|
||||||
|
|||||||
167
go.sum
167
go.sum
@@ -1,12 +1,11 @@
|
|||||||
ariga.io/atlas v0.37.0 h1:MvbQ25CAHFslttEKEySwYNFrFUdLAPhtU1izOzjXV+o=
|
ariga.io/atlas v0.11.0 h1:aGR7MzsUfmdlDYCpRErQeY2NSuRlPE0/q6drNE/5buM=
|
||||||
ariga.io/atlas v0.37.0/go.mod h1:mHE83ptCxEkd3rO3c7Rvkk6Djf6mVhEiSVhoiNu96CI=
|
ariga.io/atlas v0.11.0/go.mod h1:+TR129FJZ5Lvzms6dvCeGWh1yR6hMvmXBhug4hrNIGk=
|
||||||
entgo.io/ent v0.14.5 h1:Rj2WOYJtCkWyFo6a+5wB3EfBRP0rnx1fMk6gGA0UUe4=
|
entgo.io/ent v0.12.3 h1:N5lO2EOrHpCH5HYfiMOCHYbo+oh5M8GjT0/cx5x6xkk=
|
||||||
entgo.io/ent v0.14.5/go.mod h1:zTzLmWtPvGpmSwtkaayM2cm5m819NdM7z7tYPq3vN0U=
|
entgo.io/ent v0.12.3/go.mod h1:AigGGx+tbrBBYHAzGOg8ND661E5cxx1Uiu5o/otJ6Yg=
|
||||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
|
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
|
||||||
github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
|
github.com/Jguer/go-alpm/v2 v2.2.0 h1:+sh4UEZwTpcAO+vHdySsnLZSnLZIBun8j85BbPExSlg=
|
||||||
github.com/Jguer/go-alpm/v2 v2.2.2 h1:sPwUoZp1X5Tw6K6Ba1lWvVJfcgVNEGVcxARLBttZnC0=
|
github.com/Jguer/go-alpm/v2 v2.2.0/go.mod h1:uLQcTMNM904dRiGU+/JDtDdd7Nd8mVbEVaHjhmziT7w=
|
||||||
github.com/Jguer/go-alpm/v2 v2.2.2/go.mod h1:lfe8gSe83F/KERaQvEfrSqQ4n+8bES+ZIyKWR/gm3MI=
|
|
||||||
github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
|
github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
|
||||||
github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
|
github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
|
||||||
github.com/Morganamilo/go-pacmanconf v0.0.0-20210502114700-cff030e927a5 h1:TMscPjkb1ThXN32LuFY5bEYIcXZx3YlwzhS1GxNpn/c=
|
github.com/Morganamilo/go-pacmanconf v0.0.0-20210502114700-cff030e927a5 h1:TMscPjkb1ThXN32LuFY5bEYIcXZx3YlwzhS1GxNpn/c=
|
||||||
@@ -15,16 +14,10 @@ github.com/Morganamilo/go-srcinfo v1.0.0 h1:Wh4nEF+HJWo+29hnxM18Q2hi+DUf0GejS13+
|
|||||||
github.com/Morganamilo/go-srcinfo v1.0.0/go.mod h1:MP6VGY1NNpVUmYIEgoM9acix95KQqIRyqQ0hCLsyYUY=
|
github.com/Morganamilo/go-srcinfo v1.0.0/go.mod h1:MP6VGY1NNpVUmYIEgoM9acix95KQqIRyqQ0hCLsyYUY=
|
||||||
github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
|
github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
|
||||||
github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
|
github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
|
||||||
github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY=
|
github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw=
|
||||||
github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4=
|
github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo=
|
||||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b h1:6+ZFm0flnudZzdSE0JxlhR2hKnGPcNB35BjQf4RYQDY=
|
||||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M=
|
||||||
github.com/bmatcuk/doublestar v1.3.4 h1:gPypJ5xD31uhX6Tf54sDPUOBXTqKH4c9aPY66CyQrS0=
|
|
||||||
github.com/bmatcuk/doublestar v1.3.4/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE=
|
|
||||||
github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500 h1:6lhrsTEnloDPXyeZBvSYvQf8u86jbKehZPVDDlkgDl4=
|
|
||||||
github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M=
|
|
||||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
|
||||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
|
||||||
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
|
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
|
||||||
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
|
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
|
||||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||||
@@ -37,22 +30,19 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
|
|||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||||
github.com/go-openapi/inflect v0.21.3 h1:TmQvw+9eLrsNp4X0BBQacEZZtAnzk2z1FaLdQQJsDiU=
|
github.com/go-openapi/inflect v0.19.0 h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4=
|
||||||
github.com/go-openapi/inflect v0.21.3/go.mod h1:INezMuUu7SJQc2AyR3WO0DqqYUJSj8Kb4hBd7WtjlAw=
|
github.com/go-openapi/inflect v0.19.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4=
|
||||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||||
github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
|
github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
|
||||||
github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
|
|
||||||
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
|
|
||||||
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
|
|
||||||
github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
|
github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
|
||||||
github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
||||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/hashicorp/hcl/v2 v2.24.0 h1:2QJdZ454DSsYGoaE6QheQZjtKZSUs9Nh2izTWiwQxvE=
|
github.com/hashicorp/hcl/v2 v2.16.2 h1:mpkHZh/Tv+xet3sy3F9Ld4FyI2tUpWe9x3XtPx9f1a0=
|
||||||
github.com/hashicorp/hcl/v2 v2.24.0/go.mod h1:oGoO1FIQYfn/AgyOhlg9qLC6/nOJPX3qGbkZpYAcqfM=
|
github.com/hashicorp/hcl/v2 v2.16.2/go.mod h1:JRmR89jycNkrrqnMmvPDMd56n1rQJ2Q6KocSLCMCXng=
|
||||||
github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
|
github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
|
||||||
github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
|
github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
|
||||||
github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
|
github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
|
||||||
@@ -63,8 +53,8 @@ github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsU
|
|||||||
github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
|
github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
|
||||||
github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY=
|
github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY=
|
||||||
github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
|
github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
|
||||||
github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w=
|
github.com/jackc/pgconn v1.14.0 h1:vrbA9Ud87g6JdFWkHTJXppVce58qPIdP7N8y0Ml/A7Q=
|
||||||
github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM=
|
github.com/jackc/pgconn v1.14.0/go.mod h1:9mBNlny0UvkgJdCDvdVHYSjI+8tD2rnKK69Wz8ti++E=
|
||||||
github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
|
github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
|
||||||
github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
|
github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
|
||||||
github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
|
github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
|
||||||
@@ -80,45 +70,37 @@ github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvW
|
|||||||
github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
|
github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
|
||||||
github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
||||||
github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
||||||
github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag=
|
github.com/jackc/pgproto3/v2 v2.3.2 h1:7eY55bdBeCz1F2fTzSz69QC+pG46jYq9/jtSPiJ5nn0=
|
||||||
github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
github.com/jackc/pgproto3/v2 v2.3.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
||||||
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
|
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
|
||||||
|
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
|
||||||
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
||||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
|
|
||||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
|
||||||
github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
|
github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
|
||||||
github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
|
github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
|
||||||
github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
|
github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
|
||||||
github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM=
|
github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM=
|
||||||
|
github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw=
|
||||||
github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
|
github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
|
||||||
github.com/jackc/pgtype v1.14.4 h1:fKuNiCumbKTAIxQwXfB/nsrnkEI6bPJrrSiMKgbJ2j8=
|
|
||||||
github.com/jackc/pgtype v1.14.4/go.mod h1:aKeozOde08iifGosdJpz9MBZonJOUJxqNpPBcMJTlVA=
|
|
||||||
github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
|
github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
|
||||||
github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
|
github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
|
||||||
github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
|
github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
|
||||||
github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=
|
github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=
|
||||||
github.com/jackc/pgx/v4 v4.18.2/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw=
|
github.com/jackc/pgx/v4 v4.18.1 h1:YP7G1KABtKpB5IHrO9vYwSrCOhs7p3uqhvhhQBptya0=
|
||||||
github.com/jackc/pgx/v4 v4.18.3 h1:dE2/TrEsGX3RBprb3qryqSV9Y60iZN1C6i8IrmW9/BA=
|
github.com/jackc/pgx/v4 v4.18.1/go.mod h1:FydWkUyadDmdNH/mHnGob881GawxeEm7TcMCzkb+qQE=
|
||||||
github.com/jackc/pgx/v4 v4.18.3/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw=
|
|
||||||
github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||||
github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||||
github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||||
github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
|
||||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
|
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
|
||||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
|
||||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
|
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
|
||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4=
|
||||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
|
||||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
|
||||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||||
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||||
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||||
@@ -129,52 +111,31 @@ github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope
|
|||||||
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||||
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||||
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
|
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
|
||||||
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
|
||||||
github.com/mattn/go-sqlite3 v1.14.28 h1:ThEiQrnbtumT+QMknw63Befp/ce/nUPgBPMlRFEum7A=
|
|
||||||
github.com/mattn/go-sqlite3 v1.14.28/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
|
||||||
github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
|
github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
|
||||||
github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
|
github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
|
||||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
github.com/otiai10/copy v1.11.0 h1:OKBD80J/mLBrwnzXqGtFCzprFSGioo30JcmR4APsNwc=
|
||||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
github.com/otiai10/copy v1.11.0/go.mod h1:rSaLseMUsZFFbsFGc7wCJnnkTAvdc5L6VWxPE4308Ww=
|
||||||
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
|
github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks=
|
||||||
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
|
|
||||||
github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8=
|
|
||||||
github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I=
|
|
||||||
github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs=
|
|
||||||
github.com/otiai10/mint v1.6.3/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM=
|
|
||||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
|
|
||||||
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
|
|
||||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
|
||||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
|
||||||
github.com/prometheus/common v0.67.1 h1:OTSON1P4DNxzTg4hmKCc37o4ZAZDv0cfXLkOt0oEowI=
|
|
||||||
github.com/prometheus/common v0.67.1/go.mod h1:RpmT9v35q2Y+lsieQsdOh5sXZ6ajUGC8NjZAmr8vb0Q=
|
|
||||||
github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
|
|
||||||
github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
|
|
||||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
|
||||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
|
||||||
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
||||||
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
|
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
|
||||||
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
|
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
|
||||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||||
github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah2SE=
|
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
|
||||||
github.com/sethvargo/go-retry v0.3.0/go.mod h1:mNX17F0C/HguQMyMyJxcnU471gOZGxCLyYaFyAZraas=
|
github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec=
|
||||||
|
github.com/sethvargo/go-retry v0.2.4/go.mod h1:1afjQuvh7s4gflMObvjLPaWgluLLyhA1wmVZ6KLpICw=
|
||||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
||||||
github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
|
github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
|
||||||
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
||||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
github.com/sirupsen/logrus v1.9.2 h1:oxx1eChJGI6Uks2ZC4W1zpLlVgqB8ner4EuQwV4Ik1Y=
|
||||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
github.com/sirupsen/logrus v1.9.2/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||||
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
|
|
||||||
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
|
|
||||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
|
||||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||||
@@ -188,24 +149,17 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
|||||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
|
||||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
github.com/wercker/journalhook v0.0.0-20180428041537-5d0a5ae867b3 h1:shC1HB1UogxN5Ech3Yqaaxj1X/P656PPCB4RbojIJqc=
|
||||||
github.com/wercker/journalhook v0.0.0-20230927020745-64542ffa4117 h1:67A5tweHp3C7osHjrYsy6pQZ00bYkTTttZ7kiOwwHeA=
|
github.com/wercker/journalhook v0.0.0-20180428041537-5d0a5ae867b3/go.mod h1:XCsSkdKK4gwBMNrOCZWww0pX6AOt+2gYc5Z6jBRrNVg=
|
||||||
github.com/wercker/journalhook v0.0.0-20230927020745-64542ffa4117/go.mod h1:XCsSkdKK4gwBMNrOCZWww0pX6AOt+2gYc5Z6jBRrNVg=
|
|
||||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||||
github.com/zclconf/go-cty v1.17.0 h1:seZvECve6XX4tmnvRzWtJNHdscMtYEx5R7bnnVyd/d0=
|
github.com/zclconf/go-cty v1.13.2 h1:4GvrUxe/QUDYuJKAav4EYqdM47/kZa672LwmXFmEKT0=
|
||||||
github.com/zclconf/go-cty v1.17.0/go.mod h1:wqFzcImaLTI6A5HfsRwB0nj5n0MRZFwmey8YoFPPs3U=
|
github.com/zclconf/go-cty v1.13.2/go.mod h1:YKQzy/7pZ7iq2jNFzy5go57xdxdWoLLpaEp4u238AE0=
|
||||||
github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo=
|
|
||||||
github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM=
|
|
||||||
github.com/zclconf/go-cty-yaml v1.1.0 h1:nP+jp0qPHv2IhUVqmQSzjvqAWcObN0KBkUl2rWBdig0=
|
|
||||||
github.com/zclconf/go-cty-yaml v1.1.0/go.mod h1:9YLUH4g7lOhVWqUbctnVlZ5KLpg7JAprQNgxSZ1Gyxs=
|
|
||||||
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
|
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
|
||||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||||
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||||
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
|
||||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
|
||||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||||
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
|
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
|
||||||
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
|
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
|
||||||
@@ -213,8 +167,6 @@ go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9E
|
|||||||
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||||
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
|
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
|
||||||
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
|
|
||||||
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
|
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
|
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
|
||||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
@@ -225,17 +177,15 @@ golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWP
|
|||||||
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
||||||
golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ=
|
golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
|
||||||
golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
|
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
|
||||||
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
|
|
||||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
|
||||||
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
|
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||||
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
|
|
||||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
@@ -243,13 +193,8 @@ golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLL
|
|||||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
|
||||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
|
||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
|
|
||||||
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
|
||||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
@@ -266,16 +211,12 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
|
||||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
|
||||||
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
|
|
||||||
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
|
||||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
|
||||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
@@ -283,10 +224,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|||||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||||
|
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
|
||||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
|
||||||
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
|
|
||||||
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
|
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
@@ -297,20 +236,14 @@ golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtn
|
|||||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
|
||||||
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
|
|
||||||
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
|
|
||||||
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
|
|
||||||
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
|
||||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||||
gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
|
gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
|
||||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
|||||||
122
housekeeping.go
122
housekeeping.go
@@ -4,7 +4,6 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"somegit.dev/ALHP/ALHP.GO/ent"
|
"somegit.dev/ALHP/ALHP.GO/ent"
|
||||||
"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
|
"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
|
||||||
@@ -13,7 +12,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
func housekeeping(ctx context.Context, repo, march string, wg *sync.WaitGroup) error {
|
func housekeeping(repo, march string, wg *sync.WaitGroup) error {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
fullRepo := repo + "-" + march
|
fullRepo := repo + "-" + march
|
||||||
log.Debugf("[%s] start housekeeping", fullRepo)
|
log.Debugf("[%s] start housekeeping", fullRepo)
|
||||||
@@ -26,7 +25,7 @@ func housekeeping(ctx context.Context, repo, march string, wg *sync.WaitGroup) e
|
|||||||
for _, path := range packages {
|
for _, path := range packages {
|
||||||
mPackage := Package(path)
|
mPackage := Package(path)
|
||||||
|
|
||||||
dbPkg, err := mPackage.DBPackage(ctx, db)
|
dbPkg, err := mPackage.DBPackage(db)
|
||||||
if ent.IsNotFound(err) {
|
if ent.IsNotFound(err) {
|
||||||
log.Infof("[HK] removing orphan %s->%s", fullRepo, filepath.Base(path))
|
log.Infof("[HK] removing orphan %s->%s", fullRepo, filepath.Base(path))
|
||||||
pkg := &ProtoPackage{
|
pkg := &ProtoPackage{
|
||||||
@@ -37,7 +36,7 @@ func housekeeping(ctx context.Context, repo, march string, wg *sync.WaitGroup) e
|
|||||||
buildManager.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
|
buildManager.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
|
||||||
continue
|
continue
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
log.Warningf("[HK] error fetching %s->%q from db: %v", fullRepo, path, err)
|
log.Warningf("[HK] Problem fetching %s->%q from db: %v", fullRepo, path, err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -58,40 +57,12 @@ func housekeeping(ctx context.Context, repo, march string, wg *sync.WaitGroup) e
|
|||||||
buildManager.alpmMutex.Lock()
|
buildManager.alpmMutex.Lock()
|
||||||
pkgResolved, err := dbs.FindSatisfier(mPackage.Name())
|
pkgResolved, err := dbs.FindSatisfier(mPackage.Name())
|
||||||
buildManager.alpmMutex.Unlock()
|
buildManager.alpmMutex.Unlock()
|
||||||
if err != nil ||
|
if err != nil || pkgResolved.DB().Name() != pkg.DBPackage.Repository.String() || pkgResolved.DB().Name() != pkg.Repo.String() ||
|
||||||
pkgResolved.DB().Name() != pkg.DBPackage.Repository.String() ||
|
pkgResolved.Architecture() != pkg.Arch || pkgResolved.Name() != mPackage.Name() {
|
||||||
pkgResolved.DB().Name() != pkg.Repo.String() ||
|
|
||||||
pkgResolved.Architecture() != pkg.Arch ||
|
|
||||||
pkgResolved.Name() != mPackage.Name() ||
|
|
||||||
MatchGlobList(pkg.Pkgbase, conf.Blacklist.Packages) {
|
|
||||||
switch {
|
|
||||||
case err != nil:
|
|
||||||
log.Infof("[HK] %s->%s not included in repo (resolve error: %v)", pkg.FullRepo, mPackage.Name(), err)
|
|
||||||
case pkgResolved.DB().Name() != pkg.DBPackage.Repository.String():
|
|
||||||
log.Infof("[HK] %s->%s not included in repo (repo mismatch: repo:%s != db:%s)", pkg.FullRepo,
|
|
||||||
mPackage.Name(), pkgResolved.DB().Name(), pkg.DBPackage.Repository.String())
|
|
||||||
case pkgResolved.DB().Name() != pkg.Repo.String():
|
|
||||||
log.Infof("[HK] %s->%s not included in repo (repo mismatch: repo:%s != pkg:%s)", pkg.FullRepo,
|
|
||||||
mPackage.Name(), pkgResolved.DB().Name(), pkg.Repo.String())
|
|
||||||
case pkgResolved.Architecture() != pkg.Arch:
|
|
||||||
log.Infof("[HK] %s->%s not included in repo (arch mismatch: repo:%s != pkg:%s)", pkg.FullRepo,
|
|
||||||
mPackage.Name(), pkgResolved.Architecture(), pkg.Arch)
|
|
||||||
case pkgResolved.Name() != mPackage.Name():
|
|
||||||
log.Infof("[HK] %s->%s not included in repo (name mismatch: repo:%s != pkg:%s)", pkg.FullRepo,
|
|
||||||
mPackage.Name(), pkgResolved.Name(), mPackage.Name())
|
|
||||||
case MatchGlobList(pkg.Pkgbase, conf.Blacklist.Packages):
|
|
||||||
log.Infof("[HK] %s->%s not included in repo (blacklisted pkgbase %s)", pkg.FullRepo, mPackage.Name(), pkg.Pkgbase)
|
|
||||||
}
|
|
||||||
|
|
||||||
// package not found on mirror/db -> not part of any repo anymore
|
// package not found on mirror/db -> not part of any repo anymore
|
||||||
err = pkg.findPkgFiles()
|
log.Infof("[HK] %s->%s not included in repo", pkg.FullRepo, mPackage.Name())
|
||||||
if err != nil {
|
|
||||||
log.Errorf("[HK] %s->%s unable to get pkg-files: %v", pkg.FullRepo, mPackage.Name(), err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
err = db.DBPackage.DeleteOne(pkg.DBPackage).Exec(ctx)
|
|
||||||
pkg.DBPackage = nil
|
|
||||||
buildManager.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
|
buildManager.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
|
||||||
|
err = db.DBPackage.DeleteOne(pkg.DBPackage).Exec(context.Background())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -99,7 +70,7 @@ func housekeeping(ctx context.Context, repo, march string, wg *sync.WaitGroup) e
|
|||||||
}
|
}
|
||||||
|
|
||||||
if pkg.DBPackage.LastVerified.Before(pkg.DBPackage.BuildTimeStart) {
|
if pkg.DBPackage.LastVerified.Before(pkg.DBPackage.BuildTimeStart) {
|
||||||
err := pkg.DBPackage.Update().SetLastVerified(time.Now().UTC()).Exec(ctx)
|
err := pkg.DBPackage.Update().SetLastVerified(time.Now().UTC()).Exec(context.Background())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -147,7 +118,7 @@ func housekeeping(ctx context.Context, repo, march string, wg *sync.WaitGroup) e
|
|||||||
DBPackage: dbPkg,
|
DBPackage: dbPkg,
|
||||||
}
|
}
|
||||||
|
|
||||||
if !pkg.isAvailable(ctx, alpmHandle) {
|
if !pkg.isAvailable(alpmHandle) {
|
||||||
log.Infof("[HK] %s->%s not found on mirror, removing", pkg.FullRepo, pkg.Pkgbase)
|
log.Infof("[HK] %s->%s not found on mirror, removing", pkg.FullRepo, pkg.Pkgbase)
|
||||||
err = db.DBPackage.DeleteOne(dbPkg).Exec(context.Background())
|
err = db.DBPackage.DeleteOne(dbPkg).Exec(context.Background())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -161,11 +132,9 @@ func housekeeping(ctx context.Context, repo, march string, wg *sync.WaitGroup) e
|
|||||||
// check lastVersionBuild
|
// check lastVersionBuild
|
||||||
if dbPkg.LastVersionBuild != dbPkg.RepoVersion {
|
if dbPkg.LastVersionBuild != dbPkg.RepoVersion {
|
||||||
log.Infof("[HK] %s->%s updating lastVersionBuild %s -> %s", fullRepo, dbPkg.Pkgbase, dbPkg.LastVersionBuild, dbPkg.RepoVersion)
|
log.Infof("[HK] %s->%s updating lastVersionBuild %s -> %s", fullRepo, dbPkg.Pkgbase, dbPkg.LastVersionBuild, dbPkg.RepoVersion)
|
||||||
nDBPkg, err := dbPkg.Update().SetLastVersionBuild(dbPkg.RepoVersion).Save(ctx)
|
dbPkg, err = dbPkg.Update().SetLastVersionBuild(dbPkg.RepoVersion).Save(context.Background())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warningf("[HK] error updating lastVersionBuild for %s->%s: %v", fullRepo, dbPkg.Pkgbase, err)
|
log.Warningf("[HK] error updating lastVersionBuild for %s->%s: %v", fullRepo, dbPkg.Pkgbase, err)
|
||||||
} else {
|
|
||||||
dbPkg = nDBPkg
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -190,7 +159,7 @@ func housekeeping(ctx context.Context, repo, march string, wg *sync.WaitGroup) e
|
|||||||
ClearRepoVersion().
|
ClearRepoVersion().
|
||||||
ClearTagRev().
|
ClearTagRev().
|
||||||
SetStatus(dbpackage.StatusQueued).
|
SetStatus(dbpackage.StatusQueued).
|
||||||
Save(ctx)
|
Save(context.Background())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -203,56 +172,14 @@ func housekeeping(ctx context.Context, repo, march string, wg *sync.WaitGroup) e
|
|||||||
}
|
}
|
||||||
buildManager.repoPurge[fullRepo] <- []*ProtoPackage{pkg}
|
buildManager.repoPurge[fullRepo] <- []*ProtoPackage{pkg}
|
||||||
}
|
}
|
||||||
|
|
||||||
rawState, err := os.ReadFile(filepath.Join(conf.Basedir.Work, stateDir, dbPkg.Repository.String()+"-"+conf.Arch, dbPkg.Pkgbase))
|
|
||||||
if err != nil {
|
|
||||||
log.Infof("[HK] state not found for %s->%s: %v, removing package", fullRepo, dbPkg.Pkgbase, err)
|
|
||||||
pkg := &ProtoPackage{
|
|
||||||
FullRepo: fullRepo,
|
|
||||||
PkgFiles: existingSplits,
|
|
||||||
March: march,
|
|
||||||
DBPackage: dbPkg,
|
|
||||||
}
|
|
||||||
buildManager.repoPurge[fullRepo] <- []*ProtoPackage{pkg}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
state, err := parseState(string(rawState))
|
|
||||||
if err != nil {
|
|
||||||
log.Warningf("[HK] error parsing state file for %s->%s: %v", fullRepo, dbPkg.Pkgbase, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if dbPkg.TagRev != nil && state.TagRev == *dbPkg.TagRev && state.PkgVer != dbPkg.Version {
|
|
||||||
log.Infof("[HK] reseting package %s->%s with mismatched state information (%s!=%s)",
|
|
||||||
fullRepo, dbPkg.Pkgbase, state.PkgVer, dbPkg.Version)
|
|
||||||
err = dbPkg.Update().SetStatus(dbpackage.StatusQueued).ClearTagRev().Exec(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case dbPkg.Status == dbpackage.StatusLatest && dbPkg.RepoVersion == "":
|
case dbPkg.Status == dbpackage.StatusLatest && dbPkg.RepoVersion == "":
|
||||||
log.Infof("[HK] reseting missing package %s->%s with no repo version", fullRepo, dbPkg.Pkgbase)
|
log.Infof("[HK] reseting missing package %s->%s with no repo version", fullRepo, dbPkg.Pkgbase)
|
||||||
err = dbPkg.Update().SetStatus(dbpackage.StatusQueued).ClearTagRev().ClearRepoVersion().Exec(ctx)
|
err = dbPkg.Update().SetStatus(dbpackage.StatusQueued).ClearTagRev().ClearRepoVersion().Exec(context.Background())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
case dbPkg.Status == dbpackage.StatusSkipped && dbPkg.RepoVersion != "" && !strings.HasPrefix(dbPkg.SkipReason, "delayed"):
|
case dbPkg.Status == dbpackage.StatusSkipped && dbPkg.RepoVersion != "" && strings.HasPrefix(dbPkg.SkipReason, "blacklisted"):
|
||||||
log.Infof("[HK] delete skipped package %s->%s", fullRepo, dbPkg.Pkgbase)
|
log.Infof("[HK] delete blacklisted package %s->%s", fullRepo, dbPkg.Pkgbase)
|
||||||
pkg := &ProtoPackage{
|
|
||||||
FullRepo: fullRepo,
|
|
||||||
March: march,
|
|
||||||
DBPackage: dbPkg,
|
|
||||||
}
|
|
||||||
buildManager.repoPurge[fullRepo] <- []*ProtoPackage{pkg}
|
|
||||||
case dbPkg.Status == dbpackage.StatusSkipped && dbPkg.SkipReason == "blacklisted" && !MatchGlobList(pkg.Pkgbase, conf.Blacklist.Packages):
|
|
||||||
log.Infof("[HK] requeue previously blacklisted package %s->%s", fullRepo, dbPkg.Pkgbase)
|
|
||||||
err = dbPkg.Update().SetStatus(dbpackage.StatusQueued).ClearSkipReason().ClearTagRev().Exec(context.Background())
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
case dbPkg.Status == dbpackage.StatusFailed && dbPkg.RepoVersion != "":
|
|
||||||
log.Infof("[HK] package %s->%s failed but still present in repo, removing", fullRepo, dbPkg.Pkgbase)
|
|
||||||
pkg := &ProtoPackage{
|
pkg := &ProtoPackage{
|
||||||
FullRepo: fullRepo,
|
FullRepo: fullRepo,
|
||||||
March: march,
|
March: march,
|
||||||
@@ -266,7 +193,7 @@ func housekeeping(ctx context.Context, repo, march string, wg *sync.WaitGroup) e
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func logHK(ctx context.Context) error {
|
func logHK() error {
|
||||||
// check if package for log exists and if error can be fixed by rebuild
|
// check if package for log exists and if error can be fixed by rebuild
|
||||||
logFiles, err := Glob(filepath.Join(conf.Basedir.Repo, logDir, "/**/*.log"))
|
logFiles, err := Glob(filepath.Join(conf.Basedir.Repo, logDir, "/**/*.log"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -295,7 +222,7 @@ func logHK(ctx context.Context) error {
|
|||||||
dbpackage.Pkgbase(pkg.Pkgbase),
|
dbpackage.Pkgbase(pkg.Pkgbase),
|
||||||
dbpackage.March(pkg.March),
|
dbpackage.March(pkg.March),
|
||||||
dbpackage.StatusEQ(dbpackage.StatusSkipped),
|
dbpackage.StatusEQ(dbpackage.StatusSkipped),
|
||||||
).Exist(ctx)
|
).Exist(context.Background())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -314,7 +241,7 @@ func logHK(ctx context.Context) error {
|
|||||||
if rePortError.MatchString(sLogContent) || reSigError.MatchString(sLogContent) || reDownloadError.MatchString(sLogContent) ||
|
if rePortError.MatchString(sLogContent) || reSigError.MatchString(sLogContent) || reDownloadError.MatchString(sLogContent) ||
|
||||||
reDownloadError2.MatchString(sLogContent) {
|
reDownloadError2.MatchString(sLogContent) {
|
||||||
rows, err := db.DBPackage.Update().Where(dbpackage.Pkgbase(pkg.Pkgbase), dbpackage.March(pkg.March),
|
rows, err := db.DBPackage.Update().Where(dbpackage.Pkgbase(pkg.Pkgbase), dbpackage.March(pkg.March),
|
||||||
dbpackage.StatusEQ(dbpackage.StatusFailed)).ClearTagRev().SetStatus(dbpackage.StatusQueued).Save(ctx)
|
dbpackage.StatusEQ(dbpackage.StatusFailed)).ClearTagRev().SetStatus(dbpackage.StatusQueued).Save(context.Background())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -328,7 +255,7 @@ func logHK(ctx context.Context) error {
|
|||||||
dbpackage.March(pkg.March),
|
dbpackage.March(pkg.March),
|
||||||
dbpackage.StatusEQ(dbpackage.StatusFailed),
|
dbpackage.StatusEQ(dbpackage.StatusFailed),
|
||||||
dbpackage.LtoNotIn(dbpackage.LtoAutoDisabled, dbpackage.LtoDisabled),
|
dbpackage.LtoNotIn(dbpackage.LtoAutoDisabled, dbpackage.LtoDisabled),
|
||||||
).ClearTagRev().SetStatus(dbpackage.StatusQueued).SetLto(dbpackage.LtoAutoDisabled).Save(ctx)
|
).ClearTagRev().SetStatus(dbpackage.StatusQueued).SetLto(dbpackage.LtoAutoDisabled).Save(context.Background())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -340,16 +267,3 @@ func logHK(ctx context.Context) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func debugHK() {
|
|
||||||
for _, march := range conf.March {
|
|
||||||
if _, err := os.Stat(filepath.Join(conf.Basedir.Debug, march)); err == nil {
|
|
||||||
log.Debugf("[DHK/%s] start cleanup debug packages", march)
|
|
||||||
cleanCmd := exec.Command("paccache", "-rc", filepath.Join(conf.Basedir.Debug, march), "-k", "1")
|
|
||||||
res, err := cleanCmd.CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
log.Warningf("[DHK/%s] cleanup debug packages failed: %v (%s)", march, err, string(res))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
17
main.go
17
main.go
@@ -27,7 +27,6 @@ var (
|
|||||||
db *ent.Client
|
db *ent.Client
|
||||||
journalLog = flag.Bool("journal", false, "Log to systemd journal instead of stdout")
|
journalLog = flag.Bool("journal", false, "Log to systemd journal instead of stdout")
|
||||||
checkInterval = flag.Int("interval", 5, "How often svn2git should be checked in minutes (default: 5)")
|
checkInterval = flag.Int("interval", 5, "How often svn2git should be checked in minutes (default: 5)")
|
||||||
configFile = flag.String("config", "config.yaml", "set config file name/path")
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
@@ -39,7 +38,7 @@ func main() {
|
|||||||
|
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
|
|
||||||
confStr, err := os.ReadFile(*configFile)
|
confStr, err := os.ReadFile("config.yaml")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("error reading config file: %v", err)
|
log.Fatalf("error reading config file: %v", err)
|
||||||
}
|
}
|
||||||
@@ -86,8 +85,7 @@ func main() {
|
|||||||
}(db)
|
}(db)
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
if err := db.Schema.Create(context.Background(), migrate.WithDropIndex(true), migrate.WithDropColumn(true)); err != nil {
|
||||||
if err := db.Schema.Create(ctx, migrate.WithDropIndex(true), migrate.WithDropColumn(true)); err != nil {
|
|
||||||
log.Panicf("automigrate failed: %v", err)
|
log.Panicf("automigrate failed: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -101,13 +99,11 @@ func main() {
|
|||||||
repoWG: new(sync.WaitGroup),
|
repoWG: new(sync.WaitGroup),
|
||||||
}
|
}
|
||||||
|
|
||||||
buildManager.setupMetrics(conf.Metrics.Port)
|
err = setupChroot()
|
||||||
|
|
||||||
err = setupChroot(ctx)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Panicf("unable to setup chroot: %v", err)
|
log.Panicf("unable to setup chroot: %v", err)
|
||||||
}
|
}
|
||||||
err = syncMarchs(ctx)
|
err = syncMarchs()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Panicf("error syncing marchs: %v", err)
|
log.Panicf("error syncing marchs: %v", err)
|
||||||
}
|
}
|
||||||
@@ -118,9 +114,12 @@ func main() {
|
|||||||
log.Panicf("error while ALPM-init: %v", err)
|
log.Panicf("error while ALPM-init: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
_ = buildManager.syncWorker(ctx)
|
_ = buildManager.syncWorker(ctx)
|
||||||
}()
|
}()
|
||||||
|
go buildManager.htmlWorker(ctx)
|
||||||
|
|
||||||
killLoop:
|
killLoop:
|
||||||
for {
|
for {
|
||||||
@@ -128,7 +127,7 @@ killLoop:
|
|||||||
case <-killSignals:
|
case <-killSignals:
|
||||||
break killLoop
|
break killLoop
|
||||||
case <-reloadSignals:
|
case <-reloadSignals:
|
||||||
confStr, err := os.ReadFile(*configFile)
|
confStr, err := os.ReadFile("config.yaml")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Panicf("unable to open config: %v", err)
|
log.Panicf("unable to open config: %v", err)
|
||||||
}
|
}
|
||||||
|
|||||||
26
metrics.go
26
metrics.go
@@ -1,26 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
|
||||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
|
||||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
|
||||||
log "github.com/sirupsen/logrus"
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (b *BuildManager) setupMetrics(port uint32) {
|
|
||||||
b.metrics.queueSize = promauto.NewGaugeVec(prometheus.GaugeOpts{
|
|
||||||
Name: "build_queue_size",
|
|
||||||
Help: "Build queue size",
|
|
||||||
}, []string{"repository", "status"})
|
|
||||||
|
|
||||||
mux := http.NewServeMux()
|
|
||||||
mux.Handle("/", promhttp.Handler())
|
|
||||||
go func() {
|
|
||||||
err := http.ListenAndServe(fmt.Sprintf(":%d", port), mux) //nolint:gosec
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("failed to start metrics server: %v", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
10
package.go
10
package.go
@@ -70,12 +70,12 @@ func (pkg Package) HasValidSignature() (bool, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// DBPackage returns ent.DBPackage for package
|
// DBPackage returns ent.DBPackage for package
|
||||||
func (pkg Package) DBPackage(ctx context.Context, db *ent.Client) (*ent.DBPackage, error) {
|
func (pkg Package) DBPackage(db *ent.Client) (*ent.DBPackage, error) {
|
||||||
return pkg.DBPackageIsolated(ctx, *pkg.MArch(), pkg.Repo(), db)
|
return pkg.DBPackageIsolated(*pkg.MArch(), pkg.Repo(), db)
|
||||||
}
|
}
|
||||||
|
|
||||||
// DBPackageIsolated returns ent.DBPackage like DBPackage, but not relying on the path for march and repo
|
// DBPackageIsolated returns ent.DBPackage like DBPackage, but not relying on the path for march and repo
|
||||||
func (pkg Package) DBPackageIsolated(ctx context.Context, march string, repo dbpackage.Repository, db *ent.Client) (*ent.DBPackage, error) {
|
func (pkg Package) DBPackageIsolated(march string, repo dbpackage.Repository, db *ent.Client) (*ent.DBPackage, error) {
|
||||||
dbPkg, err := db.DBPackage.Query().Where(func(s *sql.Selector) {
|
dbPkg, err := db.DBPackage.Query().Where(func(s *sql.Selector) {
|
||||||
s.Where(
|
s.Where(
|
||||||
sql.And(
|
sql.And(
|
||||||
@@ -83,9 +83,9 @@ func (pkg Package) DBPackageIsolated(ctx context.Context, march string, repo dbp
|
|||||||
sql.EQ(dbpackage.FieldMarch, march),
|
sql.EQ(dbpackage.FieldMarch, march),
|
||||||
sql.EQ(dbpackage.FieldRepository, repo)),
|
sql.EQ(dbpackage.FieldRepository, repo)),
|
||||||
)
|
)
|
||||||
}).Only(ctx)
|
}).Only(context.Background())
|
||||||
if ent.IsNotFound(err) {
|
if ent.IsNotFound(err) {
|
||||||
log.Debugf("not found in database: %s", pkg.Name())
|
log.Debugf("Not found in database: %s", pkg.Name())
|
||||||
return nil, err
|
return nil, err
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
|||||||
165
proto_package.go
165
proto_package.go
@@ -50,36 +50,28 @@ func (p *ProtoPackage) isEligible(ctx context.Context) bool {
|
|||||||
p.DBPackage.SkipReason = "arch = any"
|
p.DBPackage.SkipReason = "arch = any"
|
||||||
p.DBPackage.Status = dbpackage.StatusSkipped
|
p.DBPackage.Status = dbpackage.StatusSkipped
|
||||||
skipping = true
|
skipping = true
|
||||||
case MatchGlobList(p.Pkgbase, conf.Blacklist.Packages):
|
case Contains(conf.Blacklist.Packages, p.Pkgbase):
|
||||||
log.Debugf("skipped %s: package on no-build list", p.Pkgbase)
|
log.Debugf("skipped %s: blacklisted package", p.Pkgbase)
|
||||||
p.DBPackage.SkipReason = "blacklisted"
|
p.DBPackage.SkipReason = "blacklisted"
|
||||||
p.DBPackage.Status = dbpackage.StatusSkipped
|
p.DBPackage.Status = dbpackage.StatusSkipped
|
||||||
skipping = true
|
skipping = true
|
||||||
case p.DBPackage.MaxRss != nil && datasize.ByteSize(*p.DBPackage.MaxRss)*datasize.KB > conf.Build.MemoryLimit: //nolint:gosec
|
case p.DBPackage.MaxRss != nil && datasize.ByteSize(*p.DBPackage.MaxRss)*datasize.KB > conf.Build.MemoryLimit:
|
||||||
log.Debugf("skipped %s: memory limit exceeded (%s)", p.Pkgbase, datasize.ByteSize(*p.DBPackage.MaxRss)*datasize.KB) //nolint:gosec
|
log.Debugf("skipped %s: memory limit exceeded (%s)", p.Pkgbase, datasize.ByteSize(*p.DBPackage.MaxRss)*datasize.KB)
|
||||||
p.DBPackage.SkipReason = "memory limit exceeded"
|
p.DBPackage.SkipReason = "memory limit exceeded"
|
||||||
p.DBPackage.Status = dbpackage.StatusSkipped
|
p.DBPackage.Status = dbpackage.StatusSkipped
|
||||||
skipping = true
|
skipping = true
|
||||||
case p.isPkgFailed():
|
case p.isPkgFailed():
|
||||||
log.Debugf("skipped %s: failed build", p.Pkgbase)
|
log.Debugf("skipped %s: failed build", p.Pkgbase)
|
||||||
skipping = true
|
skipping = true
|
||||||
case p.Srcinfo != nil:
|
|
||||||
// skip haskell packages, since they cannot be optimized currently (no -O3 & march has no effect as far as I know)
|
|
||||||
if Contains(p.Srcinfo.MakeDepends, "ghc") || Contains(p.Srcinfo.MakeDepends, "haskell-ghc") ||
|
|
||||||
Contains(p.Srcinfo.Depends, "ghc") || Contains(p.Srcinfo.Depends, "haskell-ghc") {
|
|
||||||
log.Debugf("skipped %s: haskell", p.Pkgbase)
|
|
||||||
p.DBPackage.SkipReason = "haskell"
|
|
||||||
p.DBPackage.Status = dbpackage.StatusSkipped
|
|
||||||
skipping = true
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if skipping {
|
if skipping {
|
||||||
p.DBPackage = p.DBPackage.Update().SetUpdated(time.Now()).SetVersion(p.Version).SetStatus(p.DBPackage.Status).
|
p.DBPackage = p.DBPackage.Update().SetUpdated(time.Now()).SetVersion(p.Version).SetStatus(p.DBPackage.Status).
|
||||||
SetSkipReason(p.DBPackage.SkipReason).SetTagRev(p.State.TagRev).SaveX(ctx)
|
SetSkipReason(p.DBPackage.SkipReason).SetTagRev(p.State.TagRev).SaveX(ctx)
|
||||||
return false
|
return false
|
||||||
|
} else {
|
||||||
|
p.DBPackage = p.DBPackage.Update().SetUpdated(time.Now()).SetVersion(p.Version).SaveX(ctx)
|
||||||
}
|
}
|
||||||
p.DBPackage = p.DBPackage.Update().SetUpdated(time.Now()).SetVersion(p.Version).SaveX(ctx)
|
|
||||||
|
|
||||||
if Contains(conf.Blacklist.LTO, p.Pkgbase) && p.DBPackage.Lto != dbpackage.LtoDisabled {
|
if Contains(conf.Blacklist.LTO, p.Pkgbase) && p.DBPackage.Lto != dbpackage.LtoDisabled {
|
||||||
p.DBPackage = p.DBPackage.Update().SetLto(dbpackage.LtoDisabled).SaveX(ctx)
|
p.DBPackage = p.DBPackage.Update().SetLto(dbpackage.LtoDisabled).SaveX(ctx)
|
||||||
@@ -88,7 +80,7 @@ func (p *ProtoPackage) isEligible(ctx context.Context) bool {
|
|||||||
repoVer, err := p.repoVersion()
|
repoVer, err := p.repoVersion()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
p.DBPackage = p.DBPackage.Update().ClearRepoVersion().SaveX(ctx)
|
p.DBPackage = p.DBPackage.Update().ClearRepoVersion().SaveX(ctx)
|
||||||
} else if alpm.VerCmp(repoVer, p.Version) > 0 {
|
} else if err == nil && alpm.VerCmp(repoVer, p.Version) > 0 {
|
||||||
log.Debugf("skipped %s: version in repo higher than in PKGBUILD (%s < %s)", p.Pkgbase, p.Version, repoVer)
|
log.Debugf("skipped %s: version in repo higher than in PKGBUILD (%s < %s)", p.Pkgbase, p.Version, repoVer)
|
||||||
p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusLatest).ClearSkipReason().SetTagRev(p.State.TagRev).SaveX(ctx)
|
p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusLatest).ClearSkipReason().SetTagRev(p.State.TagRev).SaveX(ctx)
|
||||||
return false
|
return false
|
||||||
@@ -101,7 +93,7 @@ func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {
|
|||||||
start := time.Now().UTC()
|
start := time.Now().UTC()
|
||||||
chroot := "build_" + uuid.New().String()
|
chroot := "build_" + uuid.New().String()
|
||||||
|
|
||||||
buildFolder, err := p.setupBuildDir(ctx)
|
buildFolder, err := p.setupBuildDir()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return time.Since(start), fmt.Errorf("error setting up build folder: %w", err)
|
return time.Since(start), fmt.Errorf("error setting up build folder: %w", err)
|
||||||
}
|
}
|
||||||
@@ -131,16 +123,14 @@ func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {
|
|||||||
|
|
||||||
isLatest, local, syncVersion, err := p.isMirrorLatest(alpmHandle)
|
isLatest, local, syncVersion, err := p.isMirrorLatest(alpmHandle)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
var multipleStateFilesError MultipleStateFilesError
|
switch err.(type) {
|
||||||
var unableToSatisfyError UnableToSatisfyError
|
|
||||||
switch {
|
|
||||||
default:
|
default:
|
||||||
return time.Since(start), fmt.Errorf("error solving deps: %w", err)
|
return time.Since(start), fmt.Errorf("error solving deps: %w", err)
|
||||||
case errors.As(err, &multipleStateFilesError):
|
case MultipleStateFilesError:
|
||||||
log.Infof("skipped %s: multiple PKGBUILDs for dependency found: %v", p.Srcinfo.Pkgbase, err)
|
log.Infof("skipped %s: multiple PKGBUILDs for dependency found: %v", p.Srcinfo.Pkgbase, err)
|
||||||
p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusSkipped).SetSkipReason("multiple PKGBUILD for dep. found").SaveX(ctx)
|
p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusSkipped).SetSkipReason("multiple PKGBUILD for dep. found").SaveX(ctx)
|
||||||
return time.Since(start), err
|
return time.Since(start), err
|
||||||
case errors.As(err, &unableToSatisfyError):
|
case UnableToSatisfyError:
|
||||||
log.Infof("skipped %s: unable to resolve dependencies: %v", p.Srcinfo.Pkgbase, err)
|
log.Infof("skipped %s: unable to resolve dependencies: %v", p.Srcinfo.Pkgbase, err)
|
||||||
p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusSkipped).SetSkipReason("unable to resolve dependencies").SaveX(ctx)
|
p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusSkipped).SetSkipReason("unable to resolve dependencies").SaveX(ctx)
|
||||||
return time.Since(start), ErrorNotEligible
|
return time.Since(start), ErrorNotEligible
|
||||||
@@ -205,33 +195,21 @@ func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {
|
|||||||
cmd := exec.CommandContext(ctx, "makechrootpkg", "-c", "-D", filepath.Join(conf.Basedir.Work, makepkgDir), //nolint:gosec
|
cmd := exec.CommandContext(ctx, "makechrootpkg", "-c", "-D", filepath.Join(conf.Basedir.Work, makepkgDir), //nolint:gosec
|
||||||
"-l", chroot, "-r", filepath.Join(conf.Basedir.Work, chrootDir), "--", "-m", "--noprogressbar", "--config",
|
"-l", chroot, "-r", filepath.Join(conf.Basedir.Work, chrootDir), "--", "-m", "--noprogressbar", "--config",
|
||||||
filepath.Join(conf.Basedir.Work, makepkgDir, fmt.Sprintf(makepkgFile, p.March)))
|
filepath.Join(conf.Basedir.Work, makepkgDir, fmt.Sprintf(makepkgFile, p.March)))
|
||||||
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
|
|
||||||
cmd.Dir = filepath.Dir(p.Pkgbuild)
|
cmd.Dir = filepath.Dir(p.Pkgbuild)
|
||||||
var out bytes.Buffer
|
var out bytes.Buffer
|
||||||
cmd.Stdout = &out
|
cmd.Stdout = &out
|
||||||
cmd.Stderr = &out
|
cmd.Stderr = &out
|
||||||
|
|
||||||
if err = cmd.Start(); err != nil {
|
err = cmd.Start()
|
||||||
|
if err != nil {
|
||||||
return time.Since(start), fmt.Errorf("error starting build: %w", err)
|
return time.Since(start), fmt.Errorf("error starting build: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
pgid, err := syscall.Getpgid(cmd.Process.Pid)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("error getting PGID: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
done := make(chan bool)
|
|
||||||
result := make(chan int64)
|
|
||||||
go pollMemoryUsage(pgid, 1*time.Second, done, result)
|
|
||||||
|
|
||||||
err = cmd.Wait()
|
err = cmd.Wait()
|
||||||
close(done)
|
|
||||||
peakMem := <-result
|
|
||||||
close(result)
|
|
||||||
|
|
||||||
Rusage, ok := cmd.ProcessState.SysUsage().(*syscall.Rusage)
|
Rusage, ok := cmd.ProcessState.SysUsage().(*syscall.Rusage)
|
||||||
if !ok {
|
if !ok {
|
||||||
log.Panicf("rusage is not of type *syscall.Rusage, are we running on unix-like?")
|
log.Panicf("Rusage is not of type *syscall.Rusage, are we running on unix-like?")
|
||||||
}
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -242,21 +220,20 @@ func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {
|
|||||||
if p.DBPackage.Lto != dbpackage.LtoAutoDisabled && p.DBPackage.Lto != dbpackage.LtoDisabled &&
|
if p.DBPackage.Lto != dbpackage.LtoAutoDisabled && p.DBPackage.Lto != dbpackage.LtoDisabled &&
|
||||||
(reLdError.MatchString(out.String()) || reRustLTOError.MatchString(out.String())) {
|
(reLdError.MatchString(out.String()) || reRustLTOError.MatchString(out.String())) {
|
||||||
p.DBPackage.Update().SetStatus(dbpackage.StatusQueued).SetSkipReason("non-LTO rebuild").SetLto(dbpackage.LtoAutoDisabled).ExecX(ctx)
|
p.DBPackage.Update().SetStatus(dbpackage.StatusQueued).SetSkipReason("non-LTO rebuild").SetLto(dbpackage.LtoAutoDisabled).ExecX(ctx)
|
||||||
return time.Since(start), errors.New("ld/lto-incompatibility error detected, LTO disabled")
|
return time.Since(start), fmt.Errorf("ld/lto-incomp error detected, LTO disabled")
|
||||||
}
|
}
|
||||||
|
|
||||||
if reDownloadError.MatchString(out.String()) || reDownloadError2.MatchString(out.String()) ||
|
if reDownloadError.MatchString(out.String()) || reDownloadError2.MatchString(out.String()) ||
|
||||||
rePortError.MatchString(out.String()) || reSigError.MatchString(out.String()) {
|
rePortError.MatchString(out.String()) || reSigError.MatchString(out.String()) {
|
||||||
p.DBPackage.Update().SetStatus(dbpackage.StatusQueued).ExecX(ctx)
|
p.DBPackage.Update().SetStatus(dbpackage.StatusQueued).ExecX(ctx)
|
||||||
return time.Since(start), errors.New("known build error detected")
|
return time.Since(start), fmt.Errorf("known builderror detected")
|
||||||
}
|
}
|
||||||
|
|
||||||
err = os.MkdirAll(filepath.Join(conf.Basedir.Repo, logDir, p.March), 0o755)
|
err = os.MkdirAll(filepath.Join(conf.Basedir.Repo, logDir, p.March), 0o755)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return time.Since(start), fmt.Errorf("error creating logdir: %w", err)
|
return time.Since(start), fmt.Errorf("error creating logdir: %w", err)
|
||||||
}
|
}
|
||||||
err = os.WriteFile(filepath.Join(conf.Basedir.Repo, logDir, p.March, p.Pkgbase+".log"), //nolint:gosec
|
err = os.WriteFile(filepath.Join(conf.Basedir.Repo, logDir, p.March, p.Pkgbase+".log"), out.Bytes(), 0o644) //nolint:gosec
|
||||||
[]byte(strings.ToValidUTF8(out.String(), "")), 0o644)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return time.Since(start), fmt.Errorf("error warting to logdir: %w", err)
|
return time.Since(start), fmt.Errorf("error warting to logdir: %w", err)
|
||||||
}
|
}
|
||||||
@@ -282,7 +259,7 @@ func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if len(pkgFiles) == 0 {
|
if len(pkgFiles) == 0 {
|
||||||
return time.Since(start), errors.New("no build-artifacts found")
|
return time.Since(start), fmt.Errorf("no build-artifacts found")
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, file := range pkgFiles {
|
for _, file := range pkgFiles {
|
||||||
@@ -322,12 +299,12 @@ func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
updatePkg := p.DBPackage.Update().
|
updatePkg := p.DBPackage.Update().
|
||||||
SetStatus(dbpackage.StatusBuilt).
|
SetStatus(dbpackage.StatusBuild).
|
||||||
SetLto(dbpackage.LtoEnabled).
|
SetLto(dbpackage.LtoEnabled).
|
||||||
SetBuildTimeStart(start).
|
SetBuildTimeStart(start).
|
||||||
SetLastVersionBuild(p.Version).
|
SetLastVersionBuild(p.Version).
|
||||||
SetTagRev(p.State.TagRev).
|
SetTagRev(p.State.TagRev).
|
||||||
SetMaxRss(peakMem).
|
SetMaxRss(Rusage.Maxrss).
|
||||||
SetIoOut(Rusage.Oublock).
|
SetIoOut(Rusage.Oublock).
|
||||||
SetIoIn(Rusage.Inblock).
|
SetIoIn(Rusage.Inblock).
|
||||||
SetUTime(Rusage.Utime.Sec).
|
SetUTime(Rusage.Utime.Sec).
|
||||||
@@ -342,7 +319,7 @@ func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {
|
|||||||
return time.Since(start), nil
|
return time.Since(start), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *ProtoPackage) setupBuildDir(ctx context.Context) (string, error) {
|
func (p *ProtoPackage) setupBuildDir() (string, error) {
|
||||||
buildDir := filepath.Join(conf.Basedir.Work, buildDir, p.March, p.Pkgbase+"-"+p.Version)
|
buildDir := filepath.Join(conf.Basedir.Work, buildDir, p.March, p.Pkgbase+"-"+p.Version)
|
||||||
|
|
||||||
err := cleanBuildDir(buildDir, "")
|
err := cleanBuildDir(buildDir, "")
|
||||||
@@ -364,8 +341,8 @@ func (p *ProtoPackage) setupBuildDir(ctx context.Context) (string, error) {
|
|||||||
gr := retry.NewFibonacci(10 * time.Second)
|
gr := retry.NewFibonacci(10 * time.Second)
|
||||||
gr = retry.WithMaxRetries(conf.MaxCloneRetries, gr)
|
gr = retry.WithMaxRetries(conf.MaxCloneRetries, gr)
|
||||||
|
|
||||||
if err := retry.Do(ctx, gr, func(ctx context.Context) error {
|
if err := retry.Do(context.Background(), gr, func(ctx context.Context) error {
|
||||||
cmd := exec.CommandContext(ctx, "git", "clone", "--depth", "1", "--branch", p.State.TagVer, //nolint:gosec
|
cmd := exec.Command("git", "clone", "--depth", "1", "--branch", p.State.TagVer, //nolint:gosec
|
||||||
fmt.Sprintf("https://gitlab.archlinux.org/archlinux/packaging/packages/%s.git", gitlabPath), buildDir)
|
fmt.Sprintf("https://gitlab.archlinux.org/archlinux/packaging/packages/%s.git", gitlabPath), buildDir)
|
||||||
res, err := cmd.CombinedOutput()
|
res, err := cmd.CombinedOutput()
|
||||||
log.Debug(string(res))
|
log.Debug(string(res))
|
||||||
@@ -382,12 +359,13 @@ func (p *ProtoPackage) setupBuildDir(ctx context.Context) (string, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (p *ProtoPackage) repoVersion() (string, error) {
|
func (p *ProtoPackage) repoVersion() (string, error) {
|
||||||
if err := p.findPkgFiles(); err != nil {
|
err := p.findPkgFiles()
|
||||||
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(p.PkgFiles) == 0 {
|
if len(p.PkgFiles) == 0 {
|
||||||
return "", errors.New("not found")
|
return "", fmt.Errorf("not found")
|
||||||
}
|
}
|
||||||
|
|
||||||
fNameSplit := strings.Split(p.PkgFiles[0], "-")
|
fNameSplit := strings.Split(p.PkgFiles[0], "-")
|
||||||
@@ -423,24 +401,7 @@ func (p *ProtoPackage) increasePkgRel(buildNo int) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// increase buildno if already existing
|
nStr := rePkgRel.ReplaceAllLiteralString(string(fStr), "pkgrel="+p.Srcinfo.Pkgrel+"."+strconv.Itoa(buildNo))
|
||||||
var nStr string
|
|
||||||
if strings.Contains(p.Srcinfo.Pkgrel, ".") {
|
|
||||||
pkgRelSplit := strings.Split(p.Srcinfo.Pkgrel, ".")
|
|
||||||
pkgRelBuildNo, err := strconv.Atoi(pkgRelSplit[len(pkgRelSplit)-1])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
nStr = rePkgRel.ReplaceAllLiteralString(string(fStr), "pkgrel="+pkgRelSplit[0]+"."+strconv.Itoa(buildNo+pkgRelBuildNo))
|
|
||||||
versionSplit := strings.Split(p.Version, "-")
|
|
||||||
versionSplit[len(versionSplit)-1] = pkgRelSplit[0] + "." + strconv.Itoa(buildNo+pkgRelBuildNo)
|
|
||||||
p.Version = strings.Join(versionSplit, "-")
|
|
||||||
} else {
|
|
||||||
nStr = rePkgRel.ReplaceAllLiteralString(string(fStr), "pkgrel="+p.Srcinfo.Pkgrel+"."+strconv.Itoa(buildNo))
|
|
||||||
p.Version += "." + strconv.Itoa(buildNo)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = f.Seek(0, 0)
|
_, err = f.Seek(0, 0)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -455,6 +416,7 @@ func (p *ProtoPackage) increasePkgRel(buildNo int) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
p.Version += "." + strconv.Itoa(buildNo)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -477,31 +439,28 @@ func (p *ProtoPackage) importKeys() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *ProtoPackage) isAvailable(ctx context.Context, h *alpm.Handle) bool {
|
func (p *ProtoPackage) isAvailable(h *alpm.Handle) bool {
|
||||||
dbs, err := h.SyncDBs()
|
dbs, err := h.SyncDBs()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
buildManager.alpmMutex.Lock()
|
buildManager.alpmMutex.Lock()
|
||||||
defer buildManager.alpmMutex.Unlock()
|
|
||||||
|
|
||||||
var pkg alpm.IPackage
|
var pkg alpm.IPackage
|
||||||
switch {
|
if p.Srcinfo != nil {
|
||||||
case p.Srcinfo != nil:
|
|
||||||
pkg, err = dbs.FindSatisfier(p.Srcinfo.Packages[0].Pkgname)
|
pkg, err = dbs.FindSatisfier(p.Srcinfo.Packages[0].Pkgname)
|
||||||
case p.DBPackage != nil && len(p.DBPackage.Packages) > 0:
|
} else if p.DBPackage != nil && len(p.DBPackage.Packages) > 0 {
|
||||||
pkg, err = dbs.FindSatisfier(p.DBPackage.Packages[0])
|
pkg, err = dbs.FindSatisfier(p.DBPackage.Packages[0])
|
||||||
default:
|
} else {
|
||||||
cmd := exec.CommandContext(ctx, "unbuffer", "pacsift", "--exact", "--base="+p.Pkgbase, "--repo="+p.Repo.String(), //nolint:gosec
|
cmd := exec.Command("unbuffer", "pacsift", "--exact", "--base="+p.Pkgbase, "--repo="+p.Repo.String()) //nolint:gosec
|
||||||
"--sysroot="+filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot))
|
|
||||||
var res []byte
|
var res []byte
|
||||||
res, err = cmd.Output()
|
res, err = cmd.CombinedOutput()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warningf("error getting packages from pacsift for %s: %v", p.Pkgbase, err)
|
log.Warningf("error getting packages from pacsift for %s: %v", p.Pkgbase, err)
|
||||||
|
buildManager.alpmMutex.Unlock()
|
||||||
return false
|
return false
|
||||||
} else if len(res) == 0 {
|
} else if len(res) == 0 {
|
||||||
log.Warningf("error getting packages from pacsift for %s", p.Pkgbase)
|
buildManager.alpmMutex.Unlock()
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -514,17 +473,16 @@ func (p *ProtoPackage) isAvailable(ctx context.Context, h *alpm.Handle) bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if p.DBPackage != nil {
|
if p.DBPackage != nil {
|
||||||
p.DBPackage, err = p.DBPackage.Update().SetPackages(splitPkgs).Save(ctx)
|
p.DBPackage = p.DBPackage.Update().SetPackages(splitPkgs).SaveX(context.Background())
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
pkg, err = dbs.FindSatisfier(splitPkgs[0])
|
pkg, err = dbs.FindSatisfier(splitPkgs[0])
|
||||||
} else {
|
} else {
|
||||||
log.Warningf("error getting packages from pacsift for %s", p.Pkgbase)
|
log.Warningf("error getting packages from pacsift for %s", p.Pkgbase)
|
||||||
|
buildManager.alpmMutex.Unlock()
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
buildManager.alpmMutex.Unlock()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Debugf("error resolving %s: %v", p.Pkgbase, err)
|
log.Debugf("error resolving %s: %v", p.Pkgbase, err)
|
||||||
return false
|
return false
|
||||||
@@ -546,7 +504,7 @@ func (p *ProtoPackage) isAvailable(ctx context.Context, h *alpm.Handle) bool {
|
|||||||
|
|
||||||
func (p *ProtoPackage) GitVersion(h *alpm.Handle) (string, error) {
|
func (p *ProtoPackage) GitVersion(h *alpm.Handle) (string, error) {
|
||||||
if p.Pkgbase == "" {
|
if p.Pkgbase == "" {
|
||||||
return "", errors.New("invalid arguments")
|
return "", fmt.Errorf("invalid arguments")
|
||||||
}
|
}
|
||||||
|
|
||||||
stateFiles, _ := Glob(filepath.Join(conf.Basedir.Work, stateDir, "**/"+p.Pkgbase))
|
stateFiles, _ := Glob(filepath.Join(conf.Basedir.Work, stateDir, "**/"+p.Pkgbase))
|
||||||
@@ -653,15 +611,15 @@ func (p *ProtoPackage) findPkgFiles() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if p.DBPackage == nil && p.Srcinfo == nil {
|
if p.DBPackage == nil && p.Srcinfo == nil {
|
||||||
return errors.New("unable to find pkgfiles without dbpkg or srcinfo present")
|
return fmt.Errorf("unable to find pkgfiles without dbpkg or srcinfo present")
|
||||||
}
|
}
|
||||||
|
|
||||||
var realPkgs []string
|
var realPkgs []string
|
||||||
if p.DBPackage != nil {
|
if p.DBPackage != nil {
|
||||||
realPkgs = append(realPkgs, p.DBPackage.Packages...)
|
realPkgs = append(realPkgs, p.DBPackage.Packages...)
|
||||||
} else {
|
} else {
|
||||||
for i := range p.Srcinfo.Packages {
|
for _, realPkg := range p.Srcinfo.Packages {
|
||||||
realPkgs = append(realPkgs, p.Srcinfo.Packages[i].Pkgname)
|
realPkgs = append(realPkgs, realPkg.Pkgname)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -680,7 +638,7 @@ func (p *ProtoPackage) findPkgFiles() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *ProtoPackage) toDBPackage(ctx context.Context, create bool) error {
|
func (p *ProtoPackage) toDBPackage(create bool) error {
|
||||||
if p.DBPackage != nil {
|
if p.DBPackage != nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -689,16 +647,13 @@ func (p *ProtoPackage) toDBPackage(ctx context.Context, create bool) error {
|
|||||||
dbpackage.Pkgbase(p.Pkgbase),
|
dbpackage.Pkgbase(p.Pkgbase),
|
||||||
dbpackage.March(p.March),
|
dbpackage.March(p.March),
|
||||||
dbpackage.RepositoryEQ(p.Repo),
|
dbpackage.RepositoryEQ(p.Repo),
|
||||||
).Only(ctx)
|
).Only(context.Background())
|
||||||
if err != nil && ent.IsNotFound(err) && create {
|
if err != nil && ent.IsNotFound(err) && create {
|
||||||
dbPkg, err = db.DBPackage.Create().
|
dbPkg = db.DBPackage.Create().
|
||||||
SetPkgbase(p.Pkgbase).
|
SetPkgbase(p.Pkgbase).
|
||||||
SetMarch(p.March).
|
SetMarch(p.March).
|
||||||
SetRepository(p.Repo).
|
SetRepository(p.Repo).
|
||||||
Save(ctx)
|
SaveX(context.Background())
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else if err != nil && !ent.IsNotFound(err) {
|
} else if err != nil && !ent.IsNotFound(err) {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -716,7 +671,7 @@ func (p *ProtoPackage) exists() (bool, error) {
|
|||||||
return dbPkg, nil
|
return dbPkg, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *ProtoPackage) isMirrorLatest(h *alpm.Handle) (latest bool, foundPkg *alpm.Package, version string, err error) {
|
func (p *ProtoPackage) isMirrorLatest(h *alpm.Handle) (latest bool, foundPkg alpm.IPackage, version string, err error) {
|
||||||
dbs, err := h.SyncDBs()
|
dbs, err := h.SyncDBs()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, nil, "", err
|
return false, nil, "", err
|
||||||
@@ -739,23 +694,18 @@ func (p *ProtoPackage) isMirrorLatest(h *alpm.Handle) (latest bool, foundPkg *al
|
|||||||
return false, nil, "", UnableToSatisfyError{err}
|
return false, nil, "", UnableToSatisfyError{err}
|
||||||
}
|
}
|
||||||
|
|
||||||
svn2gitVer, err := (&ProtoPackage{ //nolint:exhaustruct,exhaustivestruct
|
svn2gitVer, err := (&ProtoPackage{
|
||||||
Pkgbase: pkg.Base(),
|
Pkgbase: pkg.Base(),
|
||||||
March: p.March,
|
March: p.March,
|
||||||
}).GitVersion(h)
|
}).GitVersion(h)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, nil, "", err
|
return false, nil, "", err
|
||||||
} else if svn2gitVer == "" {
|
} else if svn2gitVer == "" {
|
||||||
return false, nil, "", errors.New("no svn2git version")
|
return false, nil, "", fmt.Errorf("no svn2git version")
|
||||||
}
|
}
|
||||||
|
|
||||||
if alpm.VerCmp(svn2gitVer, pkg.Version()) > 0 {
|
if alpm.VerCmp(svn2gitVer, pkg.Version()) > 0 {
|
||||||
switch v := pkg.(type) {
|
return false, pkg, svn2gitVer, nil
|
||||||
case *alpm.Package:
|
|
||||||
return false, v, svn2gitVer, nil
|
|
||||||
default:
|
|
||||||
return false, nil, "", fmt.Errorf("invalid package type: %T", pkg)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -765,16 +715,3 @@ func (p *ProtoPackage) isMirrorLatest(h *alpm.Handle) (latest bool, foundPkg *al
|
|||||||
func (p *ProtoPackage) PkgbaseEquals(p2 *ProtoPackage, marchSensitive bool) bool {
|
func (p *ProtoPackage) PkgbaseEquals(p2 *ProtoPackage, marchSensitive bool) bool {
|
||||||
return (marchSensitive && (p.Pkgbase == p2.Pkgbase && p.FullRepo == p2.FullRepo)) || (!marchSensitive && p.Pkgbase == p2.Pkgbase)
|
return (marchSensitive && (p.Pkgbase == p2.Pkgbase && p.FullRepo == p2.FullRepo)) || (!marchSensitive && p.Pkgbase == p2.Pkgbase)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *ProtoPackage) IsBuilt() (bool, error) {
|
|
||||||
if p.DBPackage == nil {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
matches, err := filepath.Glob(filepath.Join(conf.Basedir.Work, waitingDir, p.FullRepo, p.DBPackage.Packages[0]+"*-x86_64.pkg.tar.zst"))
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return len(matches) > 0, nil
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -51,55 +51,10 @@ package() {
|
|||||||
# vim:set sw=2 et:
|
# vim:set sw=2 et:
|
||||||
`
|
`
|
||||||
|
|
||||||
const PkgbuildTestWithPkgrelSub = `# Maintainer: Jan Alexander Steffens (heftig) <heftig@archlinux.org>
|
func TestIncreasePkgRel(t *testing.T) {
|
||||||
|
pkgbuild, err := os.CreateTemp("", "")
|
||||||
pkgname=gnome-todo
|
|
||||||
pkgver=41.0+r69+ga9a5b7cd
|
|
||||||
pkgrel=1.1
|
|
||||||
pkgdesc="Task manager for GNOME"
|
|
||||||
url="https://wiki.gnome.org/Apps/Todo"
|
|
||||||
arch=(x86_64)
|
|
||||||
license=(GPL)
|
|
||||||
depends=(evolution-data-server libpeas python gtk4 libportal-gtk4 libadwaita)
|
|
||||||
makedepends=(gobject-introspection appstream-glib git meson yelp-tools)
|
|
||||||
groups=(gnome-extra)
|
|
||||||
_commit=a9a5b7cdde0244331d2d49220f04018be60c018e # master
|
|
||||||
source=("git+https://gitlab.gnome.org/GNOME/gnome-todo.git#commit=$_commit")
|
|
||||||
sha256sums=('SKIP')
|
|
||||||
|
|
||||||
pkgver() {
|
|
||||||
cd $pkgname
|
|
||||||
git describe --tags | sed 's/^GNOME_TODO_//;s/_/./g;s/[^-]*-g/r&/;s/-/+/g'
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
prepare() {
|
|
||||||
cd $pkgname
|
|
||||||
}
|
|
||||||
|
|
||||||
build() {
|
|
||||||
arch-meson $pkgname build
|
|
||||||
meson compile -C build
|
|
||||||
}
|
|
||||||
|
|
||||||
check() (
|
|
||||||
glib-compile-schemas "${GSETTINGS_SCHEMA_DIR:=$PWD/$pkgname/data}"
|
|
||||||
export GSETTINGS_SCHEMA_DIR
|
|
||||||
|
|
||||||
meson test -C build --print-errorlogs
|
|
||||||
)
|
|
||||||
|
|
||||||
package() {
|
|
||||||
meson install -C build --destdir "$pkgdir"
|
|
||||||
}
|
|
||||||
|
|
||||||
# vim:set sw=2 et:
|
|
||||||
`
|
|
||||||
|
|
||||||
func TestIncreasePkgRel(t *testing.T) { //nolint:paralleltest
|
|
||||||
pkgbuild, err := os.CreateTemp(t.TempDir(), "")
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal("unable to setup temp. PKGBUILD")
|
t.Fatal("Unable to setup temp. PKGBUILD")
|
||||||
}
|
}
|
||||||
defer func(name string) {
|
defer func(name string) {
|
||||||
_ = os.Remove(name)
|
_ = os.Remove(name)
|
||||||
@@ -107,7 +62,7 @@ func TestIncreasePkgRel(t *testing.T) { //nolint:paralleltest
|
|||||||
|
|
||||||
_, err = pkgbuild.WriteString(PkgbuildTest)
|
_, err = pkgbuild.WriteString(PkgbuildTest)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal("unable to write to temp. PKGBUILD")
|
t.Fatal("Unable to write to temp. PKGBUILD")
|
||||||
}
|
}
|
||||||
_ = pkgbuild.Close()
|
_ = pkgbuild.Close()
|
||||||
|
|
||||||
@@ -140,48 +95,3 @@ func TestIncreasePkgRel(t *testing.T) { //nolint:paralleltest
|
|||||||
t.Fail()
|
t.Fail()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestIncreasePkgRelWithPkgSub(t *testing.T) { //nolint:paralleltest
|
|
||||||
pkgbuild, err := os.CreateTemp(t.TempDir(), "")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal("unable to setup temp. PKGBUILD")
|
|
||||||
}
|
|
||||||
defer func(name string) {
|
|
||||||
_ = os.Remove(name)
|
|
||||||
}(pkgbuild.Name())
|
|
||||||
|
|
||||||
_, err = pkgbuild.WriteString(PkgbuildTestWithPkgrelSub)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal("unable to write to temp. PKGBUILD")
|
|
||||||
}
|
|
||||||
_ = pkgbuild.Close()
|
|
||||||
|
|
||||||
buildPkg := &ProtoPackage{
|
|
||||||
Pkgbase: "gnome-todo",
|
|
||||||
Pkgbuild: pkgbuild.Name(),
|
|
||||||
}
|
|
||||||
|
|
||||||
err = buildPkg.increasePkgRel(1)
|
|
||||||
if err != nil {
|
|
||||||
t.Logf("increasePkgRel: %v", err)
|
|
||||||
t.Fail()
|
|
||||||
}
|
|
||||||
|
|
||||||
versionSplit := strings.Split(buildPkg.Version, "-")
|
|
||||||
if versionSplit[len(versionSplit)-1] != "1.2" {
|
|
||||||
t.Logf("increasePkgRel: expected 1.2 pkgrel, got: %s", buildPkg.Version)
|
|
||||||
t.Fail()
|
|
||||||
}
|
|
||||||
|
|
||||||
buildPkg.Srcinfo = nil
|
|
||||||
err = buildPkg.genSrcinfo()
|
|
||||||
if err != nil {
|
|
||||||
t.Logf("increasePkgRel: %v", err)
|
|
||||||
t.Fail()
|
|
||||||
}
|
|
||||||
|
|
||||||
if buildPkg.Srcinfo.Pkgrel != "1.2" {
|
|
||||||
t.Logf("increasePkgRel: expected 1.2 pkgrel, got: %s", buildPkg.Srcinfo.Pkgrel)
|
|
||||||
t.Fail()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
186
tpl/packages.html
Normal file
186
tpl/packages.html
Normal file
@@ -0,0 +1,186 @@
|
|||||||
|
<!DOCTYPE html>
|
||||||
|
<html lang="en">
|
||||||
|
<head>
|
||||||
|
<meta charset="UTF-8">
|
||||||
|
<meta content="width=device-width, initial-scale=1" name="viewport">
|
||||||
|
|
||||||
|
<title>ALHP Status</title>
|
||||||
|
|
||||||
|
<meta content="dark light" name="color-scheme">
|
||||||
|
<link crossorigin="anonymous" href="https://cdn.jsdelivr.net/npm/bootstrap-dark-5@1.1.3/dist/css/bootstrap-dark.css"
|
||||||
|
integrity="sha256-jtwIepgD1ro9ko1W5a6PAGr8IUIXA3FqBZPAXNYVREE=" rel="stylesheet">
|
||||||
|
<link crossorigin="anonymous" href="https://cdn.jsdelivr.net/npm/fork-awesome@1.2.0/css/fork-awesome.min.css"
|
||||||
|
integrity="sha256-XoaMnoYC5TH6/+ihMEnospgm0J1PM/nioxbOUdnM8HY=" rel="stylesheet">
|
||||||
|
<style>
|
||||||
|
.accordion:last-child {
|
||||||
|
padding-bottom: 8vh;
|
||||||
|
}
|
||||||
|
|
||||||
|
.info-box {
|
||||||
|
overflow: hidden;
|
||||||
|
white-space: nowrap;
|
||||||
|
}
|
||||||
|
</style>
|
||||||
|
</head>
|
||||||
|
<body style="background: #111217 !important;">
|
||||||
|
<nav class="navbar navbar-expand-lg sticky-top navbar-light bg-info">
|
||||||
|
<div class="container">
|
||||||
|
<div class="d-flex justify-content-start">
|
||||||
|
<span class="navbar-brand align-middle">ALHP Status</span>
|
||||||
|
<span class="navbar-text">
|
||||||
|
<a class="align-middle" href="https://somegit.dev/ALHP/ALHP.GO"><i
|
||||||
|
class="fa fa-gitea fs-4"></i></a>
|
||||||
|
</span>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="d-flex justify-content-end">
|
||||||
|
<input type="search" placeholder="Search for packages.." class="form-control" id="table-sort-input"
|
||||||
|
title="Search for package"/>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</nav>
|
||||||
|
|
||||||
|
<div class="container">
|
||||||
|
|
||||||
|
<div class="pt-4 pb-4">
|
||||||
|
<h4>Buildserver Stats</h4>
|
||||||
|
<iframe allowtransparency="true" class="container-fluid rounded-1 overflow-hidden" height="400px"
|
||||||
|
src="https://stats.itsh.dev/public-dashboards/0fb04abb0c5e4b7390cf26a98e6dead1"></iframe>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{{range $march := .March}}
|
||||||
|
<h3 class="mt-5">{{$march.Name}}</h3>
|
||||||
|
<div class="accordion" id="accordion-{{$march.Name}}">
|
||||||
|
{{range $repo := $march.Repos}}
|
||||||
|
<div class="accordion-item bg-opacity-25">
|
||||||
|
<h2 class="accordion-header" id="heading-{{$march.Name}}-{{$repo.Name}}">
|
||||||
|
<button aria-controls="collapse-{{$march.Name}}-{{$repo.Name}}" aria-expanded="false"
|
||||||
|
class="accordion-button"
|
||||||
|
data-bs-target="#collapse-{{$march.Name}}-{{$repo.Name}}"
|
||||||
|
data-bs-toggle="collapse"
|
||||||
|
type="button">{{$repo.Name}}-{{$march.Name}}
|
||||||
|
</button>
|
||||||
|
</h2>
|
||||||
|
<div aria-labelledby="heading-{{$march.Name}}-{{$repo.Name}}"
|
||||||
|
class="accordion-collapse collapse show"
|
||||||
|
data-bs-parent="#accordion-{{$march.Name}}" id="collapse-{{$march.Name}}-{{$repo.Name}}">
|
||||||
|
<div class="accordion-body overflow-auto">
|
||||||
|
<table class="table table-sorted">
|
||||||
|
<thead>
|
||||||
|
<tr>
|
||||||
|
<th scope="col">Pkgbase</th>
|
||||||
|
<th scope="col">Status</th>
|
||||||
|
<th scope="col">Reason</th>
|
||||||
|
<th class="text-center" scope="col"
|
||||||
|
title="link time optimization does not guarantee that package is actually build with LTO">
|
||||||
|
LTO
|
||||||
|
</th>
|
||||||
|
<th class="text-center" scope="col" title="Debug-symbols available via debuginfod">DS
|
||||||
|
</th>
|
||||||
|
<th scope="col">Archlinux Version</th>
|
||||||
|
<th scope="col">{{$repo.Name}}-{{$march.Name}} Version</th>
|
||||||
|
<th class="text-end" scope="col">Info</th>
|
||||||
|
</tr>
|
||||||
|
</thead>
|
||||||
|
<tbody>
|
||||||
|
{{range $pkg := $repo.Packages}}
|
||||||
|
<tr class="table-{{$pkg.Class}}"
|
||||||
|
id="{{$repo.Name}}-{{$march.Name}}-{{$pkg.Pkgbase}}">
|
||||||
|
<td>{{$pkg.Pkgbase}}</td>
|
||||||
|
<td>{{$pkg.Status}}</td>
|
||||||
|
<td>{{$pkg.Skip}}</td>
|
||||||
|
<td class="text-center fs-6">
|
||||||
|
{{if $pkg.LTO}}<i class="fa fa-check fa-lg" style="color: var(--bs-success)"
|
||||||
|
title="build with LTO"></i>{{end}}
|
||||||
|
{{if $pkg.LTODisabled}}<i class="fa fa-times fa-lg" style="color: var(--bs-danger)"
|
||||||
|
title="LTO explicitly disabled"></i>{{end}}
|
||||||
|
{{if $pkg.LTOAutoDisabled}}<i class="fa fa-times-circle-o fa-lg"
|
||||||
|
style="color: var(--bs-danger)"
|
||||||
|
title="LTO automatically disabled"></i>{{end}}
|
||||||
|
{{if $pkg.LTOUnknown}}<i class="fa fa-hourglass-o fa-lg"
|
||||||
|
title="not build with LTO yet"></i>{{end}}
|
||||||
|
</td>
|
||||||
|
<td class="text-center fs-6">
|
||||||
|
{{if $pkg.DebugSym}}<i class="fa fa-check fa-lg" style="color: var(--bs-success)"
|
||||||
|
title="Debug symbols available"></i>{{end}}
|
||||||
|
{{if $pkg.DebugSymNotAvailable}}<i class="fa fa-times fa-lg"
|
||||||
|
style="color: var(--bs-danger)"
|
||||||
|
title="Not build with debug symbols"></i>{{end}}
|
||||||
|
{{if $pkg.DebugSymUnknown}}<i class="fa fa-hourglass-o fa-lg"
|
||||||
|
title="Not build yet"></i>{{end}}
|
||||||
|
</td>
|
||||||
|
<td>{{$pkg.Svn2GitVersion}}</td>
|
||||||
|
<td>{{$pkg.Version}}</td>
|
||||||
|
<td class="text-end info-box">
|
||||||
|
{{with $pkg.Log}}<a href="{{.}}" title="build log"
|
||||||
|
><i class="fa fa-file-text fa-lg"></i></a
|
||||||
|
>{{end}}
|
||||||
|
<a class="text-decoration-none fw-bold"
|
||||||
|
href="https://archlinux.org/packages/?q={{$pkg.Pkgbase}}" title="ArchWeb">AW</a>
|
||||||
|
<a data-bs-html="true" data-bs-placement="bottom" data-bs-toggle="tooltip"
|
||||||
|
href="#{{$repo.Name}}-{{$march.Name}}-{{$pkg.Pkgbase}}"
|
||||||
|
title="{{if $pkg.BuildDate}}Build on {{$pkg.BuildDate}} {{end}}{{if $pkg.BuildDuration}}CPU-Time: {{$pkg.BuildDuration}} {{end}}{{if $pkg.BuildMemory}}Peak-Memory: {{$pkg.BuildMemory}} {{end}}Last checked on {{$pkg.Checked}}">
|
||||||
|
<i class="fa fa-info-circle fa-lg"></i></a>
|
||||||
|
</td>
|
||||||
|
</tr>
|
||||||
|
{{end}}
|
||||||
|
</tbody>
|
||||||
|
</table>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
{{end}}
|
||||||
|
</div>
|
||||||
|
{{end}}
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<footer class="text-center text-lg-start bg-dark mt-3 fixed-bottom">
|
||||||
|
<div class="p-2 text-center">
|
||||||
|
{{.Latest}} <span class="text-primary">build</span>
|
||||||
|
{{.Queued}} <span class="text-warning">queued</span>
|
||||||
|
{{.Skipped}} <span class="text-secondary">skipped</span>
|
||||||
|
{{.Failed}} <span class="text-danger">failed</span>
|
||||||
|
||
|
||||||
|
LTO: {{.LTOEnabled}} <span class="text-success">enabled</span>
|
||||||
|
{{.LTODisabled}} <span class="text-danger">disabled</span>
|
||||||
|
{{.LTOUnknown}} <span class="text-secondary">unknown</span>
|
||||||
|
||
|
||||||
|
<span class="text-muted">{{.Generated}}</span>
|
||||||
|
</div>
|
||||||
|
</footer>
|
||||||
|
|
||||||
|
<script crossorigin="anonymous"
|
||||||
|
integrity="sha256-lSABj6XYH05NydBq+1dvkMu6uiCc/MbLYOFGRkf3iQs=" src="https://cdn.jsdelivr.net/npm/bootstrap@5.2.3/dist/js/bootstrap.bundle.min.js"></script>
|
||||||
|
<script>
|
||||||
|
let input = document.getElementById('table-sort-input');
|
||||||
|
let timeout = null;
|
||||||
|
|
||||||
|
input.addEventListener('input', function (e) {
|
||||||
|
clearTimeout(timeout);
|
||||||
|
|
||||||
|
timeout = setTimeout(searchFilter, 200);
|
||||||
|
});
|
||||||
|
|
||||||
|
function searchFilter() {
|
||||||
|
let input, filter, tr, td, i, txtValue;
|
||||||
|
input = document.getElementById('table-sort-input')
|
||||||
|
filter = input.value.toUpperCase()
|
||||||
|
const tables = document.getElementsByClassName('table-sorted');
|
||||||
|
for (let j = 0; j < tables.length; j++) {
|
||||||
|
tr = tables[j].getElementsByTagName('tr')
|
||||||
|
for (i = 0; i < tr.length; i++) {
|
||||||
|
td = tr[i].getElementsByTagName('td')[0]
|
||||||
|
if (td) {
|
||||||
|
txtValue = td.textContent || td.innerText
|
||||||
|
if (txtValue.toUpperCase().indexOf(filter) > -1) {
|
||||||
|
tr[i].style.display = ''
|
||||||
|
} else {
|
||||||
|
tr[i].style.display = 'none'
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
</script>
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
354
utils.go
354
utils.go
@@ -1,19 +1,14 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/Jguer/go-alpm/v2"
|
"github.com/Jguer/go-alpm/v2"
|
||||||
paconf "github.com/Morganamilo/go-pacmanconf"
|
paconf "github.com/Morganamilo/go-pacmanconf"
|
||||||
"github.com/Morganamilo/go-srcinfo"
|
"github.com/Morganamilo/go-srcinfo"
|
||||||
"github.com/c2h5oh/datasize"
|
"github.com/c2h5oh/datasize"
|
||||||
"github.com/gobwas/glob"
|
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
"gopkg.in/yaml.v2"
|
"gopkg.in/yaml.v2"
|
||||||
"io"
|
|
||||||
"io/fs"
|
"io/fs"
|
||||||
"net/http"
|
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
@@ -27,7 +22,6 @@ import (
|
|||||||
const (
|
const (
|
||||||
pacmanConf = "/usr/share/devtools/pacman.conf.d/multilib.conf"
|
pacmanConf = "/usr/share/devtools/pacman.conf.d/multilib.conf"
|
||||||
makepkgConf = "/usr/share/devtools/makepkg.conf.d/x86_64.conf"
|
makepkgConf = "/usr/share/devtools/makepkg.conf.d/x86_64.conf"
|
||||||
makepkgConfExt = "/etc/makepkg.conf.d"
|
|
||||||
logDir = "logs"
|
logDir = "logs"
|
||||||
pristineChroot = "root"
|
pristineChroot = "root"
|
||||||
buildDir = "build"
|
buildDir = "build"
|
||||||
@@ -42,22 +36,21 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
reVar = regexp.MustCompile(`(?mU)^#?[^\S\r\n]*(\w+)[^\S\r\n]*=[^\S\r\n]*([("])([^)"]*)([)"])[^\S\r\n]*$`)
|
reVar = regexp.MustCompile(`(?mU)^#?[^\S\r\n]*(\w+)[^\S\r\n]*=[^\S\r\n]*([("])([^)"]*)([)"])[^\S\r\n]*$`)
|
||||||
reEnvClean = regexp.MustCompile(`(?m) ([\s\\]+) `)
|
reEnvClean = regexp.MustCompile(`(?m) ([\s\\]+) `)
|
||||||
rePkgRel = regexp.MustCompile(`(?m)^pkgrel\s*=\s*(.+)$`)
|
rePkgRel = regexp.MustCompile(`(?m)^pkgrel\s*=\s*(.+)$`)
|
||||||
rePkgFile = regexp.MustCompile(`^(.+)(?:-.+){2}-(?:x86_64|any)\.pkg\.tar\.zst(?:\.sig)*$`)
|
rePkgFile = regexp.MustCompile(`^(.+)(?:-.+){2}-(?:x86_64|any)\.pkg\.tar\.zst(?:\.sig)*$`)
|
||||||
reLdError = regexp.MustCompile(`(?mi).*collect2: error: ld returned (\d+) exit status.*`)
|
reLdError = regexp.MustCompile(`(?mi).*collect2: error: ld returned (\d+) exit status.*`)
|
||||||
reDownloadError = regexp.MustCompile(`(?m)^error: could not rename .+$`)
|
reDownloadError = regexp.MustCompile(`(?m)^error: could not rename .+$`)
|
||||||
reDownloadError2 = regexp.MustCompile(`(?m)^error: failed retrieving file '.+' from .*: The requested URL returned error: .+$`)
|
reDownloadError2 = regexp.MustCompile(`(?m)^error: failed retrieving file '.+' from .*: The requested URL returned error: .+$`)
|
||||||
rePortError = regexp.MustCompile(`(?m)^OSError: \x5bErrno 98\x5d Address already in use$`)
|
rePortError = regexp.MustCompile(`(?m)^OSError: \x5bErrno 98\x5d Address already in use$`)
|
||||||
reSigError = regexp.MustCompile(`(?m)^error: .*: signature from .* is invalid$`)
|
reSigError = regexp.MustCompile(`(?m)^error: .*: signature from .* is invalid$`)
|
||||||
reRustLTOError = regexp.MustCompile(`(?m)^error: options \x60-C (.+)\x60 and \x60-C lto\x60 are incompatible$`)
|
reRustLTOError = regexp.MustCompile(`(?m)^error: options \x60-C (.+)\x60 and \x60-C lto\x60 are incompatible$`)
|
||||||
reReplaceSinglePlus = regexp.MustCompile(`(?m)([a-zA-Z0-9]+)\+([a-zA-Z]+)`)
|
reReplaceSinglePlus = regexp.MustCompile(`(?m)([a-zA-Z0-9]+)\+([a-zA-Z]+)`)
|
||||||
reReplaceRemainingPlus = regexp.MustCompile(`(?m)\+`)
|
reReplaceRemainingPlus = regexp.MustCompile(`(?m)\+`)
|
||||||
reReplaceSpecialChars = regexp.MustCompile(`(?m)[^a-zA-Z0-9_\-.]`)
|
reReplaceSpecialChars = regexp.MustCompile(`(?m)[^a-zA-Z0-9_\-.]`)
|
||||||
reReplaceUnderscore = regexp.MustCompile(`(?m)[_\-]{2,}`)
|
reReplaceUnderscore = regexp.MustCompile(`(?m)[_\-]{2,}`)
|
||||||
reReplaceTree = regexp.MustCompile(`(?m)^tree$`)
|
reReplaceTree = regexp.MustCompile(`(?m)^tree$`)
|
||||||
reReplacePacsiftWarning = regexp.MustCompile(`(?m)^warning:.*\n*`)
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type Conf struct {
|
type Conf struct {
|
||||||
@@ -86,10 +79,12 @@ type Conf struct {
|
|||||||
Housekeeping struct {
|
Housekeeping struct {
|
||||||
Interval string
|
Interval string
|
||||||
}
|
}
|
||||||
MaxCloneRetries uint64 `yaml:"max_clone_retries"`
|
Status struct {
|
||||||
Metrics struct {
|
Class struct {
|
||||||
Port uint32
|
Skipped, Queued, Latest, Failed, Signing, Building, Unknown string
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
MaxCloneRetries uint64 `yaml:"max_clone_retries"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type Globs []string
|
type Globs []string
|
||||||
@@ -116,10 +111,28 @@ func updateLastUpdated() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func statusID2string(s dbpackage.Status) string {
|
||||||
|
switch s {
|
||||||
|
case dbpackage.StatusSkipped:
|
||||||
|
return conf.Status.Class.Skipped
|
||||||
|
case dbpackage.StatusQueued:
|
||||||
|
return conf.Status.Class.Queued
|
||||||
|
case dbpackage.StatusLatest:
|
||||||
|
return conf.Status.Class.Latest
|
||||||
|
case dbpackage.StatusFailed:
|
||||||
|
return conf.Status.Class.Failed
|
||||||
|
case dbpackage.StatusSigning:
|
||||||
|
return conf.Status.Class.Signing
|
||||||
|
case dbpackage.StatusBuilding, dbpackage.StatusDelayed:
|
||||||
|
return conf.Status.Class.Building
|
||||||
|
default:
|
||||||
|
return conf.Status.Class.Unknown
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func cleanBuildDir(dir, chrootDir string) error {
|
func cleanBuildDir(dir, chrootDir string) error {
|
||||||
if stat, err := os.Stat(dir); err == nil && stat.IsDir() {
|
if stat, err := os.Stat(dir); err == nil && stat.IsDir() {
|
||||||
rmCmd := exec.Command("sudo", "rm_chroot.py", dir)
|
err = os.RemoveAll(dir)
|
||||||
_, err := rmCmd.CombinedOutput()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -145,7 +158,7 @@ func pkgList2MaxMem(pkgList []*ProtoPackage) datasize.ByteSize {
|
|||||||
var sum uint64
|
var sum uint64
|
||||||
for _, pkg := range pkgList {
|
for _, pkg := range pkgList {
|
||||||
if pkg.DBPackage.MaxRss != nil {
|
if pkg.DBPackage.MaxRss != nil {
|
||||||
sum += uint64(*pkg.DBPackage.MaxRss) //nolint:gosec
|
sum += uint64(*pkg.DBPackage.MaxRss)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -157,7 +170,7 @@ func pkgList2MaxMem(pkgList []*ProtoPackage) datasize.ByteSize {
|
|||||||
func stateFileMeta(stateFile string) (repo string, subRepo *string, arch string, err error) {
|
func stateFileMeta(stateFile string) (repo string, subRepo *string, arch string, err error) {
|
||||||
nameSplit := strings.Split(filepath.Base(filepath.Dir(stateFile)), "-")
|
nameSplit := strings.Split(filepath.Base(filepath.Dir(stateFile)), "-")
|
||||||
if len(nameSplit) < 2 {
|
if len(nameSplit) < 2 {
|
||||||
err = errors.New("error getting metainfo")
|
err = fmt.Errorf("error getting metainfo")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -171,7 +184,7 @@ func stateFileMeta(stateFile string) (repo string, subRepo *string, arch string,
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func movePackagesLive(ctx context.Context, fullRepo string) error {
|
func movePackagesLive(fullRepo string) error {
|
||||||
if _, err := os.Stat(filepath.Join(conf.Basedir.Work, waitingDir, fullRepo)); os.IsNotExist(err) {
|
if _, err := os.Stat(filepath.Join(conf.Basedir.Work, waitingDir, fullRepo)); os.IsNotExist(err) {
|
||||||
return nil
|
return nil
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
@@ -191,7 +204,7 @@ func movePackagesLive(ctx context.Context, fullRepo string) error {
|
|||||||
|
|
||||||
for _, file := range pkgFiles {
|
for _, file := range pkgFiles {
|
||||||
pkg := Package(file)
|
pkg := Package(file)
|
||||||
dbPkg, err := pkg.DBPackageIsolated(ctx, march, dbpackage.Repository(repo), db)
|
dbPkg, err := pkg.DBPackageIsolated(march, dbpackage.Repository(repo), db)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if strings.HasSuffix(pkg.Name(), "-debug") {
|
if strings.HasSuffix(pkg.Name(), "-debug") {
|
||||||
mkErr := os.MkdirAll(filepath.Join(conf.Basedir.Debug, march), 0o755)
|
mkErr := os.MkdirAll(filepath.Join(conf.Basedir.Debug, march), 0o755)
|
||||||
@@ -207,11 +220,10 @@ func movePackagesLive(ctx context.Context, fullRepo string) error {
|
|||||||
filepath.Join(conf.Basedir.Debug, march, filepath.Base(file)))
|
filepath.Join(conf.Basedir.Debug, march, filepath.Base(file)))
|
||||||
}
|
}
|
||||||
|
|
||||||
err = Copy(file, filepath.Join(conf.Basedir.Debug, march, filepath.Base(file)))
|
err = os.Rename(file, filepath.Join(conf.Basedir.Debug, march, filepath.Base(file)))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
_ = os.Remove(file)
|
|
||||||
_ = os.Remove(file + ".sig")
|
_ = os.Remove(file + ".sig")
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@@ -238,16 +250,14 @@ func movePackagesLive(ctx context.Context, fullRepo string) error {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
err = Copy(file, filepath.Join(conf.Basedir.Repo, fullRepo, "os", conf.Arch, filepath.Base(file)))
|
err = os.Rename(file, filepath.Join(conf.Basedir.Repo, fullRepo, "os", conf.Arch, filepath.Base(file)))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
_ = os.Remove(file)
|
err = os.Rename(file+".sig", filepath.Join(conf.Basedir.Repo, fullRepo, "os", conf.Arch, filepath.Base(file)+".sig"))
|
||||||
err = Copy(file+".sig", filepath.Join(conf.Basedir.Repo, fullRepo, "os", conf.Arch, filepath.Base(file)+".sig"))
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
_ = os.Remove(file + ".sig")
|
|
||||||
|
|
||||||
toAdd = append(toAdd, &ProtoPackage{
|
toAdd = append(toAdd, &ProtoPackage{
|
||||||
DBPackage: dbPkg,
|
DBPackage: dbPkg,
|
||||||
@@ -269,8 +279,8 @@ func packages2slice(pkgs any) []string {
|
|||||||
switch v := pkgs.(type) {
|
switch v := pkgs.(type) {
|
||||||
case []srcinfo.Package:
|
case []srcinfo.Package:
|
||||||
var sPkgs []string
|
var sPkgs []string
|
||||||
for i := range v {
|
for _, p := range v {
|
||||||
sPkgs = append(sPkgs, v[i].Pkgname)
|
sPkgs = append(sPkgs, p.Pkgname)
|
||||||
}
|
}
|
||||||
|
|
||||||
return sPkgs
|
return sPkgs
|
||||||
@@ -333,46 +343,36 @@ func initALPM(root, dbpath string) (*alpm.Handle, error) {
|
|||||||
return h, nil
|
return h, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func setupChroot(ctx context.Context) error {
|
func setupChroot() error {
|
||||||
_, err := os.Stat(filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot))
|
_, err := os.Stat(filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot))
|
||||||
switch {
|
switch {
|
||||||
case err == nil:
|
case err == nil:
|
||||||
cmd := exec.CommandContext(ctx, "arch-nspawn", "-C", pacmanConf, //nolint:gosec
|
cmd := exec.Command("arch-nspawn", "-C", pacmanConf, filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot), //nolint:gosec
|
||||||
filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot), "pacman", "-Syuu", "--noconfirm")
|
"pacman", "-Syuu", "--noconfirm")
|
||||||
res, err := cmd.CombinedOutput()
|
res, err := cmd.CombinedOutput()
|
||||||
log.Debug(string(res))
|
log.Debug(string(res))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error updating chroot: %w: %s", err, string(res))
|
return fmt.Errorf("error updating chroot: %w\n%s", err, string(res))
|
||||||
}
|
}
|
||||||
case os.IsNotExist(err):
|
case os.IsNotExist(err):
|
||||||
err = os.MkdirAll(filepath.Join(conf.Basedir.Work, chrootDir), 0o755)
|
err = os.MkdirAll(filepath.Join(conf.Basedir.Work, chrootDir), 0o755)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
cmd := exec.CommandContext(ctx, "mkarchroot", "-C", pacmanConf, "-M", makepkgConf, //nolint:gosec
|
cmd := exec.Command("mkarchroot", "-C", pacmanConf, "-M", makepkgConf, //nolint:gosec
|
||||||
filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot), "base-devel", "multilib-devel")
|
filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot), "base-devel", "multilib-devel")
|
||||||
res, err := cmd.CombinedOutput()
|
res, err := cmd.CombinedOutput()
|
||||||
log.Debug(string(res))
|
log.Debug(string(res))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error creating chroot: %w: %s", err, string(res))
|
return fmt.Errorf("error creating chroot: %w\n%s", err, string(res))
|
||||||
}
|
}
|
||||||
|
|
||||||
// copy pacman.conf into pristine chroot to enable multilib
|
cmd = exec.Command("sudo", "cp", pacmanConf, //nolint:gosec
|
||||||
cmd = exec.CommandContext(ctx, "sudo", "cp", pacmanConf, //nolint:gosec
|
|
||||||
filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot, "etc/pacman.conf"))
|
filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot, "etc/pacman.conf"))
|
||||||
res, err = cmd.CombinedOutput()
|
res, err = cmd.CombinedOutput()
|
||||||
log.Debug(string(res))
|
log.Debug(string(res))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error copying pacman.conf to chroot: %w: %s", err, string(res))
|
return fmt.Errorf("error copying pacman.conf to chroot: %w\n%s", err, string(res))
|
||||||
}
|
|
||||||
|
|
||||||
// remove makepkg conf extension, they are covered by our custom makepkg
|
|
||||||
cmd = exec.CommandContext(ctx, "sudo", "rm_chroot.py", //nolint:gosec
|
|
||||||
filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot, "etc/makepkg.conf.d"))
|
|
||||||
res, err = cmd.CombinedOutput()
|
|
||||||
log.Debug(string(res))
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("error removing makepkg.conf.d from chroot: %w: %s", err, string(res))
|
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
return err
|
return err
|
||||||
@@ -380,7 +380,7 @@ func setupChroot(ctx context.Context) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func syncMarchs(ctx context.Context) error {
|
func syncMarchs() error {
|
||||||
files, err := os.ReadDir(conf.Basedir.Repo)
|
files, err := os.ReadDir(conf.Basedir.Repo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -412,9 +412,9 @@ func syncMarchs(ctx context.Context) error {
|
|||||||
for _, repo := range conf.Repos {
|
for _, repo := range conf.Repos {
|
||||||
fRepo := fmt.Sprintf("%s-%s", repo, march)
|
fRepo := fmt.Sprintf("%s-%s", repo, march)
|
||||||
repos = append(repos, fRepo)
|
repos = append(repos, fRepo)
|
||||||
buildManager.repoAdd[fRepo] = make(chan []*ProtoPackage, 1000)
|
buildManager.repoAdd[fRepo] = make(chan []*ProtoPackage, 1000) //nolint:gomnd
|
||||||
buildManager.repoPurge[fRepo] = make(chan []*ProtoPackage, 1000)
|
buildManager.repoPurge[fRepo] = make(chan []*ProtoPackage, 1000) //nolint:gomnd
|
||||||
go buildManager.repoWorker(ctx, fRepo)
|
go buildManager.repoWorker(fRepo)
|
||||||
|
|
||||||
if _, err := os.Stat(filepath.Join(conf.Basedir.Repo, fRepo, "os", conf.Arch)); os.IsNotExist(err) {
|
if _, err := os.Stat(filepath.Join(conf.Basedir.Repo, fRepo, "os", conf.Arch)); os.IsNotExist(err) {
|
||||||
log.Debugf("creating path %s", filepath.Join(conf.Basedir.Repo, fRepo, "os", conf.Arch))
|
log.Debugf("creating path %s", filepath.Join(conf.Basedir.Repo, fRepo, "os", conf.Arch))
|
||||||
@@ -480,7 +480,7 @@ func parseFlagSection(section any, makepkgConf, march string) (string, error) {
|
|||||||
for subSec, subMap := range ct {
|
for subSec, subMap := range ct {
|
||||||
varsReg := reVar.FindAllStringSubmatch(makepkgConf, -1)
|
varsReg := reVar.FindAllStringSubmatch(makepkgConf, -1)
|
||||||
if varsReg == nil {
|
if varsReg == nil {
|
||||||
return "", errors.New("no match in config found")
|
return "", fmt.Errorf("no match in config found")
|
||||||
}
|
}
|
||||||
|
|
||||||
var flags []string
|
var flags []string
|
||||||
@@ -497,8 +497,6 @@ func parseFlagSection(section any, makepkgConf, march string) (string, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if _, ok := subMap.(string); ok && len(orgMatch) > 0 {
|
if _, ok := subMap.(string); ok && len(orgMatch) > 0 {
|
||||||
log.Debugf("replace %s with %s", orgMatch[0], fmt.Sprintf("\n%s=%s%s%s",
|
|
||||||
strings.ToUpper(subSec.(string)), orgMatch[2], replaceStringsFromMap(subMap.(string), replaceMap), orgMatch[4]))
|
|
||||||
makepkgConf = strings.ReplaceAll(makepkgConf, orgMatch[0], fmt.Sprintf("\n%s=%s%s%s",
|
makepkgConf = strings.ReplaceAll(makepkgConf, orgMatch[0], fmt.Sprintf("\n%s=%s%s%s",
|
||||||
strings.ToUpper(subSec.(string)), orgMatch[2], replaceStringsFromMap(subMap.(string), replaceMap), orgMatch[4]))
|
strings.ToUpper(subSec.(string)), orgMatch[2], replaceStringsFromMap(subMap.(string), replaceMap), orgMatch[4]))
|
||||||
continue
|
continue
|
||||||
@@ -507,18 +505,12 @@ func parseFlagSection(section any, makepkgConf, march string) (string, error) {
|
|||||||
if len(orgMatch) == 0 {
|
if len(orgMatch) == 0 {
|
||||||
// no match found, assume env var and append it
|
// no match found, assume env var and append it
|
||||||
log.Debugf("no match found for %s:%v, appending", subSec, subMap)
|
log.Debugf("no match found for %s:%v, appending", subSec, subMap)
|
||||||
switch sm := subMap.(type) {
|
if strings.Contains(subMap.(string), " ") {
|
||||||
case string:
|
makepkgConf += fmt.Sprintf("\nexport %s=%q", strings.ToUpper(subSec.(string)), replaceStringsFromMap(subMap.(string), replaceMap))
|
||||||
if strings.Contains(sm, " ") {
|
|
||||||
makepkgConf += fmt.Sprintf("\nexport %s=%q", strings.ToUpper(subSec.(string)), replaceStringsFromMap(sm, replaceMap))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
makepkgConf += fmt.Sprintf("\nexport %s=%s", strings.ToUpper(subSec.(string)), replaceStringsFromMap(sm, replaceMap))
|
|
||||||
continue
|
|
||||||
case []string:
|
|
||||||
makepkgConf += fmt.Sprintf("\nexport %s=%q", strings.ToUpper(subSec.(string)), replaceStringsFromMap(strings.Join(sm, " "), replaceMap)) //nolint:lll
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
makepkgConf += fmt.Sprintf("\nexport %s=%s", strings.ToUpper(subSec.(string)), replaceStringsFromMap(subMap.(string), replaceMap))
|
||||||
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Debugf("original %s: %v (%d)", subSec, flags, len(flags))
|
log.Debugf("original %s: %v (%d)", subSec, flags, len(flags))
|
||||||
@@ -551,24 +543,7 @@ func setupMakepkg(march string, flags map[string]any) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
makepkgStrBuilder := new(strings.Builder)
|
makepkgStr := string(t)
|
||||||
makepkgStrBuilder.Write(t)
|
|
||||||
|
|
||||||
// read makepkg conf.d
|
|
||||||
makepkgConfExt, err := Glob(filepath.Join(makepkgConfExt, "*.conf"))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, makepkgExt := range makepkgConfExt {
|
|
||||||
ext, err := os.ReadFile(makepkgExt)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
makepkgStrBuilder.Write(ext)
|
|
||||||
}
|
|
||||||
|
|
||||||
makepkgStr := makepkgStrBuilder.String()
|
|
||||||
|
|
||||||
makepkgStr, err = parseFlagSection(flags["common"], makepkgStr, march)
|
makepkgStr, err = parseFlagSection(flags["common"], makepkgStr, march)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -598,7 +573,7 @@ func setupMakepkg(march string, flags map[string]any) error {
|
|||||||
func parseState(state string) (*StateInfo, error) {
|
func parseState(state string) (*StateInfo, error) {
|
||||||
ss := strings.Split(state, " ")
|
ss := strings.Split(state, " ")
|
||||||
if len(ss) != 4 {
|
if len(ss) != 4 {
|
||||||
return nil, errors.New("invalid state file")
|
return nil, fmt.Errorf("invalid state file")
|
||||||
}
|
}
|
||||||
|
|
||||||
return &StateInfo{
|
return &StateInfo{
|
||||||
@@ -670,16 +645,16 @@ func Glob(pattern string) ([]string, error) {
|
|||||||
|
|
||||||
func (globs Globs) Expand() ([]string, error) {
|
func (globs Globs) Expand() ([]string, error) {
|
||||||
var matches = []string{""}
|
var matches = []string{""}
|
||||||
for _, g := range globs {
|
for _, glob := range globs {
|
||||||
var hits []string
|
var hits []string
|
||||||
var hitMap = map[string]bool{}
|
var hitMap = map[string]bool{}
|
||||||
for _, match := range matches {
|
for _, match := range matches {
|
||||||
paths, err := filepath.Glob(match + g)
|
paths, err := filepath.Glob(match + glob)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
for _, path := range paths {
|
for _, path := range paths {
|
||||||
err = filepath.WalkDir(path, func(path string, _ os.DirEntry, err error) error {
|
err = filepath.WalkDir(path, func(path string, d os.DirEntry, err error) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fs.SkipDir
|
return fs.SkipDir
|
||||||
}
|
}
|
||||||
@@ -703,190 +678,3 @@ func (globs Globs) Expand() ([]string, error) {
|
|||||||
|
|
||||||
return matches, nil
|
return matches, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func MatchGlobList(target string, globs []string) bool {
|
|
||||||
for _, lGlob := range globs {
|
|
||||||
tGlob, err := glob.Compile(lGlob)
|
|
||||||
if err != nil {
|
|
||||||
log.Warningf("failed to compile glob %s: %v", lGlob, err)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if tGlob.Match(target) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func Copy(srcPath, dstPath string) (err error) {
|
|
||||||
r, err := os.Open(srcPath)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer func(r *os.File) {
|
|
||||||
_ = r.Close()
|
|
||||||
}(r)
|
|
||||||
|
|
||||||
w, err := os.Create(dstPath)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
if c := w.Close(); err == nil {
|
|
||||||
err = c
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
_, err = io.Copy(w, r)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func downloadSRCINFO(pkg, tag string) (*srcinfo.Srcinfo, error) {
|
|
||||||
resp, err := http.Get(fmt.Sprintf(
|
|
||||||
"https://gitlab.archlinux.org/archlinux/packaging/packages/%s/-/raw/%s/.SRCINFO", pkg, tag),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if resp.StatusCode != http.StatusOK {
|
|
||||||
return nil, errors.New(resp.Status)
|
|
||||||
}
|
|
||||||
|
|
||||||
bResp, err := io.ReadAll(resp.Body)
|
|
||||||
defer resp.Body.Close()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
nSrcInfo, err := srcinfo.Parse(string(bResp))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return nSrcInfo, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// getDescendantPIDs returns the PIDs of every process that is a
// transitive child of rootPID, discovered by scanning /proc (Linux only).
// The order of the returned PIDs is unspecified.
func getDescendantPIDs(rootPID int) ([]int, error) {
	procEntries, err := os.ReadDir("/proc")
	if err != nil {
		return nil, err
	}

	// Build a child -> parent map from the "PPid:" line of each numeric
	// /proc entry. Entries that vanish mid-scan are simply skipped.
	parentOf := map[int]int{}
	for _, entry := range procEntries {
		name := entry.Name()
		if !entry.IsDir() || name[0] < '0' || name[0] > '9' {
			continue
		}
		pid, convErr := strconv.Atoi(name)
		if convErr != nil {
			continue
		}
		data, readErr := os.ReadFile(filepath.Join("/proc", name, "status"))
		if readErr != nil {
			continue
		}
		for _, line := range strings.Split(string(data), "\n") {
			if !strings.HasPrefix(line, "PPid:") {
				continue
			}
			if fields := strings.Fields(line); len(fields) == 2 {
				ppid, _ := strconv.Atoi(fields[1])
				parentOf[pid] = ppid
			}
		}
	}

	// Iterative breadth-first walk from rootPID; each PID has exactly one
	// parent entry, so no PID is collected twice.
	var descendants []int
	queue := []int{rootPID}
	for len(queue) > 0 {
		current := queue[0]
		queue = queue[1:]
		for pid, ppid := range parentOf {
			if ppid == current {
				descendants = append(descendants, pid)
				queue = append(queue, pid)
			}
		}
	}
	return descendants, nil
}
|
|
||||||
|
|
||||||
// MemStats holds memory usage counters for a single process, as parsed
// from /proc/<pid>/status by getMemoryStats.
// NOTE(review): the values are stored in the unit /proc reports on those
// lines (kB), not bytes — confirm consumers expect that.
type MemStats struct {
	// RSS is the resident set size taken from the "VmRSS:" line.
	RSS int64
	// Swap is the swap usage taken from the "VmSwap:" line.
	Swap int64
}
|
|
||||||
|
|
||||||
func getMemoryStats(pid int) (MemStats, error) {
|
|
||||||
statusPath := fmt.Sprintf("/proc/%d/status", pid)
|
|
||||||
data, err := os.ReadFile(statusPath)
|
|
||||||
if err != nil {
|
|
||||||
return MemStats{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
stats := MemStats{}
|
|
||||||
for _, line := range strings.Split(string(data), "\n") {
|
|
||||||
if strings.HasPrefix(line, "VmRSS:") {
|
|
||||||
fields := strings.Fields(line)
|
|
||||||
if len(fields) >= 2 {
|
|
||||||
kb, _ := strconv.ParseInt(fields[1], 10, 64)
|
|
||||||
stats.RSS = kb
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(line, "VmSwap:") {
|
|
||||||
fields := strings.Fields(line)
|
|
||||||
if len(fields) >= 2 {
|
|
||||||
kb, _ := strconv.ParseInt(fields[1], 10, 64)
|
|
||||||
stats.Swap = kb
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return stats, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func pollMemoryUsage(pid int, interval time.Duration, done chan bool, result chan int64) {
|
|
||||||
var totalMemory int64
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-done:
|
|
||||||
result <- totalMemory
|
|
||||||
return
|
|
||||||
default:
|
|
||||||
var totalRSS, totalSwap int64
|
|
||||||
|
|
||||||
rootStats, err := getMemoryStats(pid)
|
|
||||||
if err == nil {
|
|
||||||
totalRSS += rootStats.RSS
|
|
||||||
totalSwap += rootStats.Swap
|
|
||||||
} else {
|
|
||||||
log.Errorf("failed to get memory stats for root process: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
descendants, err := getDescendantPIDs(pid)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("failed to get descendants: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, dpid := range descendants {
|
|
||||||
stats, err := getMemoryStats(dpid)
|
|
||||||
if err == nil {
|
|
||||||
totalRSS += stats.RSS
|
|
||||||
totalSwap += stats.Swap
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
newMemory := totalRSS + totalSwap
|
|
||||||
if newMemory > totalMemory {
|
|
||||||
totalMemory = newMemory
|
|
||||||
}
|
|
||||||
|
|
||||||
time.Sleep(interval)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
Reference in New Issue
Block a user