forked from ALHP/ALHP.GO
Compare commits
396 Commits
.golangci.yaml (new file, 94 lines added)

```yaml
linters-settings:
  dupl:
    threshold: 100
  goconst:
    min-len: 3
    min-occurrences: 4
  gocritic:
    enabled-tags:
      - diagnostic
      - experimental
      - opinionated
      - performance
      - style
    disabled-checks:
      - whyNoLint
      - filepathJoin
  mnd:
    checks:
      - argument
      - case
      - condition
      - return
    ignored-numbers:
      - '0'
      - '1'
      - '2'
      - '3'
      - '4'
      - '5'
      - '6'
      - '7'
      - '8'
      - '9'
      - '10'
      - '100'
      - '1000'
    ignored-functions:
      - strings.SplitN
      - os.OpenFile
      - os.MkdirAll
      - os.WriteFile
  govet:
    check-shadowing: false
  lll:
    line-length: 140
  misspell:
    locale: US
  nolintlint:
    allow-unused: false # report any unused nolint directives
    require-explanation: false # don't require an explanation for nolint directives
    require-specific: false # don't require nolint directives to be specific about which linter is being skipped
  tagliatelle:
    case:
      use-field-name: true
      rules:
        # Any struct tag type can be used.
        # Support string case: `camel`, `pascal`, `kebab`, `snake`, `upperSnake`, `goCamel`, `goPascal`, `goKebab`, `goSnake`, `upper`, `lower`, `header`.
        json: snake
        yaml: snake
        xml: camel

linters:
  enable-all: true
  disable:
    - gochecknoglobals
    - depguard
    - gci
    - gofumpt
    - goimports
    - varnamelen
    - funlen
    - cyclop
    - wsl
    - nosnakecase
    - nlreturn
    - godot
    - nestif
    - wrapcheck
    - gocognit
    - gocyclo
    - maintidx
    - nonamedreturns
    - exhaustivestruct
    - exhaustruct
    - forcetypeassert
    - godox
    - nakedret
    - tagalign
    - maligned
    # remove for new projects
    - errname
    - goerr113
    - depguard
    - noctx
```
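The `tagliatelle` rules above derive the expected tag values from the field name (snake_case for `json`/`yaml`, camelCase for `xml`). For reference, a small Go struct that satisfies them might look like the following — the type and its fields are hypothetical and not taken from ALHP's schema:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// buildReport is a hypothetical example type; its tags follow the
// tagliatelle settings above: json/yaml use snake_case, xml uses camelCase.
type buildReport struct {
	Pkgbase      string `json:"pkgbase" yaml:"pkgbase" xml:"pkgbase"`
	BuildTimeEnd int64  `json:"build_time_end" yaml:"build_time_end" xml:"buildTimeEnd"`
	DebugSymbols bool   `json:"debug_symbols" yaml:"debug_symbols" xml:"debugSymbols"`
}

func main() {
	b, _ := json.Marshal(buildReport{Pkgbase: "gcc", DebugSymbols: true})
	fmt.Println(string(b)) // {"pkgbase":"gcc","build_time_end":0,"debug_symbols":true}
}
```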
README.md (225 changed lines; the compare view's added/removed columns were flattened, so old and new lines appear interleaved below)

`````
@@ -1,15 +1,45 @@
# alhp
# ALHP

Build script for archlinux instructionset enabled repos. All packages are build with `-march=<cpu-set> -O3`. Some
packages will fail to build, they will just be provided from the official repos as usual.
[](https://status.alhp.dev)
[](https://goreportcard.com/report/somegit.dev/ALHP/ALHP.GO)
[](https://pkg.go.dev/somegit.dev/ALHP/ALHP.GO)
[](https://somegit.dev/anonfunc/ALHP.GO/src/branch/master/LICENSE)
[](https://liberapay.com/anonfunc/)

[Package status page](https://alhp.anonfunc.dev/packages.html)
Buildbot for Archlinux based repos with different
[x86-64 feature levels](https://www.phoronix.com/scan.php?page=news_item&px=GCC-11-x86-64-Feature-Levels), `-O3` and
[LTO](https://en.wikipedia.org/wiki/Interprocedural_optimization).

## Check your system for support
> [!WARNING]
> NVIDIA graphics users using the **proprietary driver** are strongly encouraged to read the
> [FAQ about Linux kernel modules](#directly-linked-kernel-modules) before enabling any repos.

**Important**: Before you enable any of these repos, check if your system supports x86-64-v3. You can do that
with `/lib/ld-linux-x86-64.so.2 --help`. If you don't check beforehand you might be unable to boot your system anymore
and need to downgrade any package that you may have upgraded.
---
<!-- TOC -->
* [Quick Start](#quick-start)
* [FAQ](#faq)
* [Matrix](#matrix)
* [Donations](#donations)
* [License and Legal](#license-and-legal)
<!-- TOC -->

---

## Quick Start

### 1. Check your system for support

> [!CAUTION]
> Before enabling any of these repos, make sure that your system supports the level of functionality you want to
> enable (e.g. `x86-64-v3`).
> **If you don't check first, you may not be able to boot your system and will have to downgrade any packages you may
have upgraded.**

Check which feature levels your CPU supports with

```bash
/lib/ld-linux-x86-64.so.2 --help
```

Example output snippet for a system supporting up to `x86-64-v3`:

@@ -20,116 +50,157 @@ Subdirectories of glibc-hwcaps directories, in priority order:
x86-64-v2 (supported, searched)
```

## Enable Repos
> [!NOTE]
> ALHP repos for `x86-64-v2`, `x86-64-v3` and `x86-64-v4` are currently available. You can see all available
> repositories [here](https://alhp.dev/).

To enable these complement repos you need to add them above the regular repos in `/etc/pacman.conf`
### 2. Install keyring & mirrorlist

### Example pacman.conf
Install [alhp-keyring](https://aur.archlinux.org/packages/alhp-keyring/)
and [alhp-mirrorlist](https://aur.archlinux.org/packages/alhp-mirrorlist/) from the **AUR**.

Example with `yay`:

```bash
yay -S alhp-keyring alhp-mirrorlist
```

`alhp-keyring` provides the current signing keys used by ALHP, `alhp-mirrorlist` a selection of mirrors.

### 3. Choose a mirror (optional)

Edit `/etc/pacman.d/alhp-mirrorlist` and comment in/out the mirrors you want to enable/disable.
By default, a CDN mirror provided by ALHP is selected.
> [!NOTE]
> `cdn.alhp.dev` and `alhp.dev` are provided directly by ALHP. If you have problems with a mirror,
> open an issue at [the mirrorlist repo](https://somegit.dev/ALHP/alhp-mirrorlist).

### 4. Modify pacman.conf

Add the ALHP repos to your `/etc/pacman.conf`. Make sure the appropriate ALHP repository is **above** the Archlinux
repo.

Example for `x86-64-v3`:

```editorconfig
[core-x86-64-v3]
Server = https://alhp.harting.dev/$repo/os/$arch/

[extra-x86-64-v3]
Server = https://alhp.harting.dev/$repo/os/$arch/

[community-x86-64-v3]
Server = https://alhp.harting.dev/$repo/os/$arch/
Include = /etc/pacman.d/alhp-mirrorlist

[core]
Include = /etc/pacman.d/mirrorlist

[extra-x86-64-v3]
Include = /etc/pacman.d/alhp-mirrorlist

[extra]
Include = /etc/pacman.d/mirrorlist

[community]
# if you need [multilib] support
[multilib-x86-64-v3]
Include = /etc/pacman.d/alhp-mirrorlist

[multilib]
Include = /etc/pacman.d/mirrorlist
```

Replace `x86-64-v3` with your cpu-set. More information about all available options on [this gcc page](https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html).
Currently, alhp.harting.dev only builds for `x86-64-v3` (list is subject to change).
You can see all available repositories [here](https://alhp.harting.dev/).
Replace `x86-64-v3` with the x86-64 feature level you want to enable.

After finished adding the repos to `pacman.conf` you need to import and sign the used pgp key:
> [!TIP]
> Multiple layers can be stacked as described in https://somegit.dev/ALHP/ALHP.GO/issues/255#issuecomment-3335.

Import:
```
pacman-key --keyserver keyserver.ubuntu.com --recv-keys 0D4D2FDAF45468F3DDF59BEDE3D0D2CD3952E298
```

Local sign:

```
pacman-key --lsign-key 0D4D2FDAF45468F3DDF59BEDE3D0D2CD3952E298
```

Update package database and upgrade:
### 5. Update package database and upgrade

```
pacman -Suy
```

## Remove Repos
## FAQ

To disable ALHP remove all *-x86-64-v3 entries in `/etc/pacman.conf`.
### Remove ALHP packages

After that you can refresh pacmans databases and downgrade all packages like:
To disable ALHP, remove all *x86-64-vX* entries in `/etc/pacman.conf` and remove `alhp-keyring` and `alhp-mirrorlist`.

After that, you can update pacman's databases and downgrade all packages, like

```
pacman -Suuy
```

## Package eligibility

Packages [excluded](https://www.reddit.com/r/archlinux/comments/oflged/alhp_archlinux_recompiled_for_x8664v3_experimental/h4fkinu?utm_source=share&utm_medium=web2x&context=3)
from building (besides all 'any' architecture packages) are being listed in issue #16.
Also [package status page](https://alhp.anonfunc.dev/packages.html).

## FAQ

### LTO

Enabled for all packages build after 04 Nov 2021 12:07:00
UTC. [More details.](https://git.harting.dev/anonfunc/ALHP.GO/issues/52)
Enabled for all packages built after 04 Nov 2021 12:07:00
UTC. [More details.](https://somegit.dev/ALHP/ALHP.GO/issues/52)
LTO status is visible per package on the package status page.

### error: *-x86-64-v3: signature from "Archlinux CIE Repos (Build 2020/2021) <cie@harting.dev>" is unknown trust
### Linux Kernel packages

You get this because the new, extended key has unknown trust value attached to it. To fix it, first import the key again
to be sure you got the extended one:
`pacman-key --keyserver keyserver.ubuntu.com --recv-keys 0D4D2FDAF45468F3DDF59BEDE3D0D2CD3952E298`
`KCFLAGS`/`KCPPFLAGS` are used to build the kernel packages with our additional flags.

After that you just have to set the trust on this key with (as root, for `pacman-key`):
### Directly linked kernel modules

Due to our increase in pkgrel, building the kernel packages **will break any directly linked modules** such as `nvidia`
(not `nvidia-dkms`) or `virtualbox-host-modules-arch` (not `virtualbox-host-dkms`). **Their respective `dkms`-variant is
not affected**. This issue is being tracked in #68, a solution is being worked on.

### Mirrors

You want to mirror ALHP? You are welcome to do
so, [see alhp-mirrorlist for how to become one](https://somegit.dev/ALHP/alhp-mirrorlist#how-to-become-a-mirror).

### What packages are built

Packages [excluded](https://www.reddit.com/r/archlinux/comments/oflged/alhp_archlinux_recompiled_for_x8664v3_experimental/h4fkinu?utm_source=share&utm_medium=web2x&context=3)
from building (besides all `any` architecture packages) are being listed in issue #16.
See also [package status page](https://status.alhp.dev) (search for `blacklisted`).

### Why is package X not up-to-date

Also relevant for: **I can't find package X / Application X fails to start because it links to an old/newer lib**

ALHP builds packages **after** they are released in the official Archlinux repos (excluding `[*-testing]`).
This will cause packages to be delayed if the current batch contains many packages, or packages that take a while to
build (e.g. `chromium`).

You can always check on the progress of the current build cycle on the [package status page](https://status.alhp.dev).
Please refrain from opening issues caused by packages currently in queue/not yet build/not yet moved to the repo.
Please keep in mind that large rebuilds such as `openssl` or `python` can take days to complete on our current build
hardware.

### Debug symbols

ALHP provides a debuginfod instance under `debuginfod.alhp.dev`.

To use it, have `debuginfod` installed on your system and add it to your `DEBUGINFOD_URLS` with:

```bash
echo "https://debuginfod.alhp.dev" > /etc/debuginfod/alhp.urls
```
pacman-key --edit-key 0D4D2FDAF45468F3DDF59BEDE3D0D2CD3952E298

pub rsa4096/E3D0D2CD3952E298
created: 2020-08-12 expires: 2022-07-09 usage: SC
trust: unknown validity: unknown
[ unknown] (1). Archlinux CIE Repos (Build 2020/2021) <cie@harting.dev>
### Switch between levels

gpg> trust
pub rsa4096/E3D0D2CD3952E298
created: 2020-08-12 expires: 2022-07-09 usage: SC
trust: unknown validity: unknown
[ unknown] (1). Archlinux CIE Repos (Build 2020/2021) <cie@harting.dev>
If you want to switch between levels, e.g. from `x86-64-v3` to `x86-64-v4`, you need to revert to official packages
first, and then enable your desired repos again.

Please decide how far you trust this user to correctly verify other users' keys
(by looking at passports, checking fingerprints from different sources, etc.)
1. Comment out or remove the ALHP repo entries in `/etc/pacman.conf`.
2. Downgrade packages with `pacman -Suuy`.
3. Clear pacman's package cache with `pacman -Scc`.
4. Uncomment/add your desired repos to `/etc/pacman.conf` and update with `pacman -Suy`.

1 = I don't know or won't say
2 = I do NOT trust
3 = I trust marginally
4 = I trust fully
5 = I trust ultimately
m = back to the main menu
## Matrix

Your decision? 4
````

For any non-issue questions, or if you just want to chat, ALHP has a Matrix
room [here](https://matrix.to/#/#alhp:ofsg.eu) (`#alhp@ofsg.eu`). You can also find me (@idlegandalf)
in `#archlinux:archlinux.org`.

### Donations
## Donations

I appreciate any money you want to throw my way, but donations are strictly optional.
Also consider [donating to the Archlinux Team](https://archlinux.org/donate/), without their work ALHP would not be possible.
I appreciate any money you want to throw my way, but donations are strictly optional. Donations are primarily used to
pay for server costs. Also consider [donating to the **Archlinux Team**](https://archlinux.org/donate/), without their
work ALHP would not be possible.

[Donate on LiberaPay](https://liberapay.com/anonfunc/)
[](https://liberapay.com/anonfunc/)

## License and Legal

This project and all of its source code is released under the terms of the GNU General Public License, version 2
or any later version. See [LICENSE](https://somegit.dev/ALHP/ALHP.GO/src/branch/master/LICENSE) for details.
`````
```
@@ -1,5 +1,5 @@
[Unit]
Description=Go based Archlinux instructionset enabled repo build manager.
Description=Go based Archlinux instruction-set enabled repo build manager.
After=network.target

[Service]
@@ -12,6 +12,10 @@ TimeoutStopSec=5min
MemoryHigh=30G
CPUQuota=700%
Nice=15
CPUSchedulingPolicy=batch
IOSchedulingClass=best-effort
IOSchedulingPriority=7
IOWeight=100

[Install]
WantedBy=multi-user.target
```
buildmanager.go (new file, 480 lines added)
```go
package main

import (
	"context"
	"errors"
	"fmt"
	"github.com/c2h5oh/datasize"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/sethvargo/go-retry"
	log "github.com/sirupsen/logrus"
	"os"
	"os/exec"
	"path/filepath"
	"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
	"strings"
	"sync"
	"time"
)

const MaxUnknownBuilder = 2

type BuildManager struct {
	repoPurge    map[string]chan []*ProtoPackage
	repoAdd      map[string]chan []*ProtoPackage
	repoWG       *sync.WaitGroup
	alpmMutex    *sync.RWMutex
	building     []*ProtoPackage
	buildingLock *sync.RWMutex
	queueSignal  chan struct{}
	metrics      struct {
		queueSize *prometheus.GaugeVec
	}
}
```
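`buildQueue` below leans on two helpers that live elsewhere in the project, `pkgList2MaxMem` and `ContainsPkg`; their implementations are not part of this diff. A minimal sketch of what the call sites imply might look like the following — the field names mirror how they are used in this file, everything else is an assumption:

```go
// pkgList2MaxMem sums the known peak memory usage (max_rss, assumed to be
// stored in kilobytes) of the given packages. Sketch only: the real helper
// is defined elsewhere in ALHP.GO.
func pkgList2MaxMem(pkgs []*ProtoPackage) datasize.ByteSize {
	var sum datasize.ByteSize
	for _, p := range pkgs {
		if p.DBPackage != nil && p.DBPackage.MaxRss != nil {
			sum += datasize.ByteSize(*p.DBPackage.MaxRss) * datasize.KB
		}
	}
	return sum
}

// ContainsPkg reports whether pkg is already in the list. The repo flag
// mirrors the call sites below: when true, packages are compared including
// their full repo, otherwise only by pkgbase. Sketch only.
func ContainsPkg(pkgs []*ProtoPackage, pkg *ProtoPackage, repo bool) bool {
	for _, p := range pkgs {
		if p.PkgbaseEquals(pkg, repo) {
			return true
		}
	}
	return false
}
```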
```go
func (b *BuildManager) buildQueue(ctx context.Context, queue []*ProtoPackage) error {
	var (
		doneQ         []*ProtoPackage
		doneQLock     = new(sync.RWMutex)
		unknownBuilds bool
		queueNoMatch  bool
	)

	for len(doneQ) != len(queue) {
		up := 0
		b.buildingLock.RLock()
		if (pkgList2MaxMem(b.building) < conf.Build.MemoryLimit &&
			!unknownBuilds && !queueNoMatch) ||
			(unknownBuilds && len(b.building) < MaxUnknownBuilder) {
			queueNoMatch = true
			b.buildingLock.RUnlock()
			for _, pkg := range queue {
				// check if package is already built
				doneQLock.RLock()
				if ContainsPkg(doneQ, pkg, true) {
					doneQLock.RUnlock()
					continue
				}
				doneQLock.RUnlock()

				// check if package is already building (we do not build packages from different marchs simultaneously)
				b.buildingLock.RLock()
				if ContainsPkg(b.building, pkg, false) {
					log.Debugf("[Q] skipped already building package %s->%s", pkg.FullRepo, pkg.Pkgbase)
					b.buildingLock.RUnlock()
					continue
				}
				b.buildingLock.RUnlock()

				// only check for memory on known-memory-builds
				// otherwise build them one-at-a-time
				// TODO: add initial compile mode for new repos
				if !unknownBuilds {
					// check if package has unknown memory usage
					if pkg.DBPackage.MaxRss == nil {
						log.Debugf("[Q] skipped unknown package %s->%s", pkg.FullRepo, pkg.Pkgbase)
						up++
						continue
					}

					// check if package can be built with current memory limit
					if datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB > conf.Build.MemoryLimit { //nolint:gosec
						log.Warningf("[Q] %s->%s exceeds memory limit: %s->%s", pkg.FullRepo, pkg.Pkgbase,
							datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB, conf.Build.MemoryLimit) //nolint:gosec
						doneQLock.Lock()
						doneQ = append(doneQ, pkg)
						doneQLock.Unlock()
						b.metrics.queueSize.WithLabelValues(pkg.FullRepo, "queued").Dec()
						continue
					}

					b.buildingLock.RLock()
					currentMemLoad := pkgList2MaxMem(b.building)
					b.buildingLock.RUnlock()

					// check if package can be built right now
					if currentMemLoad+(datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB) > conf.Build.MemoryLimit { //nolint:gosec
						log.Debugf("[Q] skipped package with max_rss %s while load %s: %s->%s",
							datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB, currentMemLoad, pkg.Pkgbase, pkg.March) //nolint:gosec
						continue
					}
				} else {
					b.buildingLock.RLock()
					if len(b.building) >= MaxUnknownBuilder {
						b.buildingLock.RUnlock()
						continue
					}
					b.buildingLock.RUnlock()
				}

				b.buildingLock.Lock()
				b.building = append(b.building, pkg)
				b.buildingLock.Unlock()
				queueNoMatch = false
				b.metrics.queueSize.WithLabelValues(pkg.FullRepo, "queued").Dec()
				b.metrics.queueSize.WithLabelValues(pkg.FullRepo, "building").Inc()

				go func(pkg *ProtoPackage) {
					dur, err := pkg.build(ctx)
					b.metrics.queueSize.WithLabelValues(pkg.FullRepo, "building").Dec()
					if err != nil && !errors.Is(err, ErrorNotEligible) {
						log.Warningf("[Q] error building package %s->%s in %s: %s", pkg.FullRepo, pkg.Pkgbase, dur, err)
						b.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
					} else if err == nil {
						log.Infof("[Q] build successful: %s->%s (%s)", pkg.FullRepo, pkg.Pkgbase, dur)
						b.metrics.queueSize.WithLabelValues(pkg.FullRepo, "built").Inc()
					}
					doneQLock.Lock()
					b.buildingLock.Lock()
					doneQ = append(doneQ, pkg)

					for i := 0; i < len(b.building); i++ {
						if b.building[i].PkgbaseEquals(pkg, true) {
							b.building = append(b.building[:i], b.building[i+1:]...)
							break
						}
					}
					doneQLock.Unlock()
					b.buildingLock.Unlock()
					b.queueSignal <- struct{}{}
				}(pkg)
			}
		} else {
			log.Debugf("[Q] memory/build limit reached, waiting for package to finish...")
			b.buildingLock.RUnlock()
			<-b.queueSignal
			queueNoMatch = false
		}

		// if only unknown packages are left, enable unknown buildmode
		b.buildingLock.RLock()
		if up == len(queue)-(len(doneQ)+len(b.building)) {
			unknownBuilds = true
		}
		b.buildingLock.RUnlock()
	}
	return nil
}
```
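The comparisons above multiply `max_rss` by `datasize.KB` before checking it against `conf.Build.MemoryLimit`; the implicit assumption (not stated in this file) is that `max_rss` is recorded in kilobytes, while the limit comes from a human-readable config string such as the `memory_limit: "16gb"` shown in the configuration further down. A standalone illustration of that arithmetic with `github.com/c2h5oh/datasize`, using made-up numbers:

```go
package main

import (
	"fmt"

	"github.com/c2h5oh/datasize"
)

func main() {
	// Parse a human-readable limit, as a config value like "16gb" would be.
	var limit datasize.ByteSize
	if err := limit.UnmarshalText([]byte("16gb")); err != nil {
		panic(err)
	}

	// Hypothetical max_rss of a previous build, assumed to be in kilobytes.
	maxRss := int64(2500000)
	usage := datasize.ByteSize(maxRss) * datasize.KB

	fmt.Println(usage, limit)   // both printed in human-readable form
	fmt.Println(usage <= limit) // true: this build would fit under the limit
}
```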
```go
func (b *BuildManager) repoWorker(ctx context.Context, repo string) {
	for {
		select {
		case pkgL := <-b.repoAdd[repo]:
			b.repoWG.Add(1)
			toAdd := make([]string, 0)
			for _, pkg := range pkgL {
				toAdd = append(toAdd, pkg.PkgFiles...)
			}

			args := []string{"-s", "-v", "-p", "-n", filepath.Join(conf.Basedir.Repo, repo, "os", conf.Arch, repo) + ".db.tar.xz"}
			args = append(args, toAdd...)
			cmd := exec.CommandContext(ctx, "repo-add", args...)
			res, err := cmd.CombinedOutput()
			log.Debug(string(res))
			if err != nil && cmd.ProcessState.ExitCode() != 1 {
				log.Panicf("%s while repo-add: %v", string(res), err)
			}

			for _, pkg := range pkgL {
				err = pkg.toDBPackage(ctx, true)
				if err != nil {
					log.Warningf("error getting db entry for %s: %v", pkg.Pkgbase, err)
					continue
				}

				pkgUpd := pkg.DBPackage.Update().
					SetStatus(dbpackage.StatusLatest).
					ClearSkipReason().
					SetRepoVersion(pkg.Version).
					SetTagRev(pkg.State.TagRev)

				if _, err := os.Stat(filepath.Join(conf.Basedir.Debug, pkg.March,
					pkg.DBPackage.Packages[0]+"-debug-"+pkg.Version+"-"+conf.Arch+".pkg.tar.zst")); err == nil {
					pkgUpd = pkgUpd.SetDebugSymbols(dbpackage.DebugSymbolsAvailable)
				} else {
					pkgUpd = pkgUpd.SetDebugSymbols(dbpackage.DebugSymbolsNotAvailable)
				}
				if pkg.DBPackage, err = pkgUpd.Save(ctx); err != nil {
					log.Error(err)
				}
			}

			cmd = exec.CommandContext(ctx, "paccache", "-rc", filepath.Join(conf.Basedir.Repo, repo, "os", conf.Arch), "-k", "1") //nolint:gosec
			res, err = cmd.CombinedOutput()
			log.Debug(string(res))
			if err != nil {
				log.Warningf("error running paccache: %v", err)
			}

			err = updateLastUpdated()
			if err != nil {
				log.Warningf("error updating lastupdate: %v", err)
			}
			b.repoWG.Done()
		case pkgL := <-b.repoPurge[repo]:
			for _, pkg := range pkgL {
				if _, err := os.Stat(filepath.Join(conf.Basedir.Repo, pkg.FullRepo, "os", conf.Arch, pkg.FullRepo) + ".db.tar.xz"); err != nil {
					continue
				}
				if len(pkg.PkgFiles) == 0 {
					if err := pkg.findPkgFiles(); err != nil {
						log.Warningf("[%s/%s] unable to find files: %v", pkg.FullRepo, pkg.Pkgbase, err)
						continue
					} else if len(pkg.PkgFiles) == 0 {
						if pkg.DBPackage != nil {
							err = pkg.DBPackage.Update().ClearRepoVersion().ClearTagRev().Exec(ctx)
							if err != nil {
								log.Error(err)
							}
						}
						continue
					}
				}

				var realPkgs []string
				for _, filePath := range pkg.PkgFiles {
					if _, err := os.Stat(filePath); err == nil {
						realPkgs = append(realPkgs, Package(filePath).Name())
					}
				}

				if len(realPkgs) == 0 {
					continue
				}

				b.repoWG.Add(1)
				args := []string{"-s", "-v", filepath.Join(conf.Basedir.Repo, pkg.FullRepo, "os", conf.Arch, pkg.FullRepo) + ".db.tar.xz"}
				args = append(args, realPkgs...)
				cmd := exec.CommandContext(ctx, "repo-remove", args...)
				res, err := cmd.CombinedOutput()
				log.Debug(string(res))
				if err != nil && cmd.ProcessState.ExitCode() == 1 {
					log.Warningf("error while deleting package %s: %s", pkg.Pkgbase, string(res))
				}

				if pkg.DBPackage != nil {
					err = pkg.DBPackage.Update().ClearRepoVersion().ClearTagRev().Exec(ctx)
					if err != nil {
						log.Error(err)
					}
				}

				for _, file := range pkg.PkgFiles {
					_ = os.Remove(file)
					_ = os.Remove(file + ".sig")
				}
				err = updateLastUpdated()
				if err != nil {
					log.Warningf("error updating lastupdate: %v", err)
				}
				b.repoWG.Done()
			}
		}
	}
}
```
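`repoWorker` calls `Package(filePath).Name()` to turn a package file path into the package name that `repo-remove` expects; the `Package` type is defined elsewhere in the project. Under the assumption that it simply strips the Arch package naming scheme (`<name>-<pkgver>-<pkgrel>-<arch>.pkg.tar.zst`), a sketch could look like this:

```go
// Package is a sketch of the helper type used above: a package file path
// whose Name() drops the trailing pkgver, pkgrel and arch components.
// It reuses the strings and path/filepath packages already imported here.
type Package string

func (p Package) Name() string {
	base := filepath.Base(string(p))
	// strip the compressed-package suffix, e.g. ".pkg.tar.zst"
	if i := strings.Index(base, ".pkg.tar"); i != -1 {
		base = base[:i]
	}
	parts := strings.Split(base, "-")
	if len(parts) < 4 {
		return base
	}
	// everything except the last three fields (pkgver, pkgrel, arch) is the name
	return strings.Join(parts[:len(parts)-3], "-")
}
```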
```go
func (b *BuildManager) syncWorker(ctx context.Context) error {
	err := os.MkdirAll(conf.Basedir.Work, 0o755)
	if err != nil {
		log.Fatalf("error creating work dir %s: %v", conf.Basedir.Work, err)
	}

	gitPath := filepath.Join(conf.Basedir.Work, stateDir)
	for {
		if _, err := os.Stat(gitPath); os.IsNotExist(err) {
			cmd := exec.CommandContext(ctx, "git", "clone", "--depth=1", conf.StateRepo, gitPath) //nolint:gosec
			res, err := cmd.CombinedOutput()
			log.Debug(string(res))
			if err != nil {
				log.Fatalf("error cloning state repo: %v", err)
			}
		} else if err == nil {
			cmd := exec.CommandContext(ctx, "git", "reset", "--hard")
			cmd.Dir = gitPath
			res, err := cmd.CombinedOutput()
			log.Debug(string(res))
			if err != nil {
				log.Fatalf("error resetting state repo: %v", err)
			}

			cmd = exec.CommandContext(ctx, "git", "pull")
			cmd.Dir = gitPath
			res, err = cmd.CombinedOutput()
			log.Debug(string(res))
			if err != nil {
				log.Warningf("failed to update state repo: %v", err)
			}
		}

		// housekeeping
		wg := new(sync.WaitGroup)
		for _, repo := range repos {
			wg.Add(1)
			splitRepo := strings.Split(repo, "-")
			go func() {
				err := housekeeping(ctx, splitRepo[0], strings.Join(splitRepo[1:], "-"), wg)
				if err != nil {
					log.Warningf("[%s] housekeeping failed: %v", repo, err)
				}
			}()
		}
		wg.Wait()

		err := logHK(ctx)
		if err != nil {
			log.Warningf("log-housekeeping failed: %v", err)
		}
		debugHK()

		// fetch updates between sync runs
		b.alpmMutex.Lock()
		err = alpmHandle.Release()
		if err != nil {
			log.Fatalf("error releasing ALPM handle: %v", err)
		}

		if err := retry.Fibonacci(ctx, 1*time.Second, func(_ context.Context) error {
			if err := setupChroot(ctx); err != nil {
				log.Warningf("unable to upgrade chroot, trying again later")
				return retry.RetryableError(err)
			}
			return nil
		}); err != nil {
			log.Fatal(err)
		}

		alpmHandle, err = initALPM(filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot),
			filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot, "/var/lib/pacman"))
		if err != nil {
			log.Warningf("error while alpm-init: %v", err)
		}
		b.alpmMutex.Unlock()

		queue, err := b.genQueue(ctx)
		if err != nil {
			log.Errorf("error building queue: %v", err)
			return err
		}

		log.Debugf("build-queue with %d items", len(queue))
		err = b.buildQueue(ctx, queue)
		if err != nil {
			return err
		}

		if ctx.Err() == nil {
			for _, repo := range repos {
				err = movePackagesLive(ctx, repo)
				if err != nil {
					log.Errorf("[%s] error moving packages live: %v", repo, err)
				}
			}
		} else {
			return ctx.Err()
		}

		b.metrics.queueSize.Reset()
		log.Debugf("build-cycle finished")
		time.Sleep(time.Duration(*checkInterval) * time.Minute)
	}
}
```
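The housekeeping loop above splits a combined repo name back into the Arch repo and the march (`FullRepo` is assembled as `repo + "-" + march` in `genQueue` below). A quick, self-contained illustration of that split:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// e.g. "extra-x86-64-v3" -> repo "extra", march "x86-64-v3"
	full := "extra-x86-64-v3"
	parts := strings.Split(full, "-")
	repo := parts[0]
	march := strings.Join(parts[1:], "-")
	fmt.Println(repo, march) // extra x86-64-v3
}
```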
```go
func (b *BuildManager) genQueue(ctx context.Context) ([]*ProtoPackage, error) {
	stateFiles, err := Glob(filepath.Join(conf.Basedir.Work, stateDir, "**/*"))
	if err != nil {
		return nil, fmt.Errorf("error scanning for state-files: %w", err)
	}

	var pkgbuilds []*ProtoPackage
	for _, stateFile := range stateFiles {
		stat, err := os.Stat(stateFile)
		if err != nil || stat.IsDir() || strings.Contains(stateFile, ".git") || strings.Contains(stateFile, "README.md") {
			continue
		}

		repo, subRepo, arch, err := stateFileMeta(stateFile)
		if err != nil {
			log.Warningf("[QG] error generating statefile metadata %s: %v", stateFile, err)
			continue
		}

		if !Contains(conf.Repos, repo) || (subRepo != nil && Contains(conf.Blacklist.Repo, *subRepo)) {
			continue
		}

		rawState, err := os.ReadFile(stateFile)
		if err != nil {
			log.Warningf("[QG] cannot read statefile %s: %v", stateFile, err)
			continue
		}

		state, err := parseState(string(rawState))
		if err != nil {
			log.Warningf("[QG] cannot parse statefile %s: %v", stateFile, err)
			continue
		}

		for _, march := range conf.March {
			pkg := &ProtoPackage{
				Pkgbase:  state.Pkgbase,
				Repo:     dbpackage.Repository(repo),
				March:    march,
				FullRepo: repo + "-" + march,
				State:    state,
				Version:  state.PkgVer,
				Arch:     arch,
			}

			err = pkg.toDBPackage(ctx, false)
			if err != nil {
				log.Warningf("[QG] error getting/creating dbpackage %s: %v", state.Pkgbase, err)
				continue
			}

			if !pkg.isAvailable(ctx, alpmHandle) {
				log.Debugf("[QG] %s->%s not available on mirror, skipping build", pkg.FullRepo, pkg.Pkgbase)
				continue
			}

			aBuild, err := pkg.IsBuilt()
			if err != nil {
				log.Warningf("[QG] %s->%s error determining built packages: %v", pkg.FullRepo, pkg.Pkgbase, err)
			}
			if aBuild {
				log.Infof("[QG] %s->%s already built, skipping build", pkg.FullRepo, pkg.Pkgbase)
				continue
			}

			if pkg.DBPackage == nil {
				err = pkg.toDBPackage(ctx, true)
				if err != nil {
					log.Warningf("[QG] error getting/creating dbpackage %s: %v", state.Pkgbase, err)
					continue
				}
			}

			if pkg.DBPackage.TagRev != nil && *pkg.DBPackage.TagRev == state.TagRev {
				continue
			}

			// try download .SRCINFO from repo
			srcInfo, err := downloadSRCINFO(pkg.DBPackage.Pkgbase, state.TagRev)
			if err == nil {
				pkg.Srcinfo = srcInfo
			}

			if !pkg.isEligible(ctx) {
				continue
			}

			pkg.DBPackage, err = pkg.DBPackage.Update().SetStatus(dbpackage.StatusQueued).Save(ctx)
			if err != nil {
				log.Warningf("[QG] error updating dbpackage %s: %v", state.Pkgbase, err)
			}
			pkgbuilds = append(pkgbuilds, pkg)
			b.metrics.queueSize.WithLabelValues(pkg.FullRepo, "queued").Inc()
		}
	}

	return pkgbuilds, nil
}
```
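`genQueue` relies on `stateFileMeta` and `parseState`, which are defined elsewhere in the repository. The state repo cloned in `syncWorker` keeps one file per pkgbase below a repo/arch directory, and the fields read above (`Pkgbase`, `PkgVer`, `TagRev`) suggest a whitespace-separated line; the following sketch is an assumption based only on those call sites, not the actual ALHP parser:

```go
// StateInfo mirrors the fields accessed above; the real type lives elsewhere.
type StateInfo struct {
	Pkgbase string
	PkgVer  string
	TagRev  string
}

// parseState is a sketch: it assumes a single whitespace-separated line
// of the form "<pkgbase> <pkgver> <tag_rev> ...".
func parseState(raw string) (*StateInfo, error) {
	fields := strings.Fields(raw)
	if len(fields) < 3 {
		return nil, fmt.Errorf("unexpected state file format: %q", raw)
	}
	return &StateInfo{Pkgbase: fields[0], PkgVer: fields[1], TagRev: fields[2]}, nil
}

// stateFileMeta is a sketch: it assumes paths like ".../core-x86_64/<pkgbase>"
// and derives repo, optional sub-repo and arch from the directory name.
func stateFileMeta(path string) (repo string, subRepo *string, arch string, err error) {
	dir := filepath.Base(filepath.Dir(path))
	parts := strings.Split(dir, "-")
	if len(parts) < 2 {
		return "", nil, "", fmt.Errorf("unexpected state dir: %q", dir)
	}
	repo = parts[0]
	arch = parts[len(parts)-1]
	if len(parts) > 2 {
		sub := strings.Join(parts[1:len(parts)-1], "-")
		subRepo = &sub
	}
	return repo, subRepo, arch, nil
}
```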
```
@@ -2,11 +2,10 @@ arch: x86_64
repos:
- core
- extra
- community

svn2git:
upstream-core-extra: "https://github.com/archlinux/svntogit-packages.git"
upstream-community: "https://github.com/archlinux/svntogit-community.git"
state_repo: "https://gitlab.archlinux.org/archlinux/packaging/state.git"

max_clone_retries: 100

db:
driver: pgx
@@ -14,9 +13,8 @@ db:

basedir:
repo: /var/lib/alhp/repo/
chroot: /var/lib/alhp/chroot/
makepkg: /var/lib/alhp/makepkg/
upstream: /var/lib/alhp/upstream/
work: /var/lib/alhp/workspace/
debug: /var/lib/alhp/debug/

march:
- x86-64-v3
@@ -36,18 +34,17 @@ blacklist:
- rust

build:
# number of workers total
worker: 4
makej: 8

status:
class:
skipped: "secondary"
queued: "warning"
latest: "primary"
failed: "danger"
signing: "success"
building: "info"
unknown: "dark"
checks: true
# how much memory ALHP should use
# this will also decide how many builds will run concurrently,
# since ALHP will try to optimise the queue for speed while not going over this limit
memory_limit: "16gb"

logging:
level: INFO
level: INFO

metrics:
port: 9568
```
ent/client.go (244 changed lines; old and new lines appear interleaved below, as in the flattened compare view)

```
// Code generated by entc, DO NOT EDIT.
// Code generated by ent, DO NOT EDIT.

package ent

import (
"context"
"errors"
"fmt"
"log"
"reflect"

"ALHP.go/ent/migrate"

"ALHP.go/ent/dbpackage"
"somegit.dev/ALHP/ALHP.GO/ent/migrate"

"entgo.io/ent"
"entgo.io/ent/dialect"
"entgo.io/ent/dialect/sql"
"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
)

// Client is the client that holds all ent builders.
@@ -20,22 +22,76 @@ type Client struct {
config
// Schema is the client for creating, migrating and dropping schema.
Schema *migrate.Schema
// DbPackage is the client for interacting with the DbPackage builders.
DbPackage *DbPackageClient
// DBPackage is the client for interacting with the DBPackage builders.
DBPackage *DBPackageClient
}

// NewClient creates a new client configured with the given options.
func NewClient(opts ...Option) *Client {
cfg := config{log: log.Println, hooks: &hooks{}}
cfg.options(opts...)
client := &Client{config: cfg}
client := &Client{config: newConfig(opts...)}
client.init()
return client
}

func (c *Client) init() {
c.Schema = migrate.NewSchema(c.driver)
c.DbPackage = NewDbPackageClient(c.config)
c.DBPackage = NewDBPackageClient(c.config)
}

type (
// config is the configuration for the client and its builder.
config struct {
// driver used for executing database requests.
driver dialect.Driver
// debug enable a debug logging.
debug bool
// log used for logging on debug mode.
log func(...any)
// hooks to execute on mutations.
hooks *hooks
// interceptors to execute on queries.
inters *inters
}
// Option function to configure the client.
Option func(*config)
)

// newConfig creates a new config for the client.
func newConfig(opts ...Option) config {
cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}}
cfg.options(opts...)
return cfg
}

// options applies the options on the config object.
func (c *config) options(opts ...Option) {
for _, opt := range opts {
opt(c)
}
if c.debug {
c.driver = dialect.Debug(c.driver, c.log)
}
}

// Debug enables debug logging on the ent.Driver.
func Debug() Option {
return func(c *config) {
c.debug = true
}
}

// Log sets the logging function for debug mode.
func Log(fn func(...any)) Option {
return func(c *config) {
c.log = fn
}
}

// Driver configures the client driver.
func Driver(driver dialect.Driver) Option {
return func(c *config) {
c.driver = driver
}
}

// Open opens a database/sql.DB specified by the driver name and
@@ -54,11 +110,14 @@ func Open(driverName, dataSourceName string, options ...Option) (*Client, error)
}
}

// ErrTxStarted is returned when trying to start a new transaction from a transactional client.
var ErrTxStarted = errors.New("ent: cannot start a transaction within a transaction")

// Tx returns a new transactional client. The provided context
// is used until the transaction is committed or rolled back.
func (c *Client) Tx(ctx context.Context) (*Tx, error) {
if _, ok := c.driver.(*txDriver); ok {
return nil, fmt.Errorf("ent: cannot start a transaction within a transaction")
return nil, ErrTxStarted
}
tx, err := newTx(ctx, c.driver)
if err != nil {
@@ -69,14 +128,14 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) {
return &Tx{
ctx: ctx,
config: cfg,
DbPackage: NewDbPackageClient(cfg),
DBPackage: NewDBPackageClient(cfg),
}, nil
}

// BeginTx returns a transactional client with specified options.
func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) {
if _, ok := c.driver.(*txDriver); ok {
return nil, fmt.Errorf("ent: cannot start a transaction within a transaction")
return nil, errors.New("ent: cannot start a transaction within a transaction")
}
tx, err := c.driver.(interface {
BeginTx(context.Context, *sql.TxOptions) (dialect.Tx, error)
@@ -87,18 +146,18 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error)
cfg := c.config
cfg.driver = &txDriver{tx: tx, drv: c.driver}
return &Tx{
ctx: ctx,
config: cfg,
DbPackage: NewDbPackageClient(cfg),
DBPackage: NewDBPackageClient(cfg),
}, nil
}

// Debug returns a new debug-client. It's used to get verbose logging on specific operations.
//
// client.Debug().
// DbPackage.
// DBPackage.
// Query().
// Count(ctx)
//
func (c *Client) Debug() *Client {
if c.debug {
return c
@@ -118,87 +177,126 @@ func (c *Client) Close() error {
// Use adds the mutation hooks to all the entity clients.
// In order to add hooks to a specific client, call: `client.Node.Use(...)`.
func (c *Client) Use(hooks ...Hook) {
c.DbPackage.Use(hooks...)
c.DBPackage.Use(hooks...)
}

// DbPackageClient is a client for the DbPackage schema.
type DbPackageClient struct {
// Intercept adds the query interceptors to all the entity clients.
// In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`.
func (c *Client) Intercept(interceptors ...Interceptor) {
c.DBPackage.Intercept(interceptors...)
}

// Mutate implements the ent.Mutator interface.
func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) {
switch m := m.(type) {
case *DBPackageMutation:
return c.DBPackage.mutate(ctx, m)
default:
return nil, fmt.Errorf("ent: unknown mutation type %T", m)
}
}

// DBPackageClient is a client for the DBPackage schema.
type DBPackageClient struct {
config
}

// NewDbPackageClient returns a client for the DbPackage from the given config.
func NewDbPackageClient(c config) *DbPackageClient {
return &DbPackageClient{config: c}
// NewDBPackageClient returns a client for the DBPackage from the given config.
func NewDBPackageClient(c config) *DBPackageClient {
return &DBPackageClient{config: c}
}

// Use adds a list of mutation hooks to the hooks stack.
// A call to `Use(f, g, h)` equals to `dbpackage.Hooks(f(g(h())))`.
func (c *DbPackageClient) Use(hooks ...Hook) {
c.hooks.DbPackage = append(c.hooks.DbPackage, hooks...)
func (c *DBPackageClient) Use(hooks ...Hook) {
c.hooks.DBPackage = append(c.hooks.DBPackage, hooks...)
}

// Create returns a create builder for DbPackage.
func (c *DbPackageClient) Create() *DbPackageCreate {
mutation := newDbPackageMutation(c.config, OpCreate)
return &DbPackageCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
// Intercept adds a list of query interceptors to the interceptors stack.
// A call to `Intercept(f, g, h)` equals to `dbpackage.Intercept(f(g(h())))`.
func (c *DBPackageClient) Intercept(interceptors ...Interceptor) {
c.inters.DBPackage = append(c.inters.DBPackage, interceptors...)
}

// CreateBulk returns a builder for creating a bulk of DbPackage entities.
func (c *DbPackageClient) CreateBulk(builders ...*DbPackageCreate) *DbPackageCreateBulk {
return &DbPackageCreateBulk{config: c.config, builders: builders}
// Create returns a builder for creating a DBPackage entity.
func (c *DBPackageClient) Create() *DBPackageCreate {
mutation := newDBPackageMutation(c.config, OpCreate)
return &DBPackageCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// Update returns an update builder for DbPackage.
func (c *DbPackageClient) Update() *DbPackageUpdate {
mutation := newDbPackageMutation(c.config, OpUpdate)
return &DbPackageUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
// CreateBulk returns a builder for creating a bulk of DBPackage entities.
func (c *DBPackageClient) CreateBulk(builders ...*DBPackageCreate) *DBPackageCreateBulk {
return &DBPackageCreateBulk{config: c.config, builders: builders}
}

// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
// a builder and applies setFunc on it.
func (c *DBPackageClient) MapCreateBulk(slice any, setFunc func(*DBPackageCreate, int)) *DBPackageCreateBulk {
rv := reflect.ValueOf(slice)
if rv.Kind() != reflect.Slice {
return &DBPackageCreateBulk{err: fmt.Errorf("calling to DBPackageClient.MapCreateBulk with wrong type %T, need slice", slice)}
}
builders := make([]*DBPackageCreate, rv.Len())
for i := 0; i < rv.Len(); i++ {
builders[i] = c.Create()
setFunc(builders[i], i)
}
return &DBPackageCreateBulk{config: c.config, builders: builders}
}

// Update returns an update builder for DBPackage.
func (c *DBPackageClient) Update() *DBPackageUpdate {
mutation := newDBPackageMutation(c.config, OpUpdate)
return &DBPackageUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOne returns an update builder for the given entity.
func (c *DbPackageClient) UpdateOne(dp *DbPackage) *DbPackageUpdateOne {
mutation := newDbPackageMutation(c.config, OpUpdateOne, withDbPackage(dp))
return &DbPackageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
func (c *DBPackageClient) UpdateOne(dp *DBPackage) *DBPackageUpdateOne {
mutation := newDBPackageMutation(c.config, OpUpdateOne, withDBPackage(dp))
return &DBPackageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOneID returns an update builder for the given id.
func (c *DbPackageClient) UpdateOneID(id int) *DbPackageUpdateOne {
mutation := newDbPackageMutation(c.config, OpUpdateOne, withDbPackageID(id))
return &DbPackageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
func (c *DBPackageClient) UpdateOneID(id int) *DBPackageUpdateOne {
mutation := newDBPackageMutation(c.config, OpUpdateOne, withDBPackageID(id))
return &DBPackageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// Delete returns a delete builder for DbPackage.
func (c *DbPackageClient) Delete() *DbPackageDelete {
mutation := newDbPackageMutation(c.config, OpDelete)
return &DbPackageDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
// Delete returns a delete builder for DBPackage.
func (c *DBPackageClient) Delete() *DBPackageDelete {
mutation := newDBPackageMutation(c.config, OpDelete)
return &DBPackageDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// DeleteOne returns a delete builder for the given entity.
func (c *DbPackageClient) DeleteOne(dp *DbPackage) *DbPackageDeleteOne {
// DeleteOne returns a builder for deleting the given entity.
func (c *DBPackageClient) DeleteOne(dp *DBPackage) *DBPackageDeleteOne {
return c.DeleteOneID(dp.ID)
}

// DeleteOneID returns a delete builder for the given id.
func (c *DbPackageClient) DeleteOneID(id int) *DbPackageDeleteOne {
// DeleteOneID returns a builder for deleting the given entity by its id.
func (c *DBPackageClient) DeleteOneID(id int) *DBPackageDeleteOne {
builder := c.Delete().Where(dbpackage.ID(id))
builder.mutation.id = &id
builder.mutation.op = OpDeleteOne
return &DbPackageDeleteOne{builder}
return &DBPackageDeleteOne{builder}
}

// Query returns a query builder for DbPackage.
func (c *DbPackageClient) Query() *DbPackageQuery {
return &DbPackageQuery{
// Query returns a query builder for DBPackage.
func (c *DBPackageClient) Query() *DBPackageQuery {
return &DBPackageQuery{
config: c.config,
ctx: &QueryContext{Type: TypeDBPackage},
inters: c.Interceptors(),
}
}

// Get returns a DbPackage entity by its id.
func (c *DbPackageClient) Get(ctx context.Context, id int) (*DbPackage, error) {
// Get returns a DBPackage entity by its id.
func (c *DBPackageClient) Get(ctx context.Context, id int) (*DBPackage, error) {
return c.Query().Where(dbpackage.ID(id)).Only(ctx)
}

// GetX is like Get, but panics if an error occurs.
func (c *DbPackageClient) GetX(ctx context.Context, id int) *DbPackage {
func (c *DBPackageClient) GetX(ctx context.Context, id int) *DBPackage {
obj, err := c.Get(ctx, id)
if err != nil {
panic(err)
@@ -207,6 +305,36 @@ func (c *DbPackageClient) GetX(ctx context.Context, id int) *DbPackage {
}

// Hooks returns the client hooks.
func (c *DbPackageClient) Hooks() []Hook {
return c.hooks.DbPackage
func (c *DBPackageClient) Hooks() []Hook {
return c.hooks.DBPackage
}

// Interceptors returns the client interceptors.
func (c *DBPackageClient) Interceptors() []Interceptor {
return c.inters.DBPackage
}

func (c *DBPackageClient) mutate(ctx context.Context, m *DBPackageMutation) (Value, error) {
switch m.Op() {
case OpCreate:
return (&DBPackageCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
case OpUpdate:
return (&DBPackageUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
case OpUpdateOne:
return (&DBPackageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
case OpDelete, OpDeleteOne:
return (&DBPackageDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
default:
return nil, fmt.Errorf("ent: unknown DBPackage mutation op: %q", m.Op())
}
}

// hooks and interceptors per client, for fast access.
type (
hooks struct {
DBPackage []ent.Hook
}
inters struct {
DBPackage []ent.Interceptor
}
)
```
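Outside of the generated code, the renamed `DBPackage` client is used the same way as before. A hedged example of how application code might open and query it — the Postgres DSN, the pgx driver import path, and the queried ID are placeholders, not values from this repository:

```go
package main

import (
	"context"
	"database/sql"
	"fmt"
	"log"

	"entgo.io/ent/dialect"
	entsql "entgo.io/ent/dialect/sql"
	_ "github.com/jackc/pgx/v5/stdlib" // assumed pgx driver; registers "pgx"

	"somegit.dev/ALHP/ALHP.GO/ent"
	"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
)

func main() {
	db, err := sql.Open("pgx", "postgres://alhp:secret@localhost:5432/alhp") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	// Wrap the *sql.DB in an ent driver and build the generated client.
	client := ent.NewClient(ent.Driver(entsql.OpenDB(dialect.Postgres, db)))
	defer client.Close()

	// Query a single DBPackage by id using the generated predicate helpers.
	pkg, err := client.DBPackage.Query().Where(dbpackage.ID(1)).Only(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Pkgbase, pkg.RepoVersion)
}
```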
@@ -1,59 +0,0 @@

```go
// Code generated by entc, DO NOT EDIT.

package ent

import (
	"entgo.io/ent"
	"entgo.io/ent/dialect"
)

// Option function to configure the client.
type Option func(*config)

// Config is the configuration for the client and its builder.
type config struct {
	// driver used for executing database requests.
	driver dialect.Driver
	// debug enable a debug logging.
	debug bool
	// log used for logging on debug mode.
	log func(...interface{})
	// hooks to execute on mutations.
	hooks *hooks
}

// hooks per client, for fast access.
type hooks struct {
	DbPackage []ent.Hook
}

// Options applies the options on the config object.
func (c *config) options(opts ...Option) {
	for _, opt := range opts {
		opt(c)
	}
	if c.debug {
		c.driver = dialect.Debug(c.driver, c.log)
	}
}

// Debug enables debug logging on the ent.Driver.
func Debug() Option {
	return func(c *config) {
		c.debug = true
	}
}

// Log sets the logging function for debug mode.
func Log(fn func(...interface{})) Option {
	return func(c *config) {
		c.log = fn
	}
}

// Driver configures the client driver.
func Driver(driver dialect.Driver) Option {
	return func(c *config) {
		c.driver = driver
	}
}
```
@@ -1,33 +0,0 @@
// Code generated by entc, DO NOT EDIT.

package ent

import (
    "context"
)

type clientCtxKey struct{}

// FromContext returns a Client stored inside a context, or nil if there isn't one.
func FromContext(ctx context.Context) *Client {
    c, _ := ctx.Value(clientCtxKey{}).(*Client)
    return c
}

// NewContext returns a new context with the given Client attached.
func NewContext(parent context.Context, c *Client) context.Context {
    return context.WithValue(parent, clientCtxKey{}, c)
}

type txCtxKey struct{}

// TxFromContext returns a Tx stored inside a context, or nil if there isn't one.
func TxFromContext(ctx context.Context) *Tx {
    tx, _ := ctx.Value(txCtxKey{}).(*Tx)
    return tx
}

// NewTxContext returns a new context with the given Tx attached.
func NewTxContext(parent context.Context, tx *Tx) context.Context {
    return context.WithValue(parent, txCtxKey{}, tx)
}
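The context helpers removed here (FromContext, NewContext and their Tx counterparts) reappear unchanged in the consolidated ent.go further down. A minimal usage sketch, assuming a *ent.Client named client built elsewhere:

```go
package main

import (
    "context"
    "log"

    "somegit.dev/ALHP/ALHP.GO/ent"
)

// withClient stores the client in the context and later retrieves it,
// e.g. from inside a hook or request handler that only receives a ctx.
func withClient(ctx context.Context, client *ent.Client) {
    ctx = ent.NewContext(ctx, client)

    if c := ent.FromContext(ctx); c != nil {
        log.Printf("%d packages tracked", c.DBPackage.Query().CountX(ctx))
    }
}
```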
241 ent/dbpackage.go
@@ -1,4 +1,4 @@
|
||||
// Code generated by entc, DO NOT EDIT.
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
@@ -8,12 +8,13 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"ALHP.go/ent/dbpackage"
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
|
||||
)
|
||||
|
||||
// DbPackage is the model entity for the DbPackage schema.
|
||||
type DbPackage struct {
|
||||
// DBPackage is the model entity for the DBPackage schema.
|
||||
type DBPackage struct {
|
||||
config `json:"-"`
|
||||
// ID of the ent.
|
||||
ID int `json:"id,omitempty"`
|
||||
@@ -35,37 +36,54 @@ type DbPackage struct {
|
||||
RepoVersion string `json:"repo_version,omitempty"`
|
||||
// BuildTimeStart holds the value of the "build_time_start" field.
|
||||
BuildTimeStart time.Time `json:"build_time_start,omitempty"`
|
||||
// BuildTimeEnd holds the value of the "build_time_end" field.
|
||||
BuildTimeEnd time.Time `json:"build_time_end,omitempty"`
|
||||
// Updated holds the value of the "updated" field.
|
||||
Updated time.Time `json:"updated,omitempty"`
|
||||
// Hash holds the value of the "hash" field.
|
||||
Hash string `json:"hash,omitempty"`
|
||||
// Lto holds the value of the "lto" field.
|
||||
Lto dbpackage.Lto `json:"lto,omitempty"`
|
||||
// LastVersionBuild holds the value of the "last_version_build" field.
|
||||
LastVersionBuild string `json:"last_version_build,omitempty"`
|
||||
// LastVerified holds the value of the "last_verified" field.
|
||||
LastVerified time.Time `json:"last_verified,omitempty"`
|
||||
// DebugSymbols holds the value of the "debug_symbols" field.
|
||||
DebugSymbols dbpackage.DebugSymbols `json:"debug_symbols,omitempty"`
|
||||
// MaxRss holds the value of the "max_rss" field.
|
||||
MaxRss *int64 `json:"max_rss,omitempty"`
|
||||
// UTime holds the value of the "u_time" field.
|
||||
UTime *int64 `json:"u_time,omitempty"`
|
||||
// STime holds the value of the "s_time" field.
|
||||
STime *int64 `json:"s_time,omitempty"`
|
||||
// IoIn holds the value of the "io_in" field.
|
||||
IoIn *int64 `json:"io_in,omitempty"`
|
||||
// IoOut holds the value of the "io_out" field.
|
||||
IoOut *int64 `json:"io_out,omitempty"`
|
||||
// TagRev holds the value of the "tag_rev" field.
|
||||
TagRev *string `json:"tag_rev,omitempty"`
|
||||
selectValues sql.SelectValues
|
||||
}
|
||||
|
||||
// scanValues returns the types for scanning values from sql.Rows.
|
||||
func (*DbPackage) scanValues(columns []string) ([]interface{}, error) {
|
||||
values := make([]interface{}, len(columns))
|
||||
func (*DBPackage) scanValues(columns []string) ([]any, error) {
|
||||
values := make([]any, len(columns))
|
||||
for i := range columns {
|
||||
switch columns[i] {
|
||||
case dbpackage.FieldPackages:
|
||||
values[i] = new([]byte)
|
||||
case dbpackage.FieldID:
|
||||
case dbpackage.FieldID, dbpackage.FieldMaxRss, dbpackage.FieldUTime, dbpackage.FieldSTime, dbpackage.FieldIoIn, dbpackage.FieldIoOut:
|
||||
values[i] = new(sql.NullInt64)
|
||||
case dbpackage.FieldPkgbase, dbpackage.FieldStatus, dbpackage.FieldSkipReason, dbpackage.FieldRepository, dbpackage.FieldMarch, dbpackage.FieldVersion, dbpackage.FieldRepoVersion, dbpackage.FieldHash:
|
||||
case dbpackage.FieldPkgbase, dbpackage.FieldStatus, dbpackage.FieldSkipReason, dbpackage.FieldRepository, dbpackage.FieldMarch, dbpackage.FieldVersion, dbpackage.FieldRepoVersion, dbpackage.FieldLto, dbpackage.FieldLastVersionBuild, dbpackage.FieldDebugSymbols, dbpackage.FieldTagRev:
|
||||
values[i] = new(sql.NullString)
|
||||
case dbpackage.FieldBuildTimeStart, dbpackage.FieldBuildTimeEnd, dbpackage.FieldUpdated:
|
||||
case dbpackage.FieldBuildTimeStart, dbpackage.FieldUpdated, dbpackage.FieldLastVerified:
|
||||
values[i] = new(sql.NullTime)
|
||||
default:
|
||||
return nil, fmt.Errorf("unexpected column %q for type DbPackage", columns[i])
|
||||
values[i] = new(sql.UnknownType)
|
||||
}
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// assignValues assigns the values that were returned from sql.Rows (after scanning)
|
||||
// to the DbPackage fields.
|
||||
func (dp *DbPackage) assignValues(columns []string, values []interface{}) error {
|
||||
// to the DBPackage fields.
|
||||
func (dp *DBPackage) assignValues(columns []string, values []any) error {
|
||||
if m, n := len(values), len(columns); m < n {
|
||||
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
|
||||
}
|
||||
@@ -133,85 +151,188 @@ func (dp *DbPackage) assignValues(columns []string, values []interface{}) error
|
||||
} else if value.Valid {
|
||||
dp.BuildTimeStart = value.Time
|
||||
}
|
||||
case dbpackage.FieldBuildTimeEnd:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field build_time_end", values[i])
|
||||
} else if value.Valid {
|
||||
dp.BuildTimeEnd = value.Time
|
||||
}
|
||||
case dbpackage.FieldUpdated:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field updated", values[i])
|
||||
} else if value.Valid {
|
||||
dp.Updated = value.Time
|
||||
}
|
||||
case dbpackage.FieldHash:
|
||||
case dbpackage.FieldLto:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field hash", values[i])
|
||||
return fmt.Errorf("unexpected type %T for field lto", values[i])
|
||||
} else if value.Valid {
|
||||
dp.Hash = value.String
|
||||
dp.Lto = dbpackage.Lto(value.String)
|
||||
}
|
||||
case dbpackage.FieldLastVersionBuild:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field last_version_build", values[i])
|
||||
} else if value.Valid {
|
||||
dp.LastVersionBuild = value.String
|
||||
}
|
||||
case dbpackage.FieldLastVerified:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field last_verified", values[i])
|
||||
} else if value.Valid {
|
||||
dp.LastVerified = value.Time
|
||||
}
|
||||
case dbpackage.FieldDebugSymbols:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field debug_symbols", values[i])
|
||||
} else if value.Valid {
|
||||
dp.DebugSymbols = dbpackage.DebugSymbols(value.String)
|
||||
}
|
||||
case dbpackage.FieldMaxRss:
|
||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field max_rss", values[i])
|
||||
} else if value.Valid {
|
||||
dp.MaxRss = new(int64)
|
||||
*dp.MaxRss = value.Int64
|
||||
}
|
||||
case dbpackage.FieldUTime:
|
||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field u_time", values[i])
|
||||
} else if value.Valid {
|
||||
dp.UTime = new(int64)
|
||||
*dp.UTime = value.Int64
|
||||
}
|
||||
case dbpackage.FieldSTime:
|
||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field s_time", values[i])
|
||||
} else if value.Valid {
|
||||
dp.STime = new(int64)
|
||||
*dp.STime = value.Int64
|
||||
}
|
||||
case dbpackage.FieldIoIn:
|
||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field io_in", values[i])
|
||||
} else if value.Valid {
|
||||
dp.IoIn = new(int64)
|
||||
*dp.IoIn = value.Int64
|
||||
}
|
||||
case dbpackage.FieldIoOut:
|
||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field io_out", values[i])
|
||||
} else if value.Valid {
|
||||
dp.IoOut = new(int64)
|
||||
*dp.IoOut = value.Int64
|
||||
}
|
||||
case dbpackage.FieldTagRev:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field tag_rev", values[i])
|
||||
} else if value.Valid {
|
||||
dp.TagRev = new(string)
|
||||
*dp.TagRev = value.String
|
||||
}
|
||||
default:
|
||||
dp.selectValues.Set(columns[i], values[i])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Update returns a builder for updating this DbPackage.
|
||||
// Note that you need to call DbPackage.Unwrap() before calling this method if this DbPackage
|
||||
// was returned from a transaction, and the transaction was committed or rolled back.
|
||||
func (dp *DbPackage) Update() *DbPackageUpdateOne {
|
||||
return (&DbPackageClient{config: dp.config}).UpdateOne(dp)
|
||||
// Value returns the ent.Value that was dynamically selected and assigned to the DBPackage.
|
||||
// This includes values selected through modifiers, order, etc.
|
||||
func (dp *DBPackage) Value(name string) (ent.Value, error) {
|
||||
return dp.selectValues.Get(name)
|
||||
}
|
||||
|
||||
// Unwrap unwraps the DbPackage entity that was returned from a transaction after it was closed,
|
||||
// Update returns a builder for updating this DBPackage.
|
||||
// Note that you need to call DBPackage.Unwrap() before calling this method if this DBPackage
|
||||
// was returned from a transaction, and the transaction was committed or rolled back.
|
||||
func (dp *DBPackage) Update() *DBPackageUpdateOne {
|
||||
return NewDBPackageClient(dp.config).UpdateOne(dp)
|
||||
}
|
||||
|
||||
// Unwrap unwraps the DBPackage entity that was returned from a transaction after it was closed,
|
||||
// so that all future queries will be executed through the driver which created the transaction.
|
||||
func (dp *DbPackage) Unwrap() *DbPackage {
|
||||
tx, ok := dp.config.driver.(*txDriver)
|
||||
func (dp *DBPackage) Unwrap() *DBPackage {
|
||||
_tx, ok := dp.config.driver.(*txDriver)
|
||||
if !ok {
|
||||
panic("ent: DbPackage is not a transactional entity")
|
||||
panic("ent: DBPackage is not a transactional entity")
|
||||
}
|
||||
dp.config.driver = tx.drv
|
||||
dp.config.driver = _tx.drv
|
||||
return dp
|
||||
}
|
||||
|
||||
// String implements the fmt.Stringer.
|
||||
func (dp *DbPackage) String() string {
|
||||
func (dp *DBPackage) String() string {
|
||||
var builder strings.Builder
|
||||
builder.WriteString("DbPackage(")
|
||||
builder.WriteString(fmt.Sprintf("id=%v", dp.ID))
|
||||
builder.WriteString(", pkgbase=")
|
||||
builder.WriteString("DBPackage(")
|
||||
builder.WriteString(fmt.Sprintf("id=%v, ", dp.ID))
|
||||
builder.WriteString("pkgbase=")
|
||||
builder.WriteString(dp.Pkgbase)
|
||||
builder.WriteString(", packages=")
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("packages=")
|
||||
builder.WriteString(fmt.Sprintf("%v", dp.Packages))
|
||||
builder.WriteString(", status=")
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("status=")
|
||||
builder.WriteString(fmt.Sprintf("%v", dp.Status))
|
||||
builder.WriteString(", skip_reason=")
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("skip_reason=")
|
||||
builder.WriteString(dp.SkipReason)
|
||||
builder.WriteString(", repository=")
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("repository=")
|
||||
builder.WriteString(fmt.Sprintf("%v", dp.Repository))
|
||||
builder.WriteString(", march=")
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("march=")
|
||||
builder.WriteString(dp.March)
|
||||
builder.WriteString(", version=")
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("version=")
|
||||
builder.WriteString(dp.Version)
|
||||
builder.WriteString(", repo_version=")
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("repo_version=")
|
||||
builder.WriteString(dp.RepoVersion)
|
||||
builder.WriteString(", build_time_start=")
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("build_time_start=")
|
||||
builder.WriteString(dp.BuildTimeStart.Format(time.ANSIC))
|
||||
builder.WriteString(", build_time_end=")
|
||||
builder.WriteString(dp.BuildTimeEnd.Format(time.ANSIC))
|
||||
builder.WriteString(", updated=")
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("updated=")
|
||||
builder.WriteString(dp.Updated.Format(time.ANSIC))
|
||||
builder.WriteString(", hash=")
|
||||
builder.WriteString(dp.Hash)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("lto=")
|
||||
builder.WriteString(fmt.Sprintf("%v", dp.Lto))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("last_version_build=")
|
||||
builder.WriteString(dp.LastVersionBuild)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("last_verified=")
|
||||
builder.WriteString(dp.LastVerified.Format(time.ANSIC))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("debug_symbols=")
|
||||
builder.WriteString(fmt.Sprintf("%v", dp.DebugSymbols))
|
||||
builder.WriteString(", ")
|
||||
if v := dp.MaxRss; v != nil {
|
||||
builder.WriteString("max_rss=")
|
||||
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||
}
|
||||
builder.WriteString(", ")
|
||||
if v := dp.UTime; v != nil {
|
||||
builder.WriteString("u_time=")
|
||||
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||
}
|
||||
builder.WriteString(", ")
|
||||
if v := dp.STime; v != nil {
|
||||
builder.WriteString("s_time=")
|
||||
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||
}
|
||||
builder.WriteString(", ")
|
||||
if v := dp.IoIn; v != nil {
|
||||
builder.WriteString("io_in=")
|
||||
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||
}
|
||||
builder.WriteString(", ")
|
||||
if v := dp.IoOut; v != nil {
|
||||
builder.WriteString("io_out=")
|
||||
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||
}
|
||||
builder.WriteString(", ")
|
||||
if v := dp.TagRev; v != nil {
|
||||
builder.WriteString("tag_rev=")
|
||||
builder.WriteString(*v)
|
||||
}
|
||||
builder.WriteByte(')')
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
// DbPackages is a parsable slice of DbPackage.
|
||||
type DbPackages []*DbPackage
|
||||
|
||||
func (dp DbPackages) config(cfg config) {
|
||||
for _i := range dp {
|
||||
dp[_i].config = cfg
|
||||
}
|
||||
}
|
||||
// DBPackages is a parsable slice of DBPackage.
|
||||
type DBPackages []*DBPackage
|
||||
|
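Unwrap, documented above, detaches an entity from the transaction it was created or loaded in. A hedged sketch of where that matters; the field values are placeholders and error handling is kept minimal:

```go
package main

import (
    "context"

    "somegit.dev/ALHP/ALHP.GO/ent"
    "somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
)

// createInTx creates a DBPackage inside a transaction and returns it
// detached from the (now closed) tx driver via Unwrap.
func createInTx(ctx context.Context, client *ent.Client) (*ent.DBPackage, error) {
    tx, err := client.Tx(ctx)
    if err != nil {
        return nil, err
    }
    pkg, err := tx.DBPackage.Create().
        SetPkgbase("example-pkgbase"). // placeholder values
        SetMarch("x86-64-v3").
        SetRepository(dbpackage.RepositoryExtra).
        Save(ctx)
    if err != nil {
        _ = tx.Rollback()
        return nil, err
    }
    if err := tx.Commit(); err != nil {
        return nil, err
    }
    return pkg.Unwrap(), nil
}
```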
@@ -1,9 +1,11 @@
|
||||
// Code generated by entc, DO NOT EDIT.
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package dbpackage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -29,12 +31,28 @@ const (
|
||||
FieldRepoVersion = "repo_version"
|
||||
// FieldBuildTimeStart holds the string denoting the build_time_start field in the database.
|
||||
FieldBuildTimeStart = "build_time_start"
|
||||
// FieldBuildTimeEnd holds the string denoting the build_time_end field in the database.
|
||||
FieldBuildTimeEnd = "build_time_end"
|
||||
// FieldUpdated holds the string denoting the updated field in the database.
|
||||
FieldUpdated = "updated"
|
||||
// FieldHash holds the string denoting the hash field in the database.
|
||||
FieldHash = "hash"
|
||||
// FieldLto holds the string denoting the lto field in the database.
|
||||
FieldLto = "lto"
|
||||
// FieldLastVersionBuild holds the string denoting the last_version_build field in the database.
|
||||
FieldLastVersionBuild = "last_version_build"
|
||||
// FieldLastVerified holds the string denoting the last_verified field in the database.
|
||||
FieldLastVerified = "last_verified"
|
||||
// FieldDebugSymbols holds the string denoting the debug_symbols field in the database.
|
||||
FieldDebugSymbols = "debug_symbols"
|
||||
// FieldMaxRss holds the string denoting the max_rss field in the database.
|
||||
FieldMaxRss = "max_rss"
|
||||
// FieldUTime holds the string denoting the u_time field in the database.
|
||||
FieldUTime = "u_time"
|
||||
// FieldSTime holds the string denoting the s_time field in the database.
|
||||
FieldSTime = "s_time"
|
||||
// FieldIoIn holds the string denoting the io_in field in the database.
|
||||
FieldIoIn = "io_in"
|
||||
// FieldIoOut holds the string denoting the io_out field in the database.
|
||||
FieldIoOut = "io_out"
|
||||
// FieldTagRev holds the string denoting the tag_rev field in the database.
|
||||
FieldTagRev = "tag_rev"
|
||||
// Table holds the table name of the dbpackage in the database.
|
||||
Table = "db_packages"
|
||||
)
|
||||
@@ -51,9 +69,17 @@ var Columns = []string{
|
||||
FieldVersion,
|
||||
FieldRepoVersion,
|
||||
FieldBuildTimeStart,
|
||||
FieldBuildTimeEnd,
|
||||
FieldUpdated,
|
||||
FieldHash,
|
||||
FieldLto,
|
||||
FieldLastVersionBuild,
|
||||
FieldLastVerified,
|
||||
FieldDebugSymbols,
|
||||
FieldMaxRss,
|
||||
FieldUTime,
|
||||
FieldSTime,
|
||||
FieldIoIn,
|
||||
FieldIoOut,
|
||||
FieldTagRev,
|
||||
}
|
||||
|
||||
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||
@@ -83,8 +109,9 @@ const DefaultStatus = StatusUnknown
|
||||
const (
|
||||
StatusSkipped Status = "skipped"
|
||||
StatusFailed Status = "failed"
|
||||
StatusBuild Status = "build"
|
||||
StatusBuilt Status = "built"
|
||||
StatusQueued Status = "queued"
|
||||
StatusDelayed Status = "delayed"
|
||||
StatusBuilding Status = "building"
|
||||
StatusLatest Status = "latest"
|
||||
StatusSigning Status = "signing"
|
||||
@@ -98,7 +125,7 @@ func (s Status) String() string {
|
||||
// StatusValidator is a validator for the "status" field enum values. It is called by the builders before save.
|
||||
func StatusValidator(s Status) error {
|
||||
switch s {
|
||||
case StatusSkipped, StatusFailed, StatusBuild, StatusQueued, StatusBuilding, StatusLatest, StatusSigning, StatusUnknown:
|
||||
case StatusSkipped, StatusFailed, StatusBuilt, StatusQueued, StatusDelayed, StatusBuilding, StatusLatest, StatusSigning, StatusUnknown:
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("dbpackage: invalid enum value for status field: %q", s)
|
||||
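This hunk renames StatusBuild to StatusBuilt and introduces StatusDelayed; the validator's case list changes accordingly. A small illustrative sketch of the effect (values chosen for illustration only):

```go
package main

import (
    "fmt"

    "somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
)

func main() {
    for _, s := range []dbpackage.Status{
        dbpackage.StatusBuilt,     // valid after this change
        dbpackage.StatusDelayed,   // valid after this change
        dbpackage.Status("build"), // old spelling, now rejected
    } {
        if err := dbpackage.StatusValidator(s); err != nil {
            fmt.Println("rejected:", err)
        }
    }
}
```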
@@ -110,9 +137,9 @@ type Repository string
|
||||
|
||||
// Repository values.
|
||||
const (
|
||||
RepositoryExtra Repository = "extra"
|
||||
RepositoryCore Repository = "core"
|
||||
RepositoryCommunity Repository = "community"
|
||||
RepositoryExtra Repository = "extra"
|
||||
RepositoryCore Repository = "core"
|
||||
RepositoryMultilib Repository = "multilib"
|
||||
)
|
||||
|
||||
func (r Repository) String() string {
|
||||
@@ -122,9 +149,167 @@ func (r Repository) String() string {
|
||||
// RepositoryValidator is a validator for the "repository" field enum values. It is called by the builders before save.
|
||||
func RepositoryValidator(r Repository) error {
|
||||
switch r {
|
||||
case RepositoryExtra, RepositoryCore, RepositoryCommunity:
|
||||
case RepositoryExtra, RepositoryCore, RepositoryMultilib:
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("dbpackage: invalid enum value for repository field: %q", r)
|
||||
}
|
||||
}
|
||||
|
||||
// Lto defines the type for the "lto" enum field.
|
||||
type Lto string
|
||||
|
||||
// LtoUnknown is the default value of the Lto enum.
|
||||
const DefaultLto = LtoUnknown
|
||||
|
||||
// Lto values.
|
||||
const (
|
||||
LtoEnabled Lto = "enabled"
|
||||
LtoUnknown Lto = "unknown"
|
||||
LtoDisabled Lto = "disabled"
|
||||
LtoAutoDisabled Lto = "auto_disabled"
|
||||
)
|
||||
|
||||
func (l Lto) String() string {
|
||||
return string(l)
|
||||
}
|
||||
|
||||
// LtoValidator is a validator for the "lto" field enum values. It is called by the builders before save.
|
||||
func LtoValidator(l Lto) error {
|
||||
switch l {
|
||||
case LtoEnabled, LtoUnknown, LtoDisabled, LtoAutoDisabled:
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("dbpackage: invalid enum value for lto field: %q", l)
|
||||
}
|
||||
}
|
||||
|
||||
// DebugSymbols defines the type for the "debug_symbols" enum field.
|
||||
type DebugSymbols string
|
||||
|
||||
// DebugSymbolsUnknown is the default value of the DebugSymbols enum.
|
||||
const DefaultDebugSymbols = DebugSymbolsUnknown
|
||||
|
||||
// DebugSymbols values.
|
||||
const (
|
||||
DebugSymbolsAvailable DebugSymbols = "available"
|
||||
DebugSymbolsUnknown DebugSymbols = "unknown"
|
||||
DebugSymbolsNotAvailable DebugSymbols = "not_available"
|
||||
)
|
||||
|
||||
func (ds DebugSymbols) String() string {
|
||||
return string(ds)
|
||||
}
|
||||
|
||||
// DebugSymbolsValidator is a validator for the "debug_symbols" field enum values. It is called by the builders before save.
|
||||
func DebugSymbolsValidator(ds DebugSymbols) error {
|
||||
switch ds {
|
||||
case DebugSymbolsAvailable, DebugSymbolsUnknown, DebugSymbolsNotAvailable:
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("dbpackage: invalid enum value for debug_symbols field: %q", ds)
|
||||
}
|
||||
}
|
||||
|
||||
// OrderOption defines the ordering options for the DBPackage queries.
|
||||
type OrderOption func(*sql.Selector)
|
||||
|
||||
// ByID orders the results by the id field.
|
||||
func ByID(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldID, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByPkgbase orders the results by the pkgbase field.
|
||||
func ByPkgbase(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldPkgbase, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByStatus orders the results by the status field.
|
||||
func ByStatus(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldStatus, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// BySkipReason orders the results by the skip_reason field.
|
||||
func BySkipReason(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldSkipReason, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByRepository orders the results by the repository field.
|
||||
func ByRepository(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldRepository, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByMarch orders the results by the march field.
|
||||
func ByMarch(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldMarch, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByVersion orders the results by the version field.
|
||||
func ByVersion(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldVersion, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByRepoVersion orders the results by the repo_version field.
|
||||
func ByRepoVersion(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldRepoVersion, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByBuildTimeStart orders the results by the build_time_start field.
|
||||
func ByBuildTimeStart(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldBuildTimeStart, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByUpdated orders the results by the updated field.
|
||||
func ByUpdated(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldUpdated, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByLto orders the results by the lto field.
|
||||
func ByLto(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldLto, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByLastVersionBuild orders the results by the last_version_build field.
|
||||
func ByLastVersionBuild(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldLastVersionBuild, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByLastVerified orders the results by the last_verified field.
|
||||
func ByLastVerified(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldLastVerified, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByDebugSymbols orders the results by the debug_symbols field.
|
||||
func ByDebugSymbols(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldDebugSymbols, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByMaxRss orders the results by the max_rss field.
|
||||
func ByMaxRss(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldMaxRss, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByUTime orders the results by the u_time field.
|
||||
func ByUTime(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldUTime, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// BySTime orders the results by the s_time field.
|
||||
func BySTime(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldSTime, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByIoIn orders the results by the io_in field.
|
||||
func ByIoIn(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldIoIn, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByIoOut orders the results by the io_out field.
|
||||
func ByIoOut(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldIoOut, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByTagRev orders the results by the tag_rev field.
|
||||
func ByTagRev(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldTagRev, opts...).ToFunc()
|
||||
}
|
||||
|
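The OrderOption helpers above plug straight into the generated query builder. A hedged example that combines a status predicate with descending order on updated; StatusEQ is a generated predicate helper assumed to exist in the (suppressed) where.go:

```go
package main

import (
    "context"

    "entgo.io/ent/dialect/sql"

    "somegit.dev/ALHP/ALHP.GO/ent"
    "somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
)

// queued returns up to 50 queued packages, newest update first.
func queued(ctx context.Context, client *ent.Client) ([]*ent.DBPackage, error) {
    return client.DBPackage.Query().
        Where(dbpackage.StatusEQ(dbpackage.StatusQueued)).
        Order(dbpackage.ByUpdated(sql.OrderDesc())).
        Limit(50).
        All(ctx)
}
```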
File diff suppressed because it is too large
@@ -1,4 +1,4 @@
|
||||
// Code generated by entc, DO NOT EDIT.
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
@@ -8,38 +8,38 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"ALHP.go/ent/dbpackage"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
|
||||
)
|
||||
|
||||
// DbPackageCreate is the builder for creating a DbPackage entity.
|
||||
type DbPackageCreate struct {
|
||||
// DBPackageCreate is the builder for creating a DBPackage entity.
|
||||
type DBPackageCreate struct {
|
||||
config
|
||||
mutation *DbPackageMutation
|
||||
mutation *DBPackageMutation
|
||||
hooks []Hook
|
||||
}
|
||||
|
||||
// SetPkgbase sets the "pkgbase" field.
|
||||
func (dpc *DbPackageCreate) SetPkgbase(s string) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetPkgbase(s string) *DBPackageCreate {
|
||||
dpc.mutation.SetPkgbase(s)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetPackages sets the "packages" field.
|
||||
func (dpc *DbPackageCreate) SetPackages(s []string) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetPackages(s []string) *DBPackageCreate {
|
||||
dpc.mutation.SetPackages(s)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetStatus sets the "status" field.
|
||||
func (dpc *DbPackageCreate) SetStatus(d dbpackage.Status) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetStatus(d dbpackage.Status) *DBPackageCreate {
|
||||
dpc.mutation.SetStatus(d)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableStatus sets the "status" field if the given value is not nil.
|
||||
func (dpc *DbPackageCreate) SetNillableStatus(d *dbpackage.Status) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetNillableStatus(d *dbpackage.Status) *DBPackageCreate {
|
||||
if d != nil {
|
||||
dpc.SetStatus(*d)
|
||||
}
|
||||
@@ -47,13 +47,13 @@ func (dpc *DbPackageCreate) SetNillableStatus(d *dbpackage.Status) *DbPackageCre
|
||||
}
|
||||
|
||||
// SetSkipReason sets the "skip_reason" field.
|
||||
func (dpc *DbPackageCreate) SetSkipReason(s string) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetSkipReason(s string) *DBPackageCreate {
|
||||
dpc.mutation.SetSkipReason(s)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableSkipReason sets the "skip_reason" field if the given value is not nil.
|
||||
func (dpc *DbPackageCreate) SetNillableSkipReason(s *string) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetNillableSkipReason(s *string) *DBPackageCreate {
|
||||
if s != nil {
|
||||
dpc.SetSkipReason(*s)
|
||||
}
|
||||
@@ -61,25 +61,25 @@ func (dpc *DbPackageCreate) SetNillableSkipReason(s *string) *DbPackageCreate {
|
||||
}
|
||||
|
||||
// SetRepository sets the "repository" field.
|
||||
func (dpc *DbPackageCreate) SetRepository(d dbpackage.Repository) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetRepository(d dbpackage.Repository) *DBPackageCreate {
|
||||
dpc.mutation.SetRepository(d)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetMarch sets the "march" field.
|
||||
func (dpc *DbPackageCreate) SetMarch(s string) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetMarch(s string) *DBPackageCreate {
|
||||
dpc.mutation.SetMarch(s)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetVersion sets the "version" field.
|
||||
func (dpc *DbPackageCreate) SetVersion(s string) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetVersion(s string) *DBPackageCreate {
|
||||
dpc.mutation.SetVersion(s)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableVersion sets the "version" field if the given value is not nil.
|
||||
func (dpc *DbPackageCreate) SetNillableVersion(s *string) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetNillableVersion(s *string) *DBPackageCreate {
|
||||
if s != nil {
|
||||
dpc.SetVersion(*s)
|
||||
}
|
||||
@@ -87,13 +87,13 @@ func (dpc *DbPackageCreate) SetNillableVersion(s *string) *DbPackageCreate {
|
||||
}
|
||||
|
||||
// SetRepoVersion sets the "repo_version" field.
|
||||
func (dpc *DbPackageCreate) SetRepoVersion(s string) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetRepoVersion(s string) *DBPackageCreate {
|
||||
dpc.mutation.SetRepoVersion(s)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableRepoVersion sets the "repo_version" field if the given value is not nil.
|
||||
func (dpc *DbPackageCreate) SetNillableRepoVersion(s *string) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetNillableRepoVersion(s *string) *DBPackageCreate {
|
||||
if s != nil {
|
||||
dpc.SetRepoVersion(*s)
|
||||
}
|
||||
@@ -101,110 +101,186 @@ func (dpc *DbPackageCreate) SetNillableRepoVersion(s *string) *DbPackageCreate {
|
||||
}
|
||||
|
||||
// SetBuildTimeStart sets the "build_time_start" field.
|
||||
func (dpc *DbPackageCreate) SetBuildTimeStart(t time.Time) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetBuildTimeStart(t time.Time) *DBPackageCreate {
|
||||
dpc.mutation.SetBuildTimeStart(t)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableBuildTimeStart sets the "build_time_start" field if the given value is not nil.
|
||||
func (dpc *DbPackageCreate) SetNillableBuildTimeStart(t *time.Time) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetNillableBuildTimeStart(t *time.Time) *DBPackageCreate {
|
||||
if t != nil {
|
||||
dpc.SetBuildTimeStart(*t)
|
||||
}
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetBuildTimeEnd sets the "build_time_end" field.
|
||||
func (dpc *DbPackageCreate) SetBuildTimeEnd(t time.Time) *DbPackageCreate {
|
||||
dpc.mutation.SetBuildTimeEnd(t)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableBuildTimeEnd sets the "build_time_end" field if the given value is not nil.
|
||||
func (dpc *DbPackageCreate) SetNillableBuildTimeEnd(t *time.Time) *DbPackageCreate {
|
||||
if t != nil {
|
||||
dpc.SetBuildTimeEnd(*t)
|
||||
}
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetUpdated sets the "updated" field.
|
||||
func (dpc *DbPackageCreate) SetUpdated(t time.Time) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetUpdated(t time.Time) *DBPackageCreate {
|
||||
dpc.mutation.SetUpdated(t)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableUpdated sets the "updated" field if the given value is not nil.
|
||||
func (dpc *DbPackageCreate) SetNillableUpdated(t *time.Time) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetNillableUpdated(t *time.Time) *DBPackageCreate {
|
||||
if t != nil {
|
||||
dpc.SetUpdated(*t)
|
||||
}
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetHash sets the "hash" field.
|
||||
func (dpc *DbPackageCreate) SetHash(s string) *DbPackageCreate {
|
||||
dpc.mutation.SetHash(s)
|
||||
// SetLto sets the "lto" field.
|
||||
func (dpc *DBPackageCreate) SetLto(d dbpackage.Lto) *DBPackageCreate {
|
||||
dpc.mutation.SetLto(d)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableHash sets the "hash" field if the given value is not nil.
|
||||
func (dpc *DbPackageCreate) SetNillableHash(s *string) *DbPackageCreate {
|
||||
if s != nil {
|
||||
dpc.SetHash(*s)
|
||||
// SetNillableLto sets the "lto" field if the given value is not nil.
|
||||
func (dpc *DBPackageCreate) SetNillableLto(d *dbpackage.Lto) *DBPackageCreate {
|
||||
if d != nil {
|
||||
dpc.SetLto(*d)
|
||||
}
|
||||
return dpc
|
||||
}
|
||||
|
||||
// Mutation returns the DbPackageMutation object of the builder.
|
||||
func (dpc *DbPackageCreate) Mutation() *DbPackageMutation {
|
||||
// SetLastVersionBuild sets the "last_version_build" field.
|
||||
func (dpc *DBPackageCreate) SetLastVersionBuild(s string) *DBPackageCreate {
|
||||
dpc.mutation.SetLastVersionBuild(s)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableLastVersionBuild sets the "last_version_build" field if the given value is not nil.
|
||||
func (dpc *DBPackageCreate) SetNillableLastVersionBuild(s *string) *DBPackageCreate {
|
||||
if s != nil {
|
||||
dpc.SetLastVersionBuild(*s)
|
||||
}
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetLastVerified sets the "last_verified" field.
|
||||
func (dpc *DBPackageCreate) SetLastVerified(t time.Time) *DBPackageCreate {
|
||||
dpc.mutation.SetLastVerified(t)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableLastVerified sets the "last_verified" field if the given value is not nil.
|
||||
func (dpc *DBPackageCreate) SetNillableLastVerified(t *time.Time) *DBPackageCreate {
|
||||
if t != nil {
|
||||
dpc.SetLastVerified(*t)
|
||||
}
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetDebugSymbols sets the "debug_symbols" field.
|
||||
func (dpc *DBPackageCreate) SetDebugSymbols(ds dbpackage.DebugSymbols) *DBPackageCreate {
|
||||
dpc.mutation.SetDebugSymbols(ds)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableDebugSymbols sets the "debug_symbols" field if the given value is not nil.
|
||||
func (dpc *DBPackageCreate) SetNillableDebugSymbols(ds *dbpackage.DebugSymbols) *DBPackageCreate {
|
||||
if ds != nil {
|
||||
dpc.SetDebugSymbols(*ds)
|
||||
}
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetMaxRss sets the "max_rss" field.
|
||||
func (dpc *DBPackageCreate) SetMaxRss(i int64) *DBPackageCreate {
|
||||
dpc.mutation.SetMaxRss(i)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableMaxRss sets the "max_rss" field if the given value is not nil.
|
||||
func (dpc *DBPackageCreate) SetNillableMaxRss(i *int64) *DBPackageCreate {
|
||||
if i != nil {
|
||||
dpc.SetMaxRss(*i)
|
||||
}
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetUTime sets the "u_time" field.
|
||||
func (dpc *DBPackageCreate) SetUTime(i int64) *DBPackageCreate {
|
||||
dpc.mutation.SetUTime(i)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableUTime sets the "u_time" field if the given value is not nil.
|
||||
func (dpc *DBPackageCreate) SetNillableUTime(i *int64) *DBPackageCreate {
|
||||
if i != nil {
|
||||
dpc.SetUTime(*i)
|
||||
}
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetSTime sets the "s_time" field.
|
||||
func (dpc *DBPackageCreate) SetSTime(i int64) *DBPackageCreate {
|
||||
dpc.mutation.SetSTime(i)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableSTime sets the "s_time" field if the given value is not nil.
|
||||
func (dpc *DBPackageCreate) SetNillableSTime(i *int64) *DBPackageCreate {
|
||||
if i != nil {
|
||||
dpc.SetSTime(*i)
|
||||
}
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetIoIn sets the "io_in" field.
|
||||
func (dpc *DBPackageCreate) SetIoIn(i int64) *DBPackageCreate {
|
||||
dpc.mutation.SetIoIn(i)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableIoIn sets the "io_in" field if the given value is not nil.
|
||||
func (dpc *DBPackageCreate) SetNillableIoIn(i *int64) *DBPackageCreate {
|
||||
if i != nil {
|
||||
dpc.SetIoIn(*i)
|
||||
}
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetIoOut sets the "io_out" field.
|
||||
func (dpc *DBPackageCreate) SetIoOut(i int64) *DBPackageCreate {
|
||||
dpc.mutation.SetIoOut(i)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableIoOut sets the "io_out" field if the given value is not nil.
|
||||
func (dpc *DBPackageCreate) SetNillableIoOut(i *int64) *DBPackageCreate {
|
||||
if i != nil {
|
||||
dpc.SetIoOut(*i)
|
||||
}
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetTagRev sets the "tag_rev" field.
|
||||
func (dpc *DBPackageCreate) SetTagRev(s string) *DBPackageCreate {
|
||||
dpc.mutation.SetTagRev(s)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableTagRev sets the "tag_rev" field if the given value is not nil.
|
||||
func (dpc *DBPackageCreate) SetNillableTagRev(s *string) *DBPackageCreate {
|
||||
if s != nil {
|
||||
dpc.SetTagRev(*s)
|
||||
}
|
||||
return dpc
|
||||
}
|
||||
|
||||
// Mutation returns the DBPackageMutation object of the builder.
|
||||
func (dpc *DBPackageCreate) Mutation() *DBPackageMutation {
|
||||
return dpc.mutation
|
||||
}
|
||||
|
||||
// Save creates the DbPackage in the database.
|
||||
func (dpc *DbPackageCreate) Save(ctx context.Context) (*DbPackage, error) {
|
||||
var (
|
||||
err error
|
||||
node *DbPackage
|
||||
)
|
||||
// Save creates the DBPackage in the database.
|
||||
func (dpc *DBPackageCreate) Save(ctx context.Context) (*DBPackage, error) {
|
||||
dpc.defaults()
|
||||
if len(dpc.hooks) == 0 {
|
||||
if err = dpc.check(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
node, err = dpc.sqlSave(ctx)
|
||||
} else {
|
||||
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
|
||||
mutation, ok := m.(*DbPackageMutation)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected mutation type %T", m)
|
||||
}
|
||||
if err = dpc.check(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dpc.mutation = mutation
|
||||
if node, err = dpc.sqlSave(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mutation.id = &node.ID
|
||||
mutation.done = true
|
||||
return node, err
|
||||
})
|
||||
for i := len(dpc.hooks) - 1; i >= 0; i-- {
|
||||
if dpc.hooks[i] == nil {
|
||||
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
|
||||
}
|
||||
mut = dpc.hooks[i](mut)
|
||||
}
|
||||
if _, err := mut.Mutate(ctx, dpc.mutation); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return node, err
|
||||
return withHooks(ctx, dpc.sqlSave, dpc.mutation, dpc.hooks)
|
||||
}
|
||||
|
||||
// SaveX calls Save and panics if Save returns an error.
|
||||
func (dpc *DbPackageCreate) SaveX(ctx context.Context) *DbPackage {
|
||||
func (dpc *DBPackageCreate) SaveX(ctx context.Context) *DBPackage {
|
||||
v, err := dpc.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -213,200 +289,205 @@ func (dpc *DbPackageCreate) SaveX(ctx context.Context) *DbPackage {
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (dpc *DbPackageCreate) Exec(ctx context.Context) error {
|
||||
func (dpc *DBPackageCreate) Exec(ctx context.Context) error {
|
||||
_, err := dpc.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (dpc *DbPackageCreate) ExecX(ctx context.Context) {
|
||||
func (dpc *DBPackageCreate) ExecX(ctx context.Context) {
|
||||
if err := dpc.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
|
||||
func (dpc *DbPackageCreate) defaults() {
|
||||
func (dpc *DBPackageCreate) defaults() {
|
||||
if _, ok := dpc.mutation.Status(); !ok {
|
||||
v := dbpackage.DefaultStatus
|
||||
dpc.mutation.SetStatus(v)
|
||||
}
|
||||
if _, ok := dpc.mutation.Lto(); !ok {
|
||||
v := dbpackage.DefaultLto
|
||||
dpc.mutation.SetLto(v)
|
||||
}
|
||||
if _, ok := dpc.mutation.DebugSymbols(); !ok {
|
||||
v := dbpackage.DefaultDebugSymbols
|
||||
dpc.mutation.SetDebugSymbols(v)
|
||||
}
|
||||
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
|
||||
func (dpc *DbPackageCreate) check() error {
|
||||
func (dpc *DBPackageCreate) check() error {
|
||||
if _, ok := dpc.mutation.Pkgbase(); !ok {
|
||||
return &ValidationError{Name: "pkgbase", err: errors.New(`ent: missing required field "pkgbase"`)}
|
||||
return &ValidationError{Name: "pkgbase", err: errors.New(`ent: missing required field "DBPackage.pkgbase"`)}
|
||||
}
|
||||
if v, ok := dpc.mutation.Pkgbase(); ok {
|
||||
if err := dbpackage.PkgbaseValidator(v); err != nil {
|
||||
return &ValidationError{Name: "pkgbase", err: fmt.Errorf(`ent: validator failed for field "pkgbase": %w`, err)}
|
||||
return &ValidationError{Name: "pkgbase", err: fmt.Errorf(`ent: validator failed for field "DBPackage.pkgbase": %w`, err)}
|
||||
}
|
||||
}
|
||||
if v, ok := dpc.mutation.Status(); ok {
|
||||
if err := dbpackage.StatusValidator(v); err != nil {
|
||||
return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "status": %w`, err)}
|
||||
return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "DBPackage.status": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _, ok := dpc.mutation.Repository(); !ok {
|
||||
return &ValidationError{Name: "repository", err: errors.New(`ent: missing required field "repository"`)}
|
||||
return &ValidationError{Name: "repository", err: errors.New(`ent: missing required field "DBPackage.repository"`)}
|
||||
}
|
||||
if v, ok := dpc.mutation.Repository(); ok {
|
||||
if err := dbpackage.RepositoryValidator(v); err != nil {
|
||||
return &ValidationError{Name: "repository", err: fmt.Errorf(`ent: validator failed for field "repository": %w`, err)}
|
||||
return &ValidationError{Name: "repository", err: fmt.Errorf(`ent: validator failed for field "DBPackage.repository": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _, ok := dpc.mutation.March(); !ok {
|
||||
return &ValidationError{Name: "march", err: errors.New(`ent: missing required field "march"`)}
|
||||
return &ValidationError{Name: "march", err: errors.New(`ent: missing required field "DBPackage.march"`)}
|
||||
}
|
||||
if v, ok := dpc.mutation.March(); ok {
|
||||
if err := dbpackage.MarchValidator(v); err != nil {
|
||||
return &ValidationError{Name: "march", err: fmt.Errorf(`ent: validator failed for field "march": %w`, err)}
|
||||
return &ValidationError{Name: "march", err: fmt.Errorf(`ent: validator failed for field "DBPackage.march": %w`, err)}
|
||||
}
|
||||
}
|
||||
if v, ok := dpc.mutation.Lto(); ok {
|
||||
if err := dbpackage.LtoValidator(v); err != nil {
|
||||
return &ValidationError{Name: "lto", err: fmt.Errorf(`ent: validator failed for field "DBPackage.lto": %w`, err)}
|
||||
}
|
||||
}
|
||||
if v, ok := dpc.mutation.DebugSymbols(); ok {
|
||||
if err := dbpackage.DebugSymbolsValidator(v); err != nil {
|
||||
return &ValidationError{Name: "debug_symbols", err: fmt.Errorf(`ent: validator failed for field "DBPackage.debug_symbols": %w`, err)}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dpc *DbPackageCreate) sqlSave(ctx context.Context) (*DbPackage, error) {
|
||||
func (dpc *DBPackageCreate) sqlSave(ctx context.Context) (*DBPackage, error) {
|
||||
if err := dpc.check(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_node, _spec := dpc.createSpec()
|
||||
if err := sqlgraph.CreateNode(ctx, dpc.driver, _spec); err != nil {
|
||||
if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{err.Error(), err}
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
id := _spec.ID.Value.(int64)
|
||||
_node.ID = int(id)
|
||||
dpc.mutation.id = &_node.ID
|
||||
dpc.mutation.done = true
|
||||
return _node, nil
|
||||
}
|
||||
|
||||
func (dpc *DbPackageCreate) createSpec() (*DbPackage, *sqlgraph.CreateSpec) {
|
||||
func (dpc *DBPackageCreate) createSpec() (*DBPackage, *sqlgraph.CreateSpec) {
|
||||
var (
|
||||
_node = &DbPackage{config: dpc.config}
|
||||
_spec = &sqlgraph.CreateSpec{
|
||||
Table: dbpackage.Table,
|
||||
ID: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeInt,
|
||||
Column: dbpackage.FieldID,
|
||||
},
|
||||
}
|
||||
_node = &DBPackage{config: dpc.config}
|
||||
_spec = sqlgraph.NewCreateSpec(dbpackage.Table, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt))
|
||||
)
|
||||
if value, ok := dpc.mutation.Pkgbase(); ok {
|
||||
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
|
||||
Type: field.TypeString,
|
||||
Value: value,
|
||||
Column: dbpackage.FieldPkgbase,
|
||||
})
|
||||
_spec.SetField(dbpackage.FieldPkgbase, field.TypeString, value)
|
||||
_node.Pkgbase = value
|
||||
}
|
||||
if value, ok := dpc.mutation.Packages(); ok {
|
||||
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
|
||||
Type: field.TypeJSON,
|
||||
Value: value,
|
||||
Column: dbpackage.FieldPackages,
|
||||
})
|
||||
_spec.SetField(dbpackage.FieldPackages, field.TypeJSON, value)
|
||||
_node.Packages = value
|
||||
}
|
||||
if value, ok := dpc.mutation.Status(); ok {
|
||||
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
|
||||
Type: field.TypeEnum,
|
||||
Value: value,
|
||||
Column: dbpackage.FieldStatus,
|
||||
})
|
||||
_spec.SetField(dbpackage.FieldStatus, field.TypeEnum, value)
|
||||
_node.Status = value
|
||||
}
|
||||
if value, ok := dpc.mutation.SkipReason(); ok {
|
||||
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
|
||||
Type: field.TypeString,
|
||||
Value: value,
|
||||
Column: dbpackage.FieldSkipReason,
|
||||
})
|
||||
_spec.SetField(dbpackage.FieldSkipReason, field.TypeString, value)
|
||||
_node.SkipReason = value
|
||||
}
|
||||
if value, ok := dpc.mutation.Repository(); ok {
|
||||
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
|
||||
Type: field.TypeEnum,
|
||||
Value: value,
|
||||
Column: dbpackage.FieldRepository,
|
||||
})
|
||||
_spec.SetField(dbpackage.FieldRepository, field.TypeEnum, value)
|
||||
_node.Repository = value
|
||||
}
|
||||
if value, ok := dpc.mutation.March(); ok {
|
||||
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
|
||||
Type: field.TypeString,
|
||||
Value: value,
|
||||
Column: dbpackage.FieldMarch,
|
||||
})
|
||||
_spec.SetField(dbpackage.FieldMarch, field.TypeString, value)
|
||||
_node.March = value
|
||||
}
|
||||
if value, ok := dpc.mutation.Version(); ok {
|
||||
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
|
||||
Type: field.TypeString,
|
||||
Value: value,
|
||||
Column: dbpackage.FieldVersion,
|
||||
})
|
||||
_spec.SetField(dbpackage.FieldVersion, field.TypeString, value)
|
||||
_node.Version = value
|
||||
}
|
||||
if value, ok := dpc.mutation.RepoVersion(); ok {
|
||||
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
|
||||
Type: field.TypeString,
|
||||
Value: value,
|
||||
Column: dbpackage.FieldRepoVersion,
|
||||
})
|
||||
_spec.SetField(dbpackage.FieldRepoVersion, field.TypeString, value)
|
||||
_node.RepoVersion = value
|
||||
}
|
||||
if value, ok := dpc.mutation.BuildTimeStart(); ok {
|
||||
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
|
||||
Type: field.TypeTime,
|
||||
Value: value,
|
||||
Column: dbpackage.FieldBuildTimeStart,
|
||||
})
|
||||
_spec.SetField(dbpackage.FieldBuildTimeStart, field.TypeTime, value)
|
||||
_node.BuildTimeStart = value
|
||||
}
|
||||
if value, ok := dpc.mutation.BuildTimeEnd(); ok {
|
||||
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
|
||||
Type: field.TypeTime,
|
||||
Value: value,
|
||||
Column: dbpackage.FieldBuildTimeEnd,
|
||||
})
|
||||
_node.BuildTimeEnd = value
|
||||
}
|
||||
if value, ok := dpc.mutation.Updated(); ok {
|
||||
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
|
||||
Type: field.TypeTime,
|
||||
Value: value,
|
||||
Column: dbpackage.FieldUpdated,
|
||||
})
|
||||
_spec.SetField(dbpackage.FieldUpdated, field.TypeTime, value)
|
||||
_node.Updated = value
|
||||
}
|
||||
if value, ok := dpc.mutation.Hash(); ok {
|
||||
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
|
||||
Type: field.TypeString,
|
||||
Value: value,
|
||||
Column: dbpackage.FieldHash,
|
||||
})
|
||||
_node.Hash = value
|
||||
if value, ok := dpc.mutation.Lto(); ok {
|
||||
_spec.SetField(dbpackage.FieldLto, field.TypeEnum, value)
|
||||
_node.Lto = value
|
||||
}
|
||||
if value, ok := dpc.mutation.LastVersionBuild(); ok {
|
||||
_spec.SetField(dbpackage.FieldLastVersionBuild, field.TypeString, value)
|
||||
_node.LastVersionBuild = value
|
||||
}
|
||||
if value, ok := dpc.mutation.LastVerified(); ok {
|
||||
_spec.SetField(dbpackage.FieldLastVerified, field.TypeTime, value)
|
||||
_node.LastVerified = value
|
||||
}
|
||||
if value, ok := dpc.mutation.DebugSymbols(); ok {
|
||||
_spec.SetField(dbpackage.FieldDebugSymbols, field.TypeEnum, value)
|
||||
_node.DebugSymbols = value
|
||||
}
|
||||
if value, ok := dpc.mutation.MaxRss(); ok {
|
||||
_spec.SetField(dbpackage.FieldMaxRss, field.TypeInt64, value)
|
||||
_node.MaxRss = &value
|
||||
}
|
||||
if value, ok := dpc.mutation.UTime(); ok {
|
||||
_spec.SetField(dbpackage.FieldUTime, field.TypeInt64, value)
|
||||
_node.UTime = &value
|
||||
}
|
||||
if value, ok := dpc.mutation.STime(); ok {
|
||||
_spec.SetField(dbpackage.FieldSTime, field.TypeInt64, value)
|
||||
_node.STime = &value
|
||||
}
|
||||
if value, ok := dpc.mutation.IoIn(); ok {
|
||||
_spec.SetField(dbpackage.FieldIoIn, field.TypeInt64, value)
|
||||
_node.IoIn = &value
|
||||
}
|
||||
if value, ok := dpc.mutation.IoOut(); ok {
|
||||
_spec.SetField(dbpackage.FieldIoOut, field.TypeInt64, value)
|
||||
_node.IoOut = &value
|
||||
}
|
||||
if value, ok := dpc.mutation.TagRev(); ok {
|
||||
_spec.SetField(dbpackage.FieldTagRev, field.TypeString, value)
|
||||
_node.TagRev = &value
|
||||
}
|
||||
return _node, _spec
|
||||
}
|
||||
|
||||
// DbPackageCreateBulk is the builder for creating many DbPackage entities in bulk.
|
||||
type DbPackageCreateBulk struct {
|
||||
// DBPackageCreateBulk is the builder for creating many DBPackage entities in bulk.
|
||||
type DBPackageCreateBulk struct {
|
||||
config
|
||||
builders []*DbPackageCreate
|
||||
err error
|
||||
builders []*DBPackageCreate
|
||||
}
|
||||
|
||||
// Save creates the DbPackage entities in the database.
|
||||
func (dpcb *DbPackageCreateBulk) Save(ctx context.Context) ([]*DbPackage, error) {
|
||||
// Save creates the DBPackage entities in the database.
|
||||
func (dpcb *DBPackageCreateBulk) Save(ctx context.Context) ([]*DBPackage, error) {
|
||||
if dpcb.err != nil {
|
||||
return nil, dpcb.err
|
||||
}
|
||||
specs := make([]*sqlgraph.CreateSpec, len(dpcb.builders))
|
||||
nodes := make([]*DbPackage, len(dpcb.builders))
|
||||
nodes := make([]*DBPackage, len(dpcb.builders))
|
||||
mutators := make([]Mutator, len(dpcb.builders))
|
||||
for i := range dpcb.builders {
|
||||
func(i int, root context.Context) {
|
||||
builder := dpcb.builders[i]
|
||||
builder.defaults()
|
||||
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
|
||||
mutation, ok := m.(*DbPackageMutation)
|
||||
mutation, ok := m.(*DBPackageMutation)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected mutation type %T", m)
|
||||
}
|
||||
@@ -414,8 +495,8 @@ func (dpcb *DbPackageCreateBulk) Save(ctx context.Context) ([]*DbPackage, error)
|
||||
return nil, err
|
||||
}
|
||||
builder.mutation = mutation
|
||||
nodes[i], specs[i] = builder.createSpec()
|
||||
var err error
|
||||
nodes[i], specs[i] = builder.createSpec()
|
||||
if i < len(mutators)-1 {
|
||||
_, err = mutators[i+1].Mutate(root, dpcb.builders[i+1].mutation)
|
||||
} else {
|
||||
@@ -423,7 +504,7 @@ func (dpcb *DbPackageCreateBulk) Save(ctx context.Context) ([]*DbPackage, error)
|
||||
// Invoke the actual operation on the latest mutation in the chain.
|
||||
if err = sqlgraph.BatchCreate(ctx, dpcb.driver, spec); err != nil {
|
||||
if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{err.Error(), err}
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -431,11 +512,11 @@ func (dpcb *DbPackageCreateBulk) Save(ctx context.Context) ([]*DbPackage, error)
|
||||
return nil, err
|
||||
}
|
||||
mutation.id = &nodes[i].ID
|
||||
mutation.done = true
|
||||
if specs[i].ID.Value != nil {
|
||||
id := specs[i].ID.Value.(int64)
|
||||
nodes[i].ID = int(id)
|
||||
}
|
||||
mutation.done = true
|
||||
return nodes[i], nil
|
||||
})
|
||||
for i := len(builder.hooks) - 1; i >= 0; i-- {
|
||||
@@ -453,7 +534,7 @@ func (dpcb *DbPackageCreateBulk) Save(ctx context.Context) ([]*DbPackage, error)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
func (dpcb *DbPackageCreateBulk) SaveX(ctx context.Context) []*DbPackage {
|
||||
func (dpcb *DBPackageCreateBulk) SaveX(ctx context.Context) []*DBPackage {
|
||||
v, err := dpcb.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -462,13 +543,13 @@ func (dpcb *DbPackageCreateBulk) SaveX(ctx context.Context) []*DbPackage {
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (dpcb *DbPackageCreateBulk) Exec(ctx context.Context) error {
|
||||
func (dpcb *DBPackageCreateBulk) Exec(ctx context.Context) error {
|
||||
_, err := dpcb.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (dpcb *DbPackageCreateBulk) ExecX(ctx context.Context) {
|
||||
func (dpcb *DBPackageCreateBulk) ExecX(ctx context.Context) {
|
||||
if err := dpcb.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
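The bulk builder above feeds every createSpec into one sqlgraph.BatchCreate call. A hedged sketch of how it is typically driven; the march and repository values are placeholders:

```go
package main

import (
    "context"

    "somegit.dev/ALHP/ALHP.GO/ent"
    "somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
)

// queueAll inserts one queued DBPackage row per pkgbase in a single batch.
func queueAll(ctx context.Context, client *ent.Client, pkgbases []string) error {
    builders := make([]*ent.DBPackageCreate, 0, len(pkgbases))
    for _, pb := range pkgbases {
        builders = append(builders, client.DBPackage.Create().
            SetPkgbase(pb).
            SetMarch("x86-64-v3").
            SetRepository(dbpackage.RepositoryExtra).
            SetStatus(dbpackage.StatusQueued))
    }
    return client.DBPackage.CreateBulk(builders...).Exec(ctx)
}
```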
@@ -1,65 +1,37 @@
// Code generated by entc, DO NOT EDIT.
// Code generated by ent, DO NOT EDIT.

package ent

import (
	"context"
	"fmt"

	"ALHP.go/ent/dbpackage"
	"ALHP.go/ent/predicate"
	"entgo.io/ent/dialect/sql"
	"entgo.io/ent/dialect/sql/sqlgraph"
	"entgo.io/ent/schema/field"
	"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
	"somegit.dev/ALHP/ALHP.GO/ent/predicate"
)

// DbPackageDelete is the builder for deleting a DbPackage entity.
type DbPackageDelete struct {
// DBPackageDelete is the builder for deleting a DBPackage entity.
type DBPackageDelete struct {
	config
	hooks    []Hook
	mutation *DbPackageMutation
	mutation *DBPackageMutation
}

// Where appends a list predicates to the DbPackageDelete builder.
func (dpd *DbPackageDelete) Where(ps ...predicate.DbPackage) *DbPackageDelete {
// Where appends a list predicates to the DBPackageDelete builder.
func (dpd *DBPackageDelete) Where(ps ...predicate.DBPackage) *DBPackageDelete {
	dpd.mutation.Where(ps...)
	return dpd
}

// Exec executes the deletion query and returns how many vertices were deleted.
func (dpd *DbPackageDelete) Exec(ctx context.Context) (int, error) {
	var (
		err      error
		affected int
	)
	if len(dpd.hooks) == 0 {
		affected, err = dpd.sqlExec(ctx)
	} else {
		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
			mutation, ok := m.(*DbPackageMutation)
			if !ok {
				return nil, fmt.Errorf("unexpected mutation type %T", m)
			}
			dpd.mutation = mutation
			affected, err = dpd.sqlExec(ctx)
			mutation.done = true
			return affected, err
		})
		for i := len(dpd.hooks) - 1; i >= 0; i-- {
			if dpd.hooks[i] == nil {
				return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
			}
			mut = dpd.hooks[i](mut)
		}
		if _, err := mut.Mutate(ctx, dpd.mutation); err != nil {
			return 0, err
		}
	}
	return affected, err
func (dpd *DBPackageDelete) Exec(ctx context.Context) (int, error) {
	return withHooks(ctx, dpd.sqlExec, dpd.mutation, dpd.hooks)
}

// ExecX is like Exec, but panics if an error occurs.
func (dpd *DbPackageDelete) ExecX(ctx context.Context) int {
func (dpd *DBPackageDelete) ExecX(ctx context.Context) int {
	n, err := dpd.Exec(ctx)
	if err != nil {
		panic(err)
@@ -67,16 +39,8 @@ func (dpd *DbPackageDelete) ExecX(ctx context.Context) int {
	return n
}

func (dpd *DbPackageDelete) sqlExec(ctx context.Context) (int, error) {
	_spec := &sqlgraph.DeleteSpec{
		Node: &sqlgraph.NodeSpec{
			Table: dbpackage.Table,
			ID: &sqlgraph.FieldSpec{
				Type:   field.TypeInt,
				Column: dbpackage.FieldID,
			},
		},
	}
func (dpd *DBPackageDelete) sqlExec(ctx context.Context) (int, error) {
	_spec := sqlgraph.NewDeleteSpec(dbpackage.Table, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt))
	if ps := dpd.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
@@ -84,16 +48,27 @@ func (dpd *DbPackageDelete) sqlExec(ctx context.Context) (int, error) {
			}
		}
	}
	return sqlgraph.DeleteNodes(ctx, dpd.driver, _spec)
	affected, err := sqlgraph.DeleteNodes(ctx, dpd.driver, _spec)
	if err != nil && sqlgraph.IsConstraintError(err) {
		err = &ConstraintError{msg: err.Error(), wrap: err}
	}
	dpd.mutation.done = true
	return affected, err
}

// DbPackageDeleteOne is the builder for deleting a single DbPackage entity.
type DbPackageDeleteOne struct {
	dpd *DbPackageDelete
// DBPackageDeleteOne is the builder for deleting a single DBPackage entity.
type DBPackageDeleteOne struct {
	dpd *DBPackageDelete
}

// Where appends a list predicates to the DBPackageDelete builder.
func (dpdo *DBPackageDeleteOne) Where(ps ...predicate.DBPackage) *DBPackageDeleteOne {
	dpdo.dpd.mutation.Where(ps...)
	return dpdo
}

// Exec executes the deletion query.
func (dpdo *DbPackageDeleteOne) Exec(ctx context.Context) error {
func (dpdo *DBPackageDeleteOne) Exec(ctx context.Context) error {
	n, err := dpdo.dpd.Exec(ctx)
	switch {
	case err != nil:
@@ -106,6 +81,8 @@ func (dpdo *DbPackageDeleteOne) Exec(ctx context.Context) error {
	}

// ExecX is like Exec, but panics if an error occurs.
func (dpdo *DbPackageDeleteOne) ExecX(ctx context.Context) {
	dpdo.dpd.ExecX(ctx)
func (dpdo *DBPackageDeleteOne) ExecX(ctx context.Context) {
	if err := dpdo.Exec(ctx); err != nil {
		panic(err)
	}
}
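The renamed delete builder keeps the generated Where/Exec surface, so bulk deletes are still predicate-driven. A minimal sketch of calling it, assuming an open *ent.Client named db and that the generated dbpackage predicate and enum helpers follow ent's usual naming; the clean-up function itself is hypothetical:

```go
package alhpexamples

import (
	"context"
	"log"

	"somegit.dev/ALHP/ALHP.GO/ent"
	"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
)

// pruneFailed deletes every DBPackage row currently marked as failed and
// reports how many rows were removed. Illustrative only.
func pruneFailed(ctx context.Context, db *ent.Client) {
	n, err := db.DBPackage.Delete().
		Where(dbpackage.StatusEQ(dbpackage.StatusFailed)).
		Exec(ctx)
	if err != nil {
		log.Fatalf("delete failed: %v", err)
	}
	log.Printf("removed %d failed packages", n)
}
```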
File diff suppressed because it is too large
File diff suppressed because it is too large

435  ent/ent.go
@@ -1,56 +1,89 @@
|
||||
// Code generated by entc, DO NOT EDIT.
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
"ALHP.go/ent/dbpackage"
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
|
||||
)
|
||||
|
||||
// ent aliases to avoid import conflicts in user's code.
|
||||
type (
|
||||
Op = ent.Op
|
||||
Hook = ent.Hook
|
||||
Value = ent.Value
|
||||
Query = ent.Query
|
||||
Policy = ent.Policy
|
||||
Mutator = ent.Mutator
|
||||
Mutation = ent.Mutation
|
||||
MutateFunc = ent.MutateFunc
|
||||
Op = ent.Op
|
||||
Hook = ent.Hook
|
||||
Value = ent.Value
|
||||
Query = ent.Query
|
||||
QueryContext = ent.QueryContext
|
||||
Querier = ent.Querier
|
||||
QuerierFunc = ent.QuerierFunc
|
||||
Interceptor = ent.Interceptor
|
||||
InterceptFunc = ent.InterceptFunc
|
||||
Traverser = ent.Traverser
|
||||
TraverseFunc = ent.TraverseFunc
|
||||
Policy = ent.Policy
|
||||
Mutator = ent.Mutator
|
||||
Mutation = ent.Mutation
|
||||
MutateFunc = ent.MutateFunc
|
||||
)
|
||||
|
||||
type clientCtxKey struct{}
|
||||
|
||||
// FromContext returns a Client stored inside a context, or nil if there isn't one.
|
||||
func FromContext(ctx context.Context) *Client {
|
||||
c, _ := ctx.Value(clientCtxKey{}).(*Client)
|
||||
return c
|
||||
}
|
||||
|
||||
// NewContext returns a new context with the given Client attached.
|
||||
func NewContext(parent context.Context, c *Client) context.Context {
|
||||
return context.WithValue(parent, clientCtxKey{}, c)
|
||||
}
|
||||
|
||||
type txCtxKey struct{}
|
||||
|
||||
// TxFromContext returns a Tx stored inside a context, or nil if there isn't one.
|
||||
func TxFromContext(ctx context.Context) *Tx {
|
||||
tx, _ := ctx.Value(txCtxKey{}).(*Tx)
|
||||
return tx
|
||||
}
|
||||
|
||||
// NewTxContext returns a new context with the given Tx attached.
|
||||
func NewTxContext(parent context.Context, tx *Tx) context.Context {
|
||||
return context.WithValue(parent, txCtxKey{}, tx)
|
||||
}
|
||||
|
||||
// OrderFunc applies an ordering on the sql selector.
|
||||
// Deprecated: Use Asc/Desc functions or the package builders instead.
|
||||
type OrderFunc func(*sql.Selector)
|
||||
|
||||
// columnChecker returns a function indicates if the column exists in the given column.
|
||||
func columnChecker(table string) func(string) error {
|
||||
checks := map[string]func(string) bool{
|
||||
dbpackage.Table: dbpackage.ValidColumn,
|
||||
}
|
||||
check, ok := checks[table]
|
||||
if !ok {
|
||||
return func(string) error {
|
||||
return fmt.Errorf("unknown table %q", table)
|
||||
}
|
||||
}
|
||||
return func(column string) error {
|
||||
if !check(column) {
|
||||
return fmt.Errorf("unknown column %q for table %q", column, table)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
var (
|
||||
initCheck sync.Once
|
||||
columnCheck sql.ColumnCheck
|
||||
)
|
||||
|
||||
// checkColumn checks if the column exists in the given table.
|
||||
func checkColumn(table, column string) error {
|
||||
initCheck.Do(func() {
|
||||
columnCheck = sql.NewColumnCheck(map[string]func(string) bool{
|
||||
dbpackage.Table: dbpackage.ValidColumn,
|
||||
})
|
||||
})
|
||||
return columnCheck(table, column)
|
||||
}
|
||||
|
||||
// Asc applies the given fields in ASC order.
|
||||
func Asc(fields ...string) OrderFunc {
|
||||
func Asc(fields ...string) func(*sql.Selector) {
|
||||
return func(s *sql.Selector) {
|
||||
check := columnChecker(s.TableName())
|
||||
for _, f := range fields {
|
||||
if err := check(f); err != nil {
|
||||
if err := checkColumn(s.TableName(), f); err != nil {
|
||||
s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
|
||||
}
|
||||
s.OrderBy(sql.Asc(s.C(f)))
|
||||
@@ -59,11 +92,10 @@ func Asc(fields ...string) OrderFunc {
|
||||
}
|
||||
|
||||
// Desc applies the given fields in DESC order.
|
||||
func Desc(fields ...string) OrderFunc {
|
||||
func Desc(fields ...string) func(*sql.Selector) {
|
||||
return func(s *sql.Selector) {
|
||||
check := columnChecker(s.TableName())
|
||||
for _, f := range fields {
|
||||
if err := check(f); err != nil {
|
||||
if err := checkColumn(s.TableName(), f); err != nil {
|
||||
s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
|
||||
}
|
||||
s.OrderBy(sql.Desc(s.C(f)))
|
||||
@@ -79,7 +111,6 @@ type AggregateFunc func(*sql.Selector) string
|
||||
// GroupBy(field1, field2).
|
||||
// Aggregate(ent.As(ent.Sum(field1), "sum_field1"), (ent.As(ent.Sum(field2), "sum_field2")).
|
||||
// Scan(ctx, &v)
|
||||
//
|
||||
func As(fn AggregateFunc, end string) AggregateFunc {
|
||||
return func(s *sql.Selector) string {
|
||||
return sql.As(fn(s), end)
|
||||
@@ -96,8 +127,7 @@ func Count() AggregateFunc {
|
||||
// Max applies the "max" aggregation function on the given field of each group.
|
||||
func Max(field string) AggregateFunc {
|
||||
return func(s *sql.Selector) string {
|
||||
check := columnChecker(s.TableName())
|
||||
if err := check(field); err != nil {
|
||||
if err := checkColumn(s.TableName(), field); err != nil {
|
||||
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
|
||||
return ""
|
||||
}
|
||||
@@ -108,8 +138,7 @@ func Max(field string) AggregateFunc {
|
||||
// Mean applies the "mean" aggregation function on the given field of each group.
|
||||
func Mean(field string) AggregateFunc {
|
||||
return func(s *sql.Selector) string {
|
||||
check := columnChecker(s.TableName())
|
||||
if err := check(field); err != nil {
|
||||
if err := checkColumn(s.TableName(), field); err != nil {
|
||||
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
|
||||
return ""
|
||||
}
|
||||
@@ -120,8 +149,7 @@ func Mean(field string) AggregateFunc {
|
||||
// Min applies the "min" aggregation function on the given field of each group.
|
||||
func Min(field string) AggregateFunc {
|
||||
return func(s *sql.Selector) string {
|
||||
check := columnChecker(s.TableName())
|
||||
if err := check(field); err != nil {
|
||||
if err := checkColumn(s.TableName(), field); err != nil {
|
||||
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
|
||||
return ""
|
||||
}
|
||||
@@ -132,8 +160,7 @@ func Min(field string) AggregateFunc {
|
||||
// Sum applies the "sum" aggregation function on the given field of each group.
|
||||
func Sum(field string) AggregateFunc {
|
||||
return func(s *sql.Selector) string {
|
||||
check := columnChecker(s.TableName())
|
||||
if err := check(field); err != nil {
|
||||
if err := checkColumn(s.TableName(), field); err != nil {
|
||||
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
|
||||
return ""
|
||||
}
|
||||
@@ -141,7 +168,7 @@ func Sum(field string) AggregateFunc {
|
||||
}
|
||||
}
|
||||
|
||||
// ValidationError returns when validating a field fails.
|
||||
// ValidationError returns when validating a field or edge fails.
|
||||
type ValidationError struct {
|
||||
Name string // Field or edge name.
|
||||
err error
|
||||
@@ -257,3 +284,325 @@ func IsConstraintError(err error) bool {
|
||||
var e *ConstraintError
|
||||
return errors.As(err, &e)
|
||||
}
|
||||
|
||||
// selector embedded by the different Select/GroupBy builders.
|
||||
type selector struct {
|
||||
label string
|
||||
flds *[]string
|
||||
fns []AggregateFunc
|
||||
scan func(context.Context, any) error
|
||||
}
|
||||
|
||||
// ScanX is like Scan, but panics if an error occurs.
|
||||
func (s *selector) ScanX(ctx context.Context, v any) {
|
||||
if err := s.scan(ctx, v); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Strings returns list of strings from a selector. It is only allowed when selecting one field.
|
||||
func (s *selector) Strings(ctx context.Context) ([]string, error) {
|
||||
if len(*s.flds) > 1 {
|
||||
return nil, errors.New("ent: Strings is not achievable when selecting more than 1 field")
|
||||
}
|
||||
var v []string
|
||||
if err := s.scan(ctx, &v); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// StringsX is like Strings, but panics if an error occurs.
|
||||
func (s *selector) StringsX(ctx context.Context) []string {
|
||||
v, err := s.Strings(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// String returns a single string from a selector. It is only allowed when selecting one field.
|
||||
func (s *selector) String(ctx context.Context) (_ string, err error) {
|
||||
var v []string
|
||||
if v, err = s.Strings(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(v) {
|
||||
case 1:
|
||||
return v[0], nil
|
||||
case 0:
|
||||
err = &NotFoundError{s.label}
|
||||
default:
|
||||
err = fmt.Errorf("ent: Strings returned %d results when one was expected", len(v))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// StringX is like String, but panics if an error occurs.
|
||||
func (s *selector) StringX(ctx context.Context) string {
|
||||
v, err := s.String(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Ints returns list of ints from a selector. It is only allowed when selecting one field.
|
||||
func (s *selector) Ints(ctx context.Context) ([]int, error) {
|
||||
if len(*s.flds) > 1 {
|
||||
return nil, errors.New("ent: Ints is not achievable when selecting more than 1 field")
|
||||
}
|
||||
var v []int
|
||||
if err := s.scan(ctx, &v); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// IntsX is like Ints, but panics if an error occurs.
|
||||
func (s *selector) IntsX(ctx context.Context) []int {
|
||||
v, err := s.Ints(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Int returns a single int from a selector. It is only allowed when selecting one field.
|
||||
func (s *selector) Int(ctx context.Context) (_ int, err error) {
|
||||
var v []int
|
||||
if v, err = s.Ints(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(v) {
|
||||
case 1:
|
||||
return v[0], nil
|
||||
case 0:
|
||||
err = &NotFoundError{s.label}
|
||||
default:
|
||||
err = fmt.Errorf("ent: Ints returned %d results when one was expected", len(v))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// IntX is like Int, but panics if an error occurs.
|
||||
func (s *selector) IntX(ctx context.Context) int {
|
||||
v, err := s.Int(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Float64s returns list of float64s from a selector. It is only allowed when selecting one field.
|
||||
func (s *selector) Float64s(ctx context.Context) ([]float64, error) {
|
||||
if len(*s.flds) > 1 {
|
||||
return nil, errors.New("ent: Float64s is not achievable when selecting more than 1 field")
|
||||
}
|
||||
var v []float64
|
||||
if err := s.scan(ctx, &v); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// Float64sX is like Float64s, but panics if an error occurs.
|
||||
func (s *selector) Float64sX(ctx context.Context) []float64 {
|
||||
v, err := s.Float64s(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Float64 returns a single float64 from a selector. It is only allowed when selecting one field.
|
||||
func (s *selector) Float64(ctx context.Context) (_ float64, err error) {
|
||||
var v []float64
|
||||
if v, err = s.Float64s(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(v) {
|
||||
case 1:
|
||||
return v[0], nil
|
||||
case 0:
|
||||
err = &NotFoundError{s.label}
|
||||
default:
|
||||
err = fmt.Errorf("ent: Float64s returned %d results when one was expected", len(v))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Float64X is like Float64, but panics if an error occurs.
|
||||
func (s *selector) Float64X(ctx context.Context) float64 {
|
||||
v, err := s.Float64(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Bools returns list of bools from a selector. It is only allowed when selecting one field.
|
||||
func (s *selector) Bools(ctx context.Context) ([]bool, error) {
|
||||
if len(*s.flds) > 1 {
|
||||
return nil, errors.New("ent: Bools is not achievable when selecting more than 1 field")
|
||||
}
|
||||
var v []bool
|
||||
if err := s.scan(ctx, &v); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// BoolsX is like Bools, but panics if an error occurs.
|
||||
func (s *selector) BoolsX(ctx context.Context) []bool {
|
||||
v, err := s.Bools(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Bool returns a single bool from a selector. It is only allowed when selecting one field.
|
||||
func (s *selector) Bool(ctx context.Context) (_ bool, err error) {
|
||||
var v []bool
|
||||
if v, err = s.Bools(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(v) {
|
||||
case 1:
|
||||
return v[0], nil
|
||||
case 0:
|
||||
err = &NotFoundError{s.label}
|
||||
default:
|
||||
err = fmt.Errorf("ent: Bools returned %d results when one was expected", len(v))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// BoolX is like Bool, but panics if an error occurs.
|
||||
func (s *selector) BoolX(ctx context.Context) bool {
|
||||
v, err := s.Bool(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// withHooks invokes the builder operation with the given hooks, if any.
|
||||
func withHooks[V Value, M any, PM interface {
|
||||
*M
|
||||
Mutation
|
||||
}](ctx context.Context, exec func(context.Context) (V, error), mutation PM, hooks []Hook) (value V, err error) {
|
||||
if len(hooks) == 0 {
|
||||
return exec(ctx)
|
||||
}
|
||||
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
|
||||
mutationT, ok := any(m).(PM)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected mutation type %T", m)
|
||||
}
|
||||
// Set the mutation to the builder.
|
||||
*mutation = *mutationT
|
||||
return exec(ctx)
|
||||
})
|
||||
for i := len(hooks) - 1; i >= 0; i-- {
|
||||
if hooks[i] == nil {
|
||||
return value, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
|
||||
}
|
||||
mut = hooks[i](mut)
|
||||
}
|
||||
v, err := mut.Mutate(ctx, mutation)
|
||||
if err != nil {
|
||||
return value, err
|
||||
}
|
||||
nv, ok := v.(V)
|
||||
if !ok {
|
||||
return value, fmt.Errorf("unexpected node type %T returned from %T", v, mutation)
|
||||
}
|
||||
return nv, nil
|
||||
}
|
||||
|
||||
// setContextOp returns a new context with the given QueryContext attached (including its op) in case it does not exist.
|
||||
func setContextOp(ctx context.Context, qc *QueryContext, op string) context.Context {
|
||||
if ent.QueryFromContext(ctx) == nil {
|
||||
qc.Op = op
|
||||
ctx = ent.NewQueryContext(ctx, qc)
|
||||
}
|
||||
return ctx
|
||||
}
|
||||
|
||||
func querierAll[V Value, Q interface {
|
||||
sqlAll(context.Context, ...queryHook) (V, error)
|
||||
}]() Querier {
|
||||
return QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
|
||||
query, ok := q.(Q)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected query type %T", q)
|
||||
}
|
||||
return query.sqlAll(ctx)
|
||||
})
|
||||
}
|
||||
|
||||
func querierCount[Q interface {
|
||||
sqlCount(context.Context) (int, error)
|
||||
}]() Querier {
|
||||
return QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
|
||||
query, ok := q.(Q)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected query type %T", q)
|
||||
}
|
||||
return query.sqlCount(ctx)
|
||||
})
|
||||
}
|
||||
|
||||
func withInterceptors[V Value](ctx context.Context, q Query, qr Querier, inters []Interceptor) (v V, err error) {
|
||||
for i := len(inters) - 1; i >= 0; i-- {
|
||||
qr = inters[i].Intercept(qr)
|
||||
}
|
||||
rv, err := qr.Query(ctx, q)
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
vt, ok := rv.(V)
|
||||
if !ok {
|
||||
return v, fmt.Errorf("unexpected type %T returned from %T. expected type: %T", vt, q, v)
|
||||
}
|
||||
return vt, nil
|
||||
}
|
||||
|
||||
func scanWithInterceptors[Q1 ent.Query, Q2 interface {
|
||||
sqlScan(context.Context, Q1, any) error
|
||||
}](ctx context.Context, rootQuery Q1, selectOrGroup Q2, inters []Interceptor, v any) error {
|
||||
rv := reflect.ValueOf(v)
|
||||
var qr Querier = QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
|
||||
query, ok := q.(Q1)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected query type %T", q)
|
||||
}
|
||||
if err := selectOrGroup.sqlScan(ctx, query, v); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if k := rv.Kind(); k == reflect.Pointer && rv.Elem().CanInterface() {
|
||||
return rv.Elem().Interface(), nil
|
||||
}
|
||||
return v, nil
|
||||
})
|
||||
for i := len(inters) - 1; i >= 0; i-- {
|
||||
qr = inters[i].Intercept(qr)
|
||||
}
|
||||
vv, err := qr.Query(ctx, rootQuery)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch rv2 := reflect.ValueOf(vv); {
|
||||
case rv.IsNil(), rv2.IsNil(), rv.Kind() != reflect.Pointer:
|
||||
case rv.Type() == rv2.Type():
|
||||
rv.Elem().Set(rv2.Elem())
|
||||
case rv.Elem().Type() == rv2.Type():
|
||||
rv.Elem().Set(rv2)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// queryHook describes an internal hook for the different sqlAll methods.
|
||||
type queryHook func(context.Context, *sqlgraph.QuerySpec)
|
||||
|
@@ -1,15 +1,16 @@
// Code generated by entc, DO NOT EDIT.
// Code generated by ent, DO NOT EDIT.

package enttest

import (
	"context"

	"ALHP.go/ent"
	"somegit.dev/ALHP/ALHP.GO/ent"
	// required by schema hooks.
	_ "ALHP.go/ent/runtime"
	_ "somegit.dev/ALHP/ALHP.GO/ent/runtime"

	"entgo.io/ent/dialect/sql/schema"
	"somegit.dev/ALHP/ALHP.GO/ent/migrate"
)

type (
@@ -17,7 +18,7 @@ type (
	// testing.T and testing.B and used by enttest.
	TestingT interface {
		FailNow()
		Error(...interface{})
		Error(...any)
	}

	// Option configures client creation.
@@ -59,10 +60,7 @@ func Open(t TestingT, driverName, dataSourceName string, opts ...Option) *ent.Cl
		t.Error(err)
		t.FailNow()
	}
	if err := c.Schema.Create(context.Background(), o.migrateOpts...); err != nil {
		t.Error(err)
		t.FailNow()
	}
	migrateSchema(t, c, o)
	return c
}

@@ -70,9 +68,17 @@ func Open(t TestingT, driverName, dataSourceName string, opts ...Option) *ent.Cl
func NewClient(t TestingT, opts ...Option) *ent.Client {
	o := newOptions(opts)
	c := ent.NewClient(o.opts...)
	if err := c.Schema.Create(context.Background(), o.migrateOpts...); err != nil {
	migrateSchema(t, c, o)
	return c
}
func migrateSchema(t TestingT, c *ent.Client, o *options) {
	tables, err := schema.CopyTables(migrate.Tables)
	if err != nil {
		t.Error(err)
		t.FailNow()
	}
	if err := migrate.Create(context.Background(), c.Schema, tables, o.migrateOpts...); err != nil {
		t.Error(err)
		t.FailNow()
	}
	return c
}
@@ -1,4 +1,4 @@
// Code generated by entc, DO NOT EDIT.
// Code generated by ent, DO NOT EDIT.

package hook

@@ -6,20 +6,19 @@ import (
	"context"
	"fmt"

	"ALHP.go/ent"
	"somegit.dev/ALHP/ALHP.GO/ent"
)

// The DbPackageFunc type is an adapter to allow the use of ordinary
// function as DbPackage mutator.
type DbPackageFunc func(context.Context, *ent.DbPackageMutation) (ent.Value, error)
// The DBPackageFunc type is an adapter to allow the use of ordinary
// function as DBPackage mutator.
type DBPackageFunc func(context.Context, *ent.DBPackageMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f DbPackageFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	mv, ok := m.(*ent.DbPackageMutation)
	if !ok {
		return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DbPackageMutation", m)
func (f DBPackageFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.DBPackageMutation); ok {
		return f(ctx, mv)
	}
	return f(ctx, mv)
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DBPackageMutation", m)
}

// Condition is a hook condition function.
@@ -117,7 +116,6 @@ func HasFields(field string, fields ...string) Condition {
// If executes the given hook under condition.
//
// hook.If(ComputeAverage, And(HasFields(...), HasAddedFields(...)))
//
func If(hk ent.Hook, cond Condition) ent.Hook {
	return func(next ent.Mutator) ent.Mutator {
		return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
@@ -132,7 +130,6 @@ func If(hk ent.Hook, cond Condition) ent.Hook {
// On executes the given hook only for the given operation.
//
// hook.On(Log, ent.Delete|ent.Create)
//
func On(hk ent.Hook, op ent.Op) ent.Hook {
	return If(hk, HasOp(op))
}
@@ -140,7 +137,6 @@ func On(hk ent.Hook, op ent.Op) ent.Hook {
// Unless skips the given hook only for the given operation.
//
// hook.Unless(Log, ent.Update|ent.UpdateOne)
//
func Unless(hk ent.Hook, op ent.Op) ent.Hook {
	return If(hk, Not(HasOp(op)))
}
@@ -161,7 +157,6 @@ func FixedError(err error) ent.Hook {
//			Reject(ent.Delete|ent.Update),
//		}
//	}
//
func Reject(op ent.Op) ent.Hook {
	hk := FixedError(fmt.Errorf("%s operation is not allowed", op))
	return On(hk, op)
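The regenerated adapter keeps the hook.On/hook.Unless helpers documented above, so mutation hooks can still be scoped per operation. A rough sketch of wiring a DBPackageFunc through hook.On, assuming the generated Op constants and mutation getters; the logger and function name are illustrative and not part of this change:

```go
package alhpexamples

import (
	"context"

	log "github.com/sirupsen/logrus"

	"somegit.dev/ALHP/ALHP.GO/ent"
	"somegit.dev/ALHP/ALHP.GO/ent/hook"
)

// registerStatusLogger attaches a DBPackageFunc-based hook that runs only on
// create and update-one mutations.
func registerStatusLogger(client *ent.Client) {
	client.DBPackage.Use(hook.On(func(next ent.Mutator) ent.Mutator {
		return hook.DBPackageFunc(func(ctx context.Context, m *ent.DBPackageMutation) (ent.Value, error) {
			if status, ok := m.Status(); ok {
				log.Debugf("dbpackage status changing to %s", status)
			}
			return next.Mutate(ctx, m) // always delegate to the wrapped mutator
		})
	}, ent.OpCreate|ent.OpUpdateOne))
}
```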
@@ -1,4 +1,4 @@
// Code generated by entc, DO NOT EDIT.
// Code generated by ent, DO NOT EDIT.

package migrate

@@ -28,17 +28,13 @@ var (
	// and therefore, it's recommended to enable this option to get more
	// flexibility in the schema changes.
	WithDropIndex = schema.WithDropIndex
	// WithFixture sets the foreign-key renaming option to the migration when upgrading
	// ent from v0.1.0 (issue-#285). Defaults to false.
	WithFixture = schema.WithFixture
	// WithForeignKeys enables creating foreign-key in schema DDL. This defaults to true.
	WithForeignKeys = schema.WithForeignKeys
)

// Schema is the API for creating, migrating and dropping a schema.
type Schema struct {
	drv         dialect.Driver
	universalID bool
	drv dialect.Driver
}

// NewSchema creates a new schema client.
@@ -46,27 +42,23 @@ func NewSchema(drv dialect.Driver) *Schema { return &Schema{drv: drv} }

// Create creates all schema resources.
func (s *Schema) Create(ctx context.Context, opts ...schema.MigrateOption) error {
	return Create(ctx, s, Tables, opts...)
}

// Create creates all table resources using the given schema driver.
func Create(ctx context.Context, s *Schema, tables []*schema.Table, opts ...schema.MigrateOption) error {
	migrate, err := schema.NewMigrate(s.drv, opts...)
	if err != nil {
		return fmt.Errorf("ent/migrate: %w", err)
	}
	return migrate.Create(ctx, Tables...)
	return migrate.Create(ctx, tables...)
}

// WriteTo writes the schema changes to w instead of running them against the database.
//
// if err := client.Schema.WriteTo(context.Background(), os.Stdout); err != nil {
//	if err := client.Schema.WriteTo(context.Background(), os.Stdout); err != nil {
//		log.Fatal(err)
//	}
//
// }
func (s *Schema) WriteTo(ctx context.Context, w io.Writer, opts ...schema.MigrateOption) error {
	drv := &schema.WriteDriver{
		Writer: w,
		Driver: s.drv,
	}
	migrate, err := schema.NewMigrate(drv, opts...)
	if err != nil {
		return fmt.Errorf("ent/migrate: %w", err)
	}
	return migrate.Create(ctx, Tables...)
	return Create(ctx, &Schema{drv: &schema.WriteDriver{Writer: w, Driver: s.drv}}, Tables, opts...)
}
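The reworked WriteTo now funnels through the same Create path, so a dry run and a real migration differ only in the driver. A sketch of how a caller might use both, assuming an open *ent.Client named db and the generated option aliases listed above; the wrapper function is hypothetical:

```go
package alhpexamples

import (
	"context"
	"log"
	"os"

	"somegit.dev/ALHP/ALHP.GO/ent"
	"somegit.dev/ALHP/ALHP.GO/ent/migrate"
)

// applySchema runs the generated migration, or prints the DDL instead when
// dryRun is set. Illustrative wiring only.
func applySchema(ctx context.Context, db *ent.Client, dryRun bool) {
	if dryRun {
		// WriteTo routes the same Create call through a WriteDriver.
		if err := db.Schema.WriteTo(ctx, os.Stdout, migrate.WithDropIndex(true)); err != nil {
			log.Fatalf("printing schema changes: %v", err)
		}
		return
	}
	if err := db.Schema.Create(ctx, migrate.WithDropIndex(true), migrate.WithForeignKeys(false)); err != nil {
		log.Fatalf("creating schema resources: %v", err)
	}
}
```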
@@ -1,4 +1,4 @@
// Code generated by entc, DO NOT EDIT.
// Code generated by ent, DO NOT EDIT.

package migrate

@@ -11,18 +11,26 @@ var (
	// DbPackagesColumns holds the columns for the "db_packages" table.
	DbPackagesColumns = []*schema.Column{
		{Name: "id", Type: field.TypeInt, Increment: true},
		{Name: "pkgbase", Type: field.TypeString, Unique: true},
		{Name: "pkgbase", Type: field.TypeString},
		{Name: "packages", Type: field.TypeJSON, Nullable: true},
		{Name: "status", Type: field.TypeEnum, Nullable: true, Enums: []string{"skipped", "failed", "build", "queued", "building", "latest", "signing", "unknown"}, Default: "unknown"},
		{Name: "status", Type: field.TypeEnum, Nullable: true, Enums: []string{"skipped", "failed", "built", "queued", "delayed", "building", "latest", "signing", "unknown"}, Default: "unknown"},
		{Name: "skip_reason", Type: field.TypeString, Nullable: true},
		{Name: "repository", Type: field.TypeEnum, Enums: []string{"extra", "core", "community"}},
		{Name: "repository", Type: field.TypeEnum, Enums: []string{"extra", "core", "multilib"}},
		{Name: "march", Type: field.TypeString},
		{Name: "version", Type: field.TypeString, Nullable: true},
		{Name: "repo_version", Type: field.TypeString, Nullable: true},
		{Name: "build_time_start", Type: field.TypeTime, Nullable: true},
		{Name: "build_time_end", Type: field.TypeTime, Nullable: true},
		{Name: "updated", Type: field.TypeTime, Nullable: true},
		{Name: "hash", Type: field.TypeString, Nullable: true},
		{Name: "lto", Type: field.TypeEnum, Nullable: true, Enums: []string{"enabled", "unknown", "disabled", "auto_disabled"}, Default: "unknown"},
		{Name: "last_version_build", Type: field.TypeString, Nullable: true},
		{Name: "last_verified", Type: field.TypeTime, Nullable: true},
		{Name: "debug_symbols", Type: field.TypeEnum, Nullable: true, Enums: []string{"available", "unknown", "not_available"}, Default: "unknown"},
		{Name: "max_rss", Type: field.TypeInt64, Nullable: true},
		{Name: "u_time", Type: field.TypeInt64, Nullable: true},
		{Name: "s_time", Type: field.TypeInt64, Nullable: true},
		{Name: "io_in", Type: field.TypeInt64, Nullable: true},
		{Name: "io_out", Type: field.TypeInt64, Nullable: true},
		{Name: "tag_rev", Type: field.TypeString, Nullable: true},
	}
	// DbPackagesTable holds the schema information for the "db_packages" table.
	DbPackagesTable = &schema.Table{
1377  ent/mutation.go
File diff suppressed because it is too large
@@ -1,4 +1,4 @@
// Code generated by entc, DO NOT EDIT.
// Code generated by ent, DO NOT EDIT.

package predicate

@@ -6,5 +6,5 @@ import (
	"entgo.io/ent/dialect/sql"
)

// DbPackage is the predicate function for dbpackage builders.
type DbPackage func(*sql.Selector)
// DBPackage is the predicate function for dbpackage builders.
type DBPackage func(*sql.Selector)
@@ -1,17 +1,17 @@
// Code generated by entc, DO NOT EDIT.
// Code generated by ent, DO NOT EDIT.

package ent

import (
	"ALHP.go/ent/dbpackage"
	"ALHP.go/ent/schema"
	"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
	"somegit.dev/ALHP/ALHP.GO/ent/schema"
)

// The init function reads all schema descriptors with runtime code
// (default values, validators, hooks and policies) and stitches it
// to their package variables.
func init() {
	dbpackageFields := schema.DbPackage{}.Fields()
	dbpackageFields := schema.DBPackage{}.Fields()
	_ = dbpackageFields
	// dbpackageDescPkgbase is the schema descriptor for pkgbase field.
	dbpackageDescPkgbase := dbpackageFields[0].Descriptor()
@@ -1,10 +1,10 @@
// Code generated by entc, DO NOT EDIT.
// Code generated by ent, DO NOT EDIT.

package runtime

// The schema-stitching logic is generated in ALHP.go/ent/runtime.go
// The schema-stitching logic is generated in somegit.dev/ALHP/ALHP.GO/ent/runtime.go

const (
	Version = "v0.9.1"                                          // Version of ent codegen.
	Sum     = "h1:IG8andyeD79GG24U8Q+1Y45hQXj6gY5evSBcva5gtBk=" // Sum of ent codegen.
	Version = "v0.14.2"                                         // Version of ent codegen.
	Sum     = "h1:ywld/j2Rx4EmnIKs8eZ29cbFA1zpB+DA9TLL5l3rlq0=" // Sum of ent codegen.
)
@@ -5,30 +5,39 @@ import (
	"entgo.io/ent/schema/field"
)

// DbPackage holds the schema definition for the DbPackage entity.
type DbPackage struct {
// DBPackage holds the schema definition for the DbPackage entity.
type DBPackage struct {
	ent.Schema
}

// Fields of the DbPackage.
func (DbPackage) Fields() []ent.Field {
// Fields of the DBPackage.
func (DBPackage) Fields() []ent.Field {
	return []ent.Field{
		field.String("pkgbase").NotEmpty().Immutable().Unique(),
		field.String("pkgbase").NotEmpty().Immutable(),
		field.Strings("packages").Optional(),
		field.Enum("status").Values("skipped", "failed", "build", "queued", "building", "latest", "signing", "unknown").Default("unknown").Optional(),
		field.Enum("status").Values("skipped", "failed", "built", "queued", "delayed", "building",
			"latest", "signing", "unknown").Default("unknown").Optional(),
		field.String("skip_reason").Optional(),
		field.Enum("repository").Values("extra", "core", "community"),
		field.String("march").NotEmpty(),
		field.Enum("repository").Values("extra", "core", "multilib"),
		field.String("march").NotEmpty().Immutable(),
		field.String("version").Optional(),
		field.String("repo_version").Optional(),
		field.Time("build_time_start").Optional(),
		field.Time("build_time_end").Optional(),
		field.Time("updated").Optional(),
		field.String("hash").Optional(),
		field.Enum("lto").Values("enabled", "unknown", "disabled", "auto_disabled").Default("unknown").Optional(),
		field.String("last_version_build").Optional(),
		field.Time("last_verified").Optional(),
		field.Enum("debug_symbols").Values("available", "unknown", "not_available").Default("unknown").Optional(),
		field.Int64("max_rss").Optional().Nillable(),
		field.Int64("u_time").Optional().Nillable(),
		field.Int64("s_time").Optional().Nillable(),
		field.Int64("io_in").Optional().Nillable(),
		field.Int64("io_out").Optional().Nillable(),
		field.String("tag_rev").Optional().Nillable(),
	}
}

// Edges of the DbPackage.
func (DbPackage) Edges() []ent.Edge {
// Edges of the DBPackage.
func (DBPackage) Edges() []ent.Edge {
	return nil
}
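Because the status and repository enums changed ("built" and "delayed" added, "community" replaced by "multilib"), code that writes rows has to go through the regenerated constants. A small sketch of a create call against this schema, assuming an open *ent.Client named db and that the generated setter and constant names follow ent's usual conventions; the values are illustrative:

```go
package alhpexamples

import (
	"context"
	"log"

	"somegit.dev/ALHP/ALHP.GO/ent"
	"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
)

// queuePackage inserts a new pkgbase for one march.
func queuePackage(ctx context.Context, db *ent.Client) {
	pkg, err := db.DBPackage.Create().
		SetPkgbase("linux").
		SetMarch("x86-64-v3").
		SetRepository(dbpackage.RepositoryCore).
		SetStatus(dbpackage.StatusQueued).
		Save(ctx)
	if err != nil {
		log.Fatalf("queueing package: %v", err)
	}
	log.Printf("queued %s (id %d)", pkg.Pkgbase, pkg.ID)
}
```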
58  ent/tx.go
@@ -1,4 +1,4 @@
|
||||
// Code generated by entc, DO NOT EDIT.
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
@@ -12,25 +12,19 @@ import (
|
||||
// Tx is a transactional client that is created by calling Client.Tx().
|
||||
type Tx struct {
|
||||
config
|
||||
// DbPackage is the client for interacting with the DbPackage builders.
|
||||
DbPackage *DbPackageClient
|
||||
// DBPackage is the client for interacting with the DBPackage builders.
|
||||
DBPackage *DBPackageClient
|
||||
|
||||
// lazily loaded.
|
||||
client *Client
|
||||
clientOnce sync.Once
|
||||
|
||||
// completion callbacks.
|
||||
mu sync.Mutex
|
||||
onCommit []CommitHook
|
||||
onRollback []RollbackHook
|
||||
|
||||
// ctx lives for the life of the transaction. It is
|
||||
// the same context used by the underlying connection.
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
type (
|
||||
// Committer is the interface that wraps the Committer method.
|
||||
// Committer is the interface that wraps the Commit method.
|
||||
Committer interface {
|
||||
Commit(context.Context, *Tx) error
|
||||
}
|
||||
@@ -44,7 +38,7 @@ type (
|
||||
// and returns a Committer. For example:
|
||||
//
|
||||
// hook := func(next ent.Committer) ent.Committer {
|
||||
// return ent.CommitFunc(func(context.Context, tx *ent.Tx) error {
|
||||
// return ent.CommitFunc(func(ctx context.Context, tx *ent.Tx) error {
|
||||
// // Do some stuff before.
|
||||
// if err := next.Commit(ctx, tx); err != nil {
|
||||
// return err
|
||||
@@ -68,9 +62,9 @@ func (tx *Tx) Commit() error {
|
||||
var fn Committer = CommitFunc(func(context.Context, *Tx) error {
|
||||
return txDriver.tx.Commit()
|
||||
})
|
||||
tx.mu.Lock()
|
||||
hooks := append([]CommitHook(nil), tx.onCommit...)
|
||||
tx.mu.Unlock()
|
||||
txDriver.mu.Lock()
|
||||
hooks := append([]CommitHook(nil), txDriver.onCommit...)
|
||||
txDriver.mu.Unlock()
|
||||
for i := len(hooks) - 1; i >= 0; i-- {
|
||||
fn = hooks[i](fn)
|
||||
}
|
||||
@@ -79,13 +73,14 @@ func (tx *Tx) Commit() error {
|
||||
|
||||
// OnCommit adds a hook to call on commit.
|
||||
func (tx *Tx) OnCommit(f CommitHook) {
|
||||
tx.mu.Lock()
|
||||
defer tx.mu.Unlock()
|
||||
tx.onCommit = append(tx.onCommit, f)
|
||||
txDriver := tx.config.driver.(*txDriver)
|
||||
txDriver.mu.Lock()
|
||||
txDriver.onCommit = append(txDriver.onCommit, f)
|
||||
txDriver.mu.Unlock()
|
||||
}
|
||||
|
||||
type (
|
||||
// Rollbacker is the interface that wraps the Rollbacker method.
|
||||
// Rollbacker is the interface that wraps the Rollback method.
|
||||
Rollbacker interface {
|
||||
Rollback(context.Context, *Tx) error
|
||||
}
|
||||
@@ -99,7 +94,7 @@ type (
|
||||
// and returns a Rollbacker. For example:
|
||||
//
|
||||
// hook := func(next ent.Rollbacker) ent.Rollbacker {
|
||||
// return ent.RollbackFunc(func(context.Context, tx *ent.Tx) error {
|
||||
// return ent.RollbackFunc(func(ctx context.Context, tx *ent.Tx) error {
|
||||
// // Do some stuff before.
|
||||
// if err := next.Rollback(ctx, tx); err != nil {
|
||||
// return err
|
||||
@@ -123,9 +118,9 @@ func (tx *Tx) Rollback() error {
|
||||
var fn Rollbacker = RollbackFunc(func(context.Context, *Tx) error {
|
||||
return txDriver.tx.Rollback()
|
||||
})
|
||||
tx.mu.Lock()
|
||||
hooks := append([]RollbackHook(nil), tx.onRollback...)
|
||||
tx.mu.Unlock()
|
||||
txDriver.mu.Lock()
|
||||
hooks := append([]RollbackHook(nil), txDriver.onRollback...)
|
||||
txDriver.mu.Unlock()
|
||||
for i := len(hooks) - 1; i >= 0; i-- {
|
||||
fn = hooks[i](fn)
|
||||
}
|
||||
@@ -134,9 +129,10 @@ func (tx *Tx) Rollback() error {
|
||||
|
||||
// OnRollback adds a hook to call on rollback.
|
||||
func (tx *Tx) OnRollback(f RollbackHook) {
|
||||
tx.mu.Lock()
|
||||
defer tx.mu.Unlock()
|
||||
tx.onRollback = append(tx.onRollback, f)
|
||||
txDriver := tx.config.driver.(*txDriver)
|
||||
txDriver.mu.Lock()
|
||||
txDriver.onRollback = append(txDriver.onRollback, f)
|
||||
txDriver.mu.Unlock()
|
||||
}
|
||||
|
||||
// Client returns a Client that binds to current transaction.
|
||||
@@ -149,7 +145,7 @@ func (tx *Tx) Client() *Client {
|
||||
}
|
||||
|
||||
func (tx *Tx) init() {
|
||||
tx.DbPackage = NewDbPackageClient(tx.config)
|
||||
tx.DBPackage = NewDBPackageClient(tx.config)
|
||||
}
|
||||
|
||||
// txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation.
|
||||
@@ -159,7 +155,7 @@ func (tx *Tx) init() {
|
||||
// of them in order to commit or rollback the transaction.
|
||||
//
|
||||
// If a closed transaction is embedded in one of the generated entities, and the entity
|
||||
// applies a query, for example: DbPackage.QueryXXX(), the query will be executed
|
||||
// applies a query, for example: DBPackage.QueryXXX(), the query will be executed
|
||||
// through the driver which created this transaction.
|
||||
//
|
||||
// Note that txDriver is not goroutine safe.
|
||||
@@ -168,6 +164,10 @@ type txDriver struct {
|
||||
drv dialect.Driver
|
||||
// tx is the underlying transaction.
|
||||
tx dialect.Tx
|
||||
// completion hooks.
|
||||
mu sync.Mutex
|
||||
onCommit []CommitHook
|
||||
onRollback []RollbackHook
|
||||
}
|
||||
|
||||
// newTx creates a new transactional driver.
|
||||
@@ -198,12 +198,12 @@ func (*txDriver) Commit() error { return nil }
|
||||
func (*txDriver) Rollback() error { return nil }
|
||||
|
||||
// Exec calls tx.Exec.
|
||||
func (tx *txDriver) Exec(ctx context.Context, query string, args, v interface{}) error {
|
||||
func (tx *txDriver) Exec(ctx context.Context, query string, args, v any) error {
|
||||
return tx.tx.Exec(ctx, query, args, v)
|
||||
}
|
||||
|
||||
// Query calls tx.Query.
|
||||
func (tx *txDriver) Query(ctx context.Context, query string, args, v interface{}) error {
|
||||
func (tx *txDriver) Query(ctx context.Context, query string, args, v any) error {
|
||||
return tx.tx.Query(ctx, query, args, v)
|
||||
}
|
||||
|
||||
|
44  flags.yaml  Normal file
@@ -0,0 +1,44 @@
# template values get replaced on makepkg.conf generation
# $level$ -> march x86-64 level, e.g. v3
# $march$ -> full march, e.g. x86-64-v3
# $buildproc$ -> number of threads to build with

common:
  cflags:
    - "-mtune=generic": ~
    - "-O2": "-O3"
    - "-mpclmul" # https://somegit.dev/ALHP/ALHP.GO/issues/92
    - "-march=x86-64": "-march=$march$"

  options:
    - "lto": "!lto" # disable lto; see 'lto' section below

  buildenv:
    - "color": "!color" # color messes up the log output

  goamd64: "$level$" # https://somegit.dev/ALHP/ALHP.GO/issues/116
  packager: "ALHP $march$ <alhp@harting.dev>"
  makeflags: "-j$buildproc$"
  # https://somegit.dev/ALHP/ALHP.GO/issues/110
  rustflags:
    - "-Copt-level=3"
    - "-Ctarget-cpu=$march$"
    - "-Clink-arg=-z"
    - "-Clink-arg=pack-relative-relocs"
  ltoflags:
    - "-falign-functions=32" # https://github.com/InBetweenNames/gentooLTO/issues/164
  kcflags: " -march=$march$ -O3"
  kcppflags: " -march=$march$ -O3"
  fcflags: "$FFLAGS"
  fflags:
    - "-O2": "-O3"
    - "-march=$march$"

lto:
  rustflags:
    - "-Ccodegen-units=1"

  options:
    - "!lto": "lto"

  cargo_profile_release_lto: "fat"
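The comments at the top of flags.yaml describe a plain placeholder substitution when makepkg.conf is generated. A minimal sketch of that replacement step; the helper name and call site are hypothetical and only mirror what the comments describe:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// expandFlags fills the $level$/$march$/$buildproc$ placeholders used in
// flags.yaml values, e.g. "-march=$march$" -> "-march=x86-64-v3".
func expandFlags(value, level, march string, buildproc int) string {
	return strings.NewReplacer(
		"$level$", level,
		"$march$", march,
		"$buildproc$", strconv.Itoa(buildproc),
	).Replace(value)
}

func main() {
	fmt.Println(expandFlags("-march=$march$", "v3", "x86-64-v3", 8)) // -march=x86-64-v3
	fmt.Println(expandFlags("-j$buildproc$", "v3", "x86-64-v3", 8))  // -j8
}
```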
@@ -1,3 +0,0 @@
#!/bin/sh

cd "$1" && git clean -xdff
61  go.mod
@@ -1,17 +1,58 @@
module ALHP.go
module somegit.dev/ALHP/ALHP.GO

go 1.16
go 1.23.0

toolchain go1.23.1

require (
	entgo.io/ent v0.9.1
	github.com/Jguer/go-alpm/v2 v2.0.6
	entgo.io/ent v0.14.3
	github.com/Jguer/go-alpm/v2 v2.2.2
	github.com/Morganamilo/go-pacmanconf v0.0.0-20210502114700-cff030e927a5
	github.com/Morganamilo/go-srcinfo v1.0.0
	github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect
	github.com/jackc/pgx/v4 v4.13.0
	github.com/sirupsen/logrus v1.8.1
	github.com/wercker/journalhook v0.0.0-20180428041537-5d0a5ae867b3
	golang.org/x/sys v0.0.0-20211023085530-d6a326fbbf70 // indirect
	github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500
	github.com/gobwas/glob v0.2.3
	github.com/google/uuid v1.6.0
	github.com/jackc/pgx/v4 v4.18.3
	github.com/otiai10/copy v1.14.1
	github.com/prometheus/client_golang v1.21.1
	github.com/sethvargo/go-retry v0.3.0
	github.com/sirupsen/logrus v1.9.3
	github.com/wercker/journalhook v0.0.0-20230927020745-64542ffa4117
	gopkg.in/yaml.v2 v2.4.0
	lukechampine.com/blake3 v1.1.6
)

require (
	ariga.io/atlas v0.32.0 // indirect
	github.com/agext/levenshtein v1.2.3 // indirect
	github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/bmatcuk/doublestar v1.3.4 // indirect
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
	github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect
	github.com/go-openapi/inflect v0.21.1 // indirect
	github.com/google/go-cmp v0.7.0 // indirect
	github.com/hashicorp/hcl/v2 v2.23.0 // indirect
	github.com/jackc/chunkreader/v2 v2.0.1 // indirect
	github.com/jackc/pgconn v1.14.3 // indirect
	github.com/jackc/pgio v1.0.0 // indirect
	github.com/jackc/pgpassfile v1.0.0 // indirect
	github.com/jackc/pgproto3/v2 v2.3.3 // indirect
	github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
	github.com/jackc/pgtype v1.14.4 // indirect
	github.com/klauspost/compress v1.18.0 // indirect
	github.com/mitchellh/go-wordwrap v1.0.1 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/otiai10/mint v1.6.3 // indirect
	github.com/prometheus/client_model v0.6.1 // indirect
	github.com/prometheus/common v0.63.0 // indirect
	github.com/prometheus/procfs v0.15.1 // indirect
	github.com/zclconf/go-cty v1.16.2 // indirect
	github.com/zclconf/go-cty-yaml v1.1.0 // indirect
	golang.org/x/crypto v0.36.0 // indirect
	golang.org/x/mod v0.24.0 // indirect
	golang.org/x/sync v0.12.0 // indirect
	golang.org/x/sys v0.31.0 // indirect
	golang.org/x/text v0.23.0 // indirect
	golang.org/x/tools v0.31.0 // indirect
	google.golang.org/protobuf v1.36.5 // indirect
)
499  go.sum
@@ -1,142 +1,64 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
entgo.io/ent v0.9.1 h1:IG8andyeD79GG24U8Q+1Y45hQXj6gY5evSBcva5gtBk=
|
||||
entgo.io/ent v0.9.1/go.mod h1:6NUeTfUN5mp5YN+5tgoH1SlakSvYPTBOYotSOvaI4ak=
|
||||
ariga.io/atlas v0.31.1-0.20250212144724-069be8033e83 h1:nX4HXncwIdvQ8/8sIUIf1nyCkK8qdBaHQ7EtzPpuiGE=
|
||||
ariga.io/atlas v0.31.1-0.20250212144724-069be8033e83/go.mod h1:Oe1xWPuu5q9LzyrWfbZmEZxFYeu4BHTyzfjeW2aZp/w=
|
||||
ariga.io/atlas v0.32.0 h1:y+77nueMrExLiKlz1CcPKh/nU7VSlWfBbwCShsJyvCw=
|
||||
ariga.io/atlas v0.32.0/go.mod h1:Oe1xWPuu5q9LzyrWfbZmEZxFYeu4BHTyzfjeW2aZp/w=
|
||||
entgo.io/ent v0.14.2 h1:ywld/j2Rx4EmnIKs8eZ29cbFA1zpB+DA9TLL5l3rlq0=
|
||||
entgo.io/ent v0.14.2/go.mod h1:aDPE/OziPEu8+OWbzy4UlvWmD2/kbRuWfK2A40hcxJM=
|
||||
entgo.io/ent v0.14.3 h1:wokAV/kIlH9TeklJWGGS7AYJdVckr0DloWjIcO9iIIQ=
|
||||
entgo.io/ent v0.14.3/go.mod h1:aDPE/OziPEu8+OWbzy4UlvWmD2/kbRuWfK2A40hcxJM=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
|
||||
github.com/Jguer/go-alpm/v2 v2.0.6 h1:VLavW5qFk/TJRHT3Gg7ClDukU4MNWdfkWM9yLxVkYyQ=
|
||||
github.com/Jguer/go-alpm/v2 v2.0.6/go.mod h1:zU4iKCtNkDARfj5BrKJXYAQ5nIjtZbySfa0paboSmTQ=
|
||||
github.com/Jguer/go-alpm/v2 v2.2.2 h1:sPwUoZp1X5Tw6K6Ba1lWvVJfcgVNEGVcxARLBttZnC0=
|
||||
github.com/Jguer/go-alpm/v2 v2.2.2/go.mod h1:lfe8gSe83F/KERaQvEfrSqQ4n+8bES+ZIyKWR/gm3MI=
|
||||
github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
|
||||
github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
|
||||
github.com/Morganamilo/go-pacmanconf v0.0.0-20210502114700-cff030e927a5 h1:TMscPjkb1ThXN32LuFY5bEYIcXZx3YlwzhS1GxNpn/c=
|
||||
github.com/Morganamilo/go-pacmanconf v0.0.0-20210502114700-cff030e927a5/go.mod h1:Hk55m330jNiwxRodIlMCvw5iEyoRUCIY64W1p9D+tHc=
|
||||
github.com/Morganamilo/go-srcinfo v1.0.0 h1:Wh4nEF+HJWo+29hnxM18Q2hi+DUf0GejS13+Wg+dzmI=
|
||||
github.com/Morganamilo/go-srcinfo v1.0.0/go.mod h1:MP6VGY1NNpVUmYIEgoM9acix95KQqIRyqQ0hCLsyYUY=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
|
||||
github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
|
||||
github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY=
|
||||
github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bmatcuk/doublestar v1.3.4 h1:gPypJ5xD31uhX6Tf54sDPUOBXTqKH4c9aPY66CyQrS0=
|
||||
github.com/bmatcuk/doublestar v1.3.4/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE=
|
||||
github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500 h1:6lhrsTEnloDPXyeZBvSYvQf8u86jbKehZPVDDlkgDl4=
|
||||
github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
|
||||
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
|
||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
|
||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-bindata/go-bindata v1.0.1-0.20190711162640-ee3c2418e368/go.mod h1:7xCgX1lzlrXPHkfvn3EhumqHkmSlzt8at9q7v0ax19c=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-openapi/inflect v0.19.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4=
|
||||
github.com/go-sql-driver/mysql v1.5.1-0.20200311113236-681ffa848bae/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
github.com/go-openapi/inflect v0.21.0 h1:FoBjBTQEcbg2cJUWX6uwL9OyIW8eqc9k4KhN4lfbeYk=
|
||||
github.com/go-openapi/inflect v0.21.0/go.mod h1:INezMuUu7SJQc2AyR3WO0DqqYUJSj8Kb4hBd7WtjlAw=
|
||||
github.com/go-openapi/inflect v0.21.1 h1:swwdJV4YPbuQaz68rHiBeQj+MWeBjDDNyEAi78Fhu4g=
|
||||
github.com/go-openapi/inflect v0.21.1/go.mod h1:INezMuUu7SJQc2AyR3WO0DqqYUJSj8Kb4hBd7WtjlAw=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
|
||||
github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
|
||||
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
|
||||
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
|
||||
github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
|
||||
github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
|
||||
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
|
||||
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
|
||||
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
|
||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
|
||||
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
|
||||
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
|
||||
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/hashicorp/hcl/v2 v2.23.0 h1:Fphj1/gCylPxHutVSEOf2fBOh1VE4AuLV7+kbJf3qos=
|
||||
github.com/hashicorp/hcl/v2 v2.23.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA=
|
||||
github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0=
|
||||
github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
|
||||
github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
|
||||
@@ -148,8 +70,8 @@ github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsU
|
||||
github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
|
||||
github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY=
|
||||
github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
|
||||
github.com/jackc/pgconn v1.10.0 h1:4EYhlDVEMsJ30nNj0mmgwIUXoq7e9sMJrVC2ED6QlCU=
|
||||
github.com/jackc/pgconn v1.10.0/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
|
||||
github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w=
|
||||
github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM=
|
||||
github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
|
||||
github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
|
||||
github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
|
||||
@@ -165,147 +87,127 @@ github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod
|
||||
github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
|
||||
github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
|
||||
github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
||||
github.com/jackc/pgproto3/v2 v2.1.1 h1:7PQ/4gLoqnl87ZxL7xjO0DR5gYuviDCZxQJsUlFW1eI=
|
||||
github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
||||
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg=
|
||||
github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag=
|
||||
github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
||||
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
|
||||
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
||||
github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
|
||||
github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
|
||||
github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
|
||||
github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM=
|
||||
github.com/jackc/pgtype v1.8.1 h1:9k0IXtdJXHJbyAWQgbWr1lU+MEhPXZz6RIXxfR5oxXs=
|
||||
github.com/jackc/pgtype v1.8.1/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
|
||||
github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
|
||||
github.com/jackc/pgtype v1.14.4 h1:fKuNiCumbKTAIxQwXfB/nsrnkEI6bPJrrSiMKgbJ2j8=
|
||||
github.com/jackc/pgtype v1.14.4/go.mod h1:aKeozOde08iifGosdJpz9MBZonJOUJxqNpPBcMJTlVA=
|
||||
github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
|
||||
github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
|
||||
github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
|
||||
github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=
|
||||
github.com/jackc/pgx/v4 v4.13.0 h1:JCjhT5vmhMAf/YwBHLvrBn4OGdIQBiFG6ym8Zmdx570=
|
||||
github.com/jackc/pgx/v4 v4.13.0/go.mod h1:9P4X524sErlaxj0XSGZk7s+LD0eOyu1ZDUrrpznYDF0=
|
||||
github.com/jackc/pgx/v4 v4.18.2/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw=
|
||||
github.com/jackc/pgx/v4 v4.18.3 h1:dE2/TrEsGX3RBprb3qryqSV9Y60iZN1C6i8IrmW9/BA=
|
||||
github.com/jackc/pgx/v4 v4.18.3/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw=
|
||||
github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||
github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||
github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||
github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||
github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
|
||||
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
|
||||
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8=
|
||||
github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
|
||||
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||
github.com/mattn/go-sqlite3 v1.14.8/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
|
||||
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
|
||||
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
|
||||
github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
|
||||
github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8=
|
||||
github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I=
|
||||
github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs=
|
||||
github.com/otiai10/mint v1.6.3/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/prometheus/client_golang v1.21.0 h1:DIsaGmiaBkSangBgMtWdNfxbMNdku5IK6iNhrEqWvdA=
|
||||
github.com/prometheus/client_golang v1.21.0/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
|
||||
github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk=
|
||||
github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
|
||||
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
|
||||
github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
|
||||
github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
||||
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
|
||||
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah2SE=
|
||||
github.com/sethvargo/go-retry v0.3.0/go.mod h1:mNX17F0C/HguQMyMyJxcnU471gOZGxCLyYaFyAZraas=
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
||||
github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
|
||||
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
|
||||
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/wercker/journalhook v0.0.0-20180428041537-5d0a5ae867b3 h1:shC1HB1UogxN5Ech3Yqaaxj1X/P656PPCB4RbojIJqc=
|
||||
github.com/wercker/journalhook v0.0.0-20180428041537-5d0a5ae867b3/go.mod h1:XCsSkdKK4gwBMNrOCZWww0pX6AOt+2gYc5Z6jBRrNVg=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/wercker/journalhook v0.0.0-20230927020745-64542ffa4117 h1:67A5tweHp3C7osHjrYsy6pQZ00bYkTTttZ7kiOwwHeA=
|
||||
github.com/wercker/journalhook v0.0.0-20230927020745-64542ffa4117/go.mod h1:XCsSkdKK4gwBMNrOCZWww0pX6AOt+2gYc5Z6jBRrNVg=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
github.com/zclconf/go-cty v1.16.2 h1:LAJSwc3v81IRBZyUVQDUdZ7hs3SYs9jv0eZJDWHD/70=
|
||||
github.com/zclconf/go-cty v1.16.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
|
||||
github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo=
|
||||
github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM=
|
||||
github.com/zclconf/go-cty-yaml v1.1.0 h1:nP+jp0qPHv2IhUVqmQSzjvqAWcObN0KBkUl2rWBdig0=
|
||||
github.com/zclconf/go-cty-yaml v1.1.0/go.mod h1:9YLUH4g7lOhVWqUbctnVlZ5KLpg7JAprQNgxSZ1Gyxs=
|
||||
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
@@ -317,193 +219,122 @@ go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9E
|
||||
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI=
|
||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
||||
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ=
|
||||
golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus=
|
||||
golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
|
||||
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
|
||||
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM=
|
||||
golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
|
||||
golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
|
||||
golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
|
||||
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
|
||||
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211023085530-d6a326fbbf70 h1:SeSEfdIxyvwGJliREIJhRPPXvW6sDlLT+UQ3B0hD0NA=
|
||||
golang.org/x/sys v0.0.0-20211023085530-d6a326fbbf70/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
|
||||
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
|
||||
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
|
||||
golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
|
||||
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
|
||||
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY=
|
||||
golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY=
|
||||
golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU=
|
||||
golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ=
|
||||
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
|
||||
google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
|
||||
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
|
||||
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
lukechampine.com/blake3 v1.1.6 h1:H3cROdztr7RCfoaTpGZFQsrqvweFLrqS73j7L7cmR5c=
|
||||
lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
housekeeping.go (new file, +355 lines)
@@ -0,0 +1,355 @@
package main

import (
	"context"
	log "github.com/sirupsen/logrus"
	"os"
	"os/exec"
	"path/filepath"
	"somegit.dev/ALHP/ALHP.GO/ent"
	"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
	"strings"
	"sync"
	"time"
)

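// housekeeping reconciles the package files of one repo/march pair with the database
// and the upstream sync DBs: orphaned files are queued for purge, packages that are no
// longer part of the repo are dropped, signatures are re-verified and the stored repo
// version is kept in sync.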
func housekeeping(ctx context.Context, repo, march string, wg *sync.WaitGroup) error {
	defer wg.Done()
	fullRepo := repo + "-" + march
	log.Debugf("[%s] start housekeeping", fullRepo)
	packages, err := Glob(filepath.Join(conf.Basedir.Repo, fullRepo, "/**/*.pkg.tar.zst"))
	if err != nil {
		return err
	}

	log.Debugf("[HK/%s] removing orphans, signature check", fullRepo)
	for _, path := range packages {
		mPackage := Package(path)

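		// a package file without a database entry is an orphan: queue it for purge and move on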
		dbPkg, err := mPackage.DBPackage(ctx, db)
		if ent.IsNotFound(err) {
			log.Infof("[HK] removing orphan %s->%s", fullRepo, filepath.Base(path))
			pkg := &ProtoPackage{
				FullRepo: *mPackage.FullRepo(),
				PkgFiles: []string{path},
				March:    *mPackage.MArch(),
			}
			buildManager.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
			continue
		} else if err != nil {
			log.Warningf("[HK] error fetching %s->%q from db: %v", fullRepo, path, err)
			continue
		}

		pkg := &ProtoPackage{
			Pkgbase:   dbPkg.Pkgbase,
			Repo:      mPackage.Repo(),
			FullRepo:  *mPackage.FullRepo(),
			DBPackage: dbPkg,
			March:     *mPackage.MArch(),
			Arch:      *mPackage.Arch(),
		}

		// check if package is still part of repo
		dbs, err := alpmHandle.SyncDBs()
		if err != nil {
			return err
		}
buildManager.alpmMutex.Lock()
|
||||
pkgResolved, err := dbs.FindSatisfier(mPackage.Name())
|
||||
buildManager.alpmMutex.Unlock()
|
||||
if err != nil ||
|
||||
pkgResolved.DB().Name() != pkg.DBPackage.Repository.String() ||
|
||||
pkgResolved.DB().Name() != pkg.Repo.String() ||
|
||||
pkgResolved.Architecture() != pkg.Arch ||
|
||||
pkgResolved.Name() != mPackage.Name() ||
|
||||
MatchGlobList(pkg.Pkgbase, conf.Blacklist.Packages) {
|
||||
switch {
|
||||
case err != nil:
|
||||
log.Infof("[HK] %s->%s not included in repo (resolve error: %v)", pkg.FullRepo, mPackage.Name(), err)
|
||||
case pkgResolved.DB().Name() != pkg.DBPackage.Repository.String():
|
||||
log.Infof("[HK] %s->%s not included in repo (repo mismatch: repo:%s != db:%s)", pkg.FullRepo,
|
||||
mPackage.Name(), pkgResolved.DB().Name(), pkg.DBPackage.Repository.String())
|
||||
case pkgResolved.DB().Name() != pkg.Repo.String():
|
||||
log.Infof("[HK] %s->%s not included in repo (repo mismatch: repo:%s != pkg:%s)", pkg.FullRepo,
|
||||
mPackage.Name(), pkgResolved.DB().Name(), pkg.Repo.String())
|
||||
case pkgResolved.Architecture() != pkg.Arch:
|
||||
log.Infof("[HK] %s->%s not included in repo (arch mismatch: repo:%s != pkg:%s)", pkg.FullRepo,
|
||||
mPackage.Name(), pkgResolved.Architecture(), pkg.Arch)
|
||||
case pkgResolved.Name() != mPackage.Name():
|
||||
log.Infof("[HK] %s->%s not included in repo (name mismatch: repo:%s != pkg:%s)", pkg.FullRepo,
|
||||
mPackage.Name(), pkgResolved.Name(), mPackage.Name())
|
||||
case MatchGlobList(pkg.Pkgbase, conf.Blacklist.Packages):
|
||||
log.Infof("[HK] %s->%s not included in repo (blacklisted pkgbase %s)", pkg.FullRepo, mPackage.Name(), pkg.Pkgbase)
|
||||
}
|
||||
|
||||
// package not found on mirror/db -> not part of any repo anymore
|
||||
err = pkg.findPkgFiles()
|
||||
if err != nil {
|
||||
log.Errorf("[HK] %s->%s unable to get pkg-files: %v", pkg.FullRepo, mPackage.Name(), err)
|
||||
continue
|
||||
}
|
||||
err = db.DBPackage.DeleteOne(pkg.DBPackage).Exec(ctx)
|
||||
pkg.DBPackage = nil
|
||||
buildManager.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
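// only re-check the detached signature if the package has not been verified since its last build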
if pkg.DBPackage.LastVerified.Before(pkg.DBPackage.BuildTimeStart) {
|
||||
err := pkg.DBPackage.Update().SetLastVerified(time.Now().UTC()).Exec(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// check if pkg signature is valid
|
||||
valid, err := mPackage.HasValidSignature()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !valid {
|
||||
log.Infof("[HK] %s->%s invalid package signature", pkg.FullRepo, pkg.Pkgbase)
|
||||
buildManager.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// compare db-version with repo version
|
||||
repoVer, err := pkg.repoVersion()
|
||||
if err == nil && repoVer != dbPkg.RepoVersion {
|
||||
log.Infof("[HK] %s->%s update repoVersion %s->%s", pkg.FullRepo, pkg.Pkgbase, dbPkg.RepoVersion, repoVer)
|
||||
pkg.DBPackage, err = pkg.DBPackage.Update().SetRepoVersion(repoVer).ClearTagRev().Save(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// check all packages from db for existence
|
||||
dbPackages, err := db.DBPackage.Query().Where(
|
||||
dbpackage.And(
|
||||
dbpackage.RepositoryEQ(dbpackage.Repository(repo)),
|
||||
dbpackage.March(march),
|
||||
)).All(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Debugf("[HK/%s] checking %d packages from database", fullRepo, len(dbPackages))
|
||||
|
||||
for _, dbPkg := range dbPackages {
|
||||
pkg := &ProtoPackage{
|
||||
Pkgbase: dbPkg.Pkgbase,
|
||||
Repo: dbPkg.Repository,
|
||||
March: dbPkg.March,
|
||||
FullRepo: dbPkg.Repository.String() + "-" + dbPkg.March,
|
||||
DBPackage: dbPkg,
|
||||
}
|
||||
|
||||
if !pkg.isAvailable(ctx, alpmHandle) {
|
||||
log.Infof("[HK] %s->%s not found on mirror, removing", pkg.FullRepo, pkg.Pkgbase)
|
||||
err = db.DBPackage.DeleteOne(dbPkg).Exec(context.Background())
|
||||
if err != nil {
|
||||
log.Errorf("[HK] error deleting package %s->%s: %v", pkg.FullRepo, dbPkg.Pkgbase, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
switch {
|
||||
case dbPkg.Status == dbpackage.StatusLatest && dbPkg.RepoVersion != "":
|
||||
// check lastVersionBuild
|
||||
if dbPkg.LastVersionBuild != dbPkg.RepoVersion {
|
||||
log.Infof("[HK] %s->%s updating lastVersionBuild %s -> %s", fullRepo, dbPkg.Pkgbase, dbPkg.LastVersionBuild, dbPkg.RepoVersion)
|
||||
nDBPkg, err := dbPkg.Update().SetLastVersionBuild(dbPkg.RepoVersion).Save(ctx)
|
||||
if err != nil {
|
||||
log.Warningf("[HK] error updating lastVersionBuild for %s->%s: %v", fullRepo, dbPkg.Pkgbase, err)
|
||||
} else {
|
||||
dbPkg = nDBPkg
|
||||
}
|
||||
}
|
||||
|
||||
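// verify that every split package of this pkgbase still exists on disk; requeue the build if any are missing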
var existingSplits []string
|
||||
var missingSplits []string
|
||||
for _, splitPkg := range dbPkg.Packages {
|
||||
pkgFile := filepath.Join(conf.Basedir.Repo, fullRepo, "os", conf.Arch,
|
||||
splitPkg+"-"+dbPkg.RepoVersion+"-"+conf.Arch+".pkg.tar.zst")
|
||||
_, err = os.Stat(pkgFile)
|
||||
switch {
|
||||
case os.IsNotExist(err):
|
||||
missingSplits = append(missingSplits, splitPkg)
|
||||
case err != nil:
|
||||
log.Warningf("[HK] error reading package-file %s: %v", splitPkg, err)
|
||||
default:
|
||||
existingSplits = append(existingSplits, pkgFile)
|
||||
}
|
||||
}
|
||||
if len(missingSplits) > 0 {
|
||||
log.Infof("[HK] %s->%s missing split-package(s): %s", fullRepo, dbPkg.Pkgbase, missingSplits)
|
||||
pkg.DBPackage, err = pkg.DBPackage.Update().
|
||||
ClearRepoVersion().
|
||||
ClearTagRev().
|
||||
SetStatus(dbpackage.StatusQueued).
|
||||
Save(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pkg := &ProtoPackage{
|
||||
FullRepo: fullRepo,
|
||||
PkgFiles: existingSplits,
|
||||
March: march,
|
||||
DBPackage: dbPkg,
|
||||
}
|
||||
buildManager.repoPurge[fullRepo] <- []*ProtoPackage{pkg}
|
||||
}
|
||||
|
||||
rawState, err := os.ReadFile(filepath.Join(conf.Basedir.Work, stateDir, dbPkg.Repository.String()+"-"+conf.Arch, dbPkg.Pkgbase))
|
||||
if err != nil {
|
||||
log.Infof("[HK] state not found for %s->%s: %v, removing package", fullRepo, dbPkg.Pkgbase, err)
|
||||
pkg := &ProtoPackage{
|
||||
FullRepo: fullRepo,
|
||||
PkgFiles: existingSplits,
|
||||
March: march,
|
||||
DBPackage: dbPkg,
|
||||
}
|
||||
buildManager.repoPurge[fullRepo] <- []*ProtoPackage{pkg}
|
||||
continue
|
||||
}
|
||||
|
||||
state, err := parseState(string(rawState))
|
||||
if err != nil {
|
||||
log.Warningf("[HK] error parsing state file for %s->%s: %v", fullRepo, dbPkg.Pkgbase, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if dbPkg.TagRev != nil && state.TagRev == *dbPkg.TagRev && state.PkgVer != dbPkg.Version {
|
||||
log.Infof("[HK] reseting package %s->%s with mismatched state information (%s!=%s)",
|
||||
fullRepo, dbPkg.Pkgbase, state.PkgVer, dbPkg.Version)
|
||||
err = dbPkg.Update().SetStatus(dbpackage.StatusQueued).ClearTagRev().Exec(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
case dbPkg.Status == dbpackage.StatusLatest && dbPkg.RepoVersion == "":
|
||||
log.Infof("[HK] reseting missing package %s->%s with no repo version", fullRepo, dbPkg.Pkgbase)
|
||||
err = dbPkg.Update().SetStatus(dbpackage.StatusQueued).ClearTagRev().ClearRepoVersion().Exec(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case dbPkg.Status == dbpackage.StatusSkipped && dbPkg.RepoVersion != "" && !strings.HasPrefix(dbPkg.SkipReason, "delayed"):
|
||||
log.Infof("[HK] delete skipped package %s->%s", fullRepo, dbPkg.Pkgbase)
|
||||
pkg := &ProtoPackage{
|
||||
FullRepo: fullRepo,
|
||||
March: march,
|
||||
DBPackage: dbPkg,
|
||||
}
|
||||
buildManager.repoPurge[fullRepo] <- []*ProtoPackage{pkg}
|
||||
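// package is no longer on the blacklist -> queue it for building again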
case dbPkg.Status == dbpackage.StatusSkipped && dbPkg.SkipReason == "blacklisted" && !MatchGlobList(pkg.Pkgbase, conf.Blacklist.Packages):
|
||||
log.Infof("[HK] requeue previously blacklisted package %s->%s", fullRepo, dbPkg.Pkgbase)
|
||||
err = dbPkg.Update().SetStatus(dbpackage.StatusQueued).ClearSkipReason().ClearTagRev().Exec(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case dbPkg.Status == dbpackage.StatusFailed && dbPkg.RepoVersion != "":
|
||||
log.Infof("[HK] package %s->%s failed but still present in repo, removing", fullRepo, dbPkg.Pkgbase)
|
||||
pkg := &ProtoPackage{
|
||||
FullRepo: fullRepo,
|
||||
March: march,
|
||||
DBPackage: dbPkg,
|
||||
}
|
||||
buildManager.repoPurge[fullRepo] <- []*ProtoPackage{pkg}
|
||||
}
|
||||
}
|
||||
|
||||
log.Debugf("[HK/%s] all tasks finished", fullRepo)
|
||||
return nil
|
||||
}
|
||||
|
||||
func logHK(ctx context.Context) error {
|
||||
// check if package for log exists and if error can be fixed by rebuild
|
||||
logFiles, err := Glob(filepath.Join(conf.Basedir.Repo, logDir, "/**/*.log"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, logFile := range logFiles {
|
||||
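// derive pkgbase from the log file name and march from its parent directory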
pathSplit := strings.Split(logFile, string(filepath.Separator))
|
||||
extSplit := strings.Split(filepath.Base(logFile), ".")
|
||||
pkgbase := strings.Join(extSplit[:len(extSplit)-1], ".")
|
||||
march := pathSplit[len(pathSplit)-2]
|
||||
|
||||
pkg := ProtoPackage{
|
||||
Pkgbase: pkgbase,
|
||||
March: march,
|
||||
}
|
||||
|
||||
if exists, err := pkg.exists(); err != nil {
|
||||
return err
|
||||
} else if !exists {
|
||||
_ = os.Remove(logFile)
|
||||
continue
|
||||
}
|
||||
|
||||
pkgSkipped, err := db.DBPackage.Query().Where(
|
||||
dbpackage.Pkgbase(pkg.Pkgbase),
|
||||
dbpackage.March(pkg.March),
|
||||
dbpackage.StatusEQ(dbpackage.StatusSkipped),
|
||||
).Exist(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if pkgSkipped {
|
||||
_ = os.Remove(logFile)
|
||||
continue
|
||||
}
|
||||
|
||||
logContent, err := os.ReadFile(logFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sLogContent := string(logContent)
|
||||
|
||||
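// requeue failed builds whose log matches a known fixable error pattern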
if rePortError.MatchString(sLogContent) || reSigError.MatchString(sLogContent) || reDownloadError.MatchString(sLogContent) ||
|
||||
reDownloadError2.MatchString(sLogContent) {
|
||||
rows, err := db.DBPackage.Update().Where(dbpackage.Pkgbase(pkg.Pkgbase), dbpackage.March(pkg.March),
|
||||
dbpackage.StatusEQ(dbpackage.StatusFailed)).ClearTagRev().SetStatus(dbpackage.StatusQueued).Save(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if rows > 0 {
|
||||
log.Infof("[HK/%s/%s] fixable build-error detected, requeueing package (%d)", pkg.March, pkg.Pkgbase, rows)
|
||||
}
|
||||
} else if reLdError.MatchString(sLogContent) || reRustLTOError.MatchString(sLogContent) {
|
||||
rows, err := db.DBPackage.Update().Where(
|
||||
dbpackage.Pkgbase(pkg.Pkgbase),
|
||||
dbpackage.March(pkg.March),
|
||||
dbpackage.StatusEQ(dbpackage.StatusFailed),
|
||||
dbpackage.LtoNotIn(dbpackage.LtoAutoDisabled, dbpackage.LtoDisabled),
|
||||
).ClearTagRev().SetStatus(dbpackage.StatusQueued).SetLto(dbpackage.LtoAutoDisabled).Save(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if rows > 0 {
|
||||
log.Infof("[HK/%s/%s] fixable build-error detected (linker-error), requeueing package (%d)", pkg.March, pkg.Pkgbase, rows)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func debugHK() {
|
||||
for _, march := range conf.March {
|
||||
if _, err := os.Stat(filepath.Join(conf.Basedir.Debug, march)); err == nil {
|
||||
log.Debugf("[DHK/%s] start cleanup debug packages", march)
|
||||
cleanCmd := exec.Command("paccache", "-rc", filepath.Join(conf.Basedir.Debug, march), "-k", "1")
|
||||
res, err := cleanCmd.CombinedOutput()
|
||||
if err != nil {
|
||||
log.Warningf("[DHK/%s] cleanup debug packages failed: %v (%s)", march, err, string(res))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
main.go (665 lines)
@@ -1,587 +1,58 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"ALHP.go/ent"
|
||||
"ALHP.go/ent/dbpackage"
|
||||
"ALHP.go/ent/migrate"
|
||||
"bytes"
|
||||
"context"
|
||||
"entgo.io/ent/dialect"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"flag"
|
||||
"fmt"
|
||||
"github.com/Jguer/go-alpm/v2"
|
||||
_ "github.com/jackc/pgx/v4/stdlib"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/wercker/journalhook"
|
||||
"gopkg.in/yaml.v2"
|
||||
"html/template"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"somegit.dev/ALHP/ALHP.GO/ent"
|
||||
"somegit.dev/ALHP/ALHP.GO/ent/migrate"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
conf *Conf
|
||||
repos []string
|
||||
alpmHandle *alpm.Handle
|
||||
reMarch = regexp.MustCompile(`(-march=)(.+?) `)
|
||||
rePkgRel = regexp.MustCompile(`(?m)^pkgrel\s*=\s*(.+)$`)
|
||||
rePkgFile = regexp.MustCompile(`^(.*)-.*-.*-(?:x86_64|any)\.pkg\.tar\.zst(?:\.sig)*$`)
|
||||
buildManager *BuildManager
|
||||
db *ent.Client
|
||||
journalLog = flag.Bool("journal", false, "Log to systemd journal instead of stdout")
|
||||
checkInterval = flag.Int("interval", 5, "How often svn2git should be checked in minutes (default: 5)")
|
||||
configFile = flag.String("config", "config.yaml", "set config file name/path")
|
||||
)
|
||||
|
||||
func (b *BuildManager) buildWorker(id int) {
|
||||
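// lower this worker's scheduling priority so builds do not starve the host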
err := syscall.Setpriority(syscall.PRIO_PROCESS, 0, 18)
|
||||
if err != nil {
|
||||
log.Warningf("[worker-%d] Failed to drop priority: %v", id, err)
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case pkg := <-b.build:
|
||||
if b.exit {
|
||||
log.Infof("Worker %d exited...", id)
|
||||
return
|
||||
} else {
|
||||
b.buildWG.Add(1)
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
|
||||
log.Infof("[%s/%s] Build starting", pkg.FullRepo, pkg.Pkgbase)
|
||||
|
||||
dbPkg := getDbPackage(pkg)
|
||||
dbPkg = dbPkg.Update().SetStatus(dbpackage.StatusBuilding).SetBuildTimeStart(time.Now().UTC()).SetSkipReason("").SaveX(context.Background())
|
||||
|
||||
err := importKeys(pkg)
|
||||
if err != nil {
|
||||
log.Warningf("[%s/%s] Failed to import pgp keys: %v", pkg.FullRepo, pkg.Pkgbase, err)
|
||||
}
|
||||
|
||||
err = increasePkgRel(pkg)
|
||||
if err != nil {
|
||||
log.Errorf("[%s/%s] Failed to increase pkgrel: %v", pkg.FullRepo, pkg.Pkgbase, err)
|
||||
b.buildWG.Done()
|
||||
continue
|
||||
}
|
||||
pkg.PkgFiles = []string{}
|
||||
|
||||
// default to LTO
|
||||
makepkgFile := "makepkg-%s-lto.conf"
|
||||
if contains(conf.Blacklist.LTO, pkg.Pkgbase) {
|
||||
// use non-lto makepkg.conf if LTO is blacklisted for this package
|
||||
makepkgFile = "makepkg-%s.conf"
|
||||
}
|
||||
cmd := exec.Command("sh", "-c",
|
||||
"cd "+filepath.Dir(pkg.Pkgbuild)+"&&makechrootpkg -c -D "+conf.Basedir.Makepkg+" -l worker-"+strconv.Itoa(id)+" -r "+conf.Basedir.Chroot+" -- "+
|
||||
"--config "+filepath.Join(conf.Basedir.Makepkg, fmt.Sprintf(makepkgFile, pkg.March)))
|
||||
var out bytes.Buffer
|
||||
cmd.Stdout = &out
|
||||
cmd.Stderr = &out
|
||||
|
||||
check(cmd.Start())
|
||||
|
||||
b.buildProcMutex.Lock()
|
||||
b.buildProcesses = append(b.buildProcesses, cmd.Process)
|
||||
b.buildProcMutex.Unlock()
|
||||
|
||||
err = cmd.Wait()
|
||||
|
||||
b.buildProcMutex.Lock()
|
||||
for i := range b.buildProcesses {
|
||||
if b.buildProcesses[i].Pid == cmd.Process.Pid {
|
||||
b.buildProcesses = append(b.buildProcesses[:i], b.buildProcesses[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
b.buildProcMutex.Unlock()
|
||||
|
||||
if err != nil {
|
||||
if b.exit {
|
||||
gitClean(pkg)
|
||||
b.buildWG.Done()
|
||||
continue
|
||||
}
|
||||
|
||||
log.Warningf("[%s/%s] Build failed, exit code %d", pkg.FullRepo, pkg.Pkgbase, cmd.ProcessState.ExitCode())
|
||||
|
||||
b.failedMutex.Lock()
|
||||
f, err := os.OpenFile(filepath.Join(conf.Basedir.Repo, pkg.FullRepo+"_failed.txt"), os.O_WRONLY|os.O_APPEND|os.O_CREATE|os.O_SYNC, 0644)
|
||||
check(err)
|
||||
|
||||
_, err = f.WriteString(fmt.Sprintf("%s==%s\n", pkg.Pkgbase, constructVersion(pkg.Srcinfo.Pkgver, pkg.Srcinfo.Pkgrel, pkg.Srcinfo.Epoch)))
|
||||
check(err)
|
||||
check(f.Close())
|
||||
b.failedMutex.Unlock()
|
||||
|
||||
check(os.MkdirAll(filepath.Join(conf.Basedir.Repo, "logs"), 0755))
|
||||
check(os.WriteFile(filepath.Join(conf.Basedir.Repo, "logs", pkg.Pkgbase+".log"), out.Bytes(), 0644))
|
||||
|
||||
dbPkg.Update().SetStatus(dbpackage.StatusFailed).SetBuildTimeEnd(time.Now()).SetHash(pkg.Hash).ExecX(context.Background())
|
||||
|
||||
// purge failed package from repo
|
||||
b.repoPurge[pkg.FullRepo] <- pkg
|
||||
|
||||
gitClean(pkg)
|
||||
b.buildWG.Done()
|
||||
continue
|
||||
}
|
||||
|
||||
pkgFiles, err := filepath.Glob(filepath.Join(filepath.Dir(pkg.Pkgbuild), "*.pkg.tar.zst"))
|
||||
check(err)
|
||||
log.Debug(pkgFiles)
|
||||
|
||||
if len(pkgFiles) == 0 {
|
||||
log.Warningf("No packages found after building %s. Abort build.", pkg.Pkgbase)
|
||||
|
||||
gitClean(pkg)
|
||||
b.buildWG.Done()
|
||||
continue
|
||||
}
|
||||
|
||||
for _, file := range pkgFiles {
|
||||
cmd = exec.Command("gpg", "--batch", "--detach-sign", file)
|
||||
res, err := cmd.CombinedOutput()
|
||||
log.Debug(string(res))
|
||||
if err != nil {
|
||||
log.Warningf("Failed to sign %s: %s", pkg.Pkgbase, err)
|
||||
b.buildWG.Done()
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
copyFiles, err := filepath.Glob(filepath.Join(filepath.Dir(pkg.Pkgbuild), "*.pkg.tar.zst*"))
|
||||
check(err)
|
||||
|
||||
for _, file := range copyFiles {
|
||||
_, err = copyFile(file, filepath.Join(conf.Basedir.Repo, pkg.FullRepo, "os", conf.Arch, filepath.Base(file)))
|
||||
if err != nil {
|
||||
check(err)
|
||||
b.buildWG.Done()
|
||||
continue
|
||||
}
|
||||
|
||||
if filepath.Ext(file) != ".sig" {
|
||||
pkg.PkgFiles = append(pkg.PkgFiles, filepath.Join(conf.Basedir.Repo, pkg.FullRepo, "os", conf.Arch, filepath.Base(file)))
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := os.Stat(filepath.Join(conf.Basedir.Repo, "logs", pkg.Pkgbase+".log")); err == nil {
|
||||
check(os.Remove(filepath.Join(conf.Basedir.Repo, "logs", pkg.Pkgbase+".log")))
|
||||
}
|
||||
|
||||
dbPkg.Update().SetStatus(dbpackage.StatusBuild).SetBuildTimeEnd(time.Now().UTC()).ExecX(context.Background())
|
||||
|
||||
log.Infof("[%s/%s] Build successful (%s)", pkg.FullRepo, pkg.Pkgbase, time.Now().Sub(start))
|
||||
b.repoAdd[pkg.FullRepo] <- pkg
|
||||
|
||||
gitClean(pkg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *BuildManager) parseWorker() {
|
||||
for {
|
||||
if b.exit {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case pkg := <-b.parse:
|
||||
info, err := genSRCINFO(pkg.Pkgbuild)
|
||||
if err != nil {
|
||||
log.Warningf("Failed to generate SRCINFO for %s: %v", pkg.Pkgbase, err)
|
||||
b.parseWG.Done()
|
||||
continue
|
||||
}
|
||||
pkg.Srcinfo = info
|
||||
pkg.Version = constructVersion(pkg.Srcinfo.Pkgver, pkg.Srcinfo.Pkgrel, pkg.Srcinfo.Epoch)
|
||||
|
||||
dbPkg := getDbPackage(pkg)
|
||||
dbPkg = dbPkg.Update().SetUpdated(time.Now()).SetVersion(pkg.Version).SaveX(context.Background())
|
||||
|
||||
skipping := false
|
||||
if contains(info.Arch, "any") {
|
||||
log.Debugf("Skipped %s: any-Package", info.Pkgbase)
|
||||
dbPkg.SkipReason = "arch = any"
|
||||
dbPkg.Status = dbpackage.StatusSkipped
|
||||
skipping = true
|
||||
} else if contains(conf.Blacklist.Packages, info.Pkgbase) {
|
||||
log.Debugf("Skipped %s: blacklisted package", info.Pkgbase)
|
||||
dbPkg.SkipReason = "blacklisted"
|
||||
dbPkg.Status = dbpackage.StatusSkipped
|
||||
skipping = true
|
||||
} else if contains(info.MakeDepends, "ghc") || contains(info.MakeDepends, "haskell-ghc") || contains(info.Depends, "ghc") || contains(info.Depends, "haskell-ghc") {
|
||||
// Skip Haskell packages for now, as we are facing linking problems with them,
|
||||
// most likely caused by not having a dependency check implemented yet and building at random.
|
||||
// https://git.harting.dev/anonfunc/ALHP.GO/issues/11
|
||||
log.Debugf("Skipped %s: haskell package", info.Pkgbase)
|
||||
dbPkg.SkipReason = "blacklisted (haskell)"
|
||||
dbPkg.Status = dbpackage.StatusSkipped
|
||||
skipping = true
|
||||
} else if isPkgFailed(pkg) {
|
||||
log.Debugf("Skipped %s: failed build", info.Pkgbase)
|
||||
dbPkg.SkipReason = ""
|
||||
dbPkg.Status = dbpackage.StatusFailed
|
||||
skipping = true
|
||||
}
|
||||
|
||||
if skipping {
|
||||
dbPkg = dbPkg.Update().SetStatus(dbPkg.Status).SetSkipReason(dbPkg.SkipReason).SetHash(pkg.Hash).SaveX(context.Background())
|
||||
b.repoPurge[pkg.FullRepo] <- pkg
|
||||
b.parseWG.Done()
|
||||
continue
|
||||
}
|
||||
|
||||
repoVer := getVersionFromRepo(pkg)
|
||||
dbPkg = dbPkg.Update().SetRepoVersion(repoVer).SaveX(context.Background())
|
||||
if repoVer != "" && alpm.VerCmp(repoVer, pkg.Version) > 0 {
|
||||
log.Debugf("Skipped %s: Version in repo higher than in PKGBUILD (%s < %s)", info.Pkgbase, pkg.Version, repoVer)
|
||||
dbPkg = dbPkg.Update().SetStatus(dbpackage.StatusLatest).SetSkipReason("").SetHash(pkg.Hash).SaveX(context.Background())
|
||||
b.parseWG.Done()
|
||||
continue
|
||||
}
|
||||
|
||||
isLatest, local, syncVersion, err := isMirrorLatest(alpmHandle, pkg)
|
||||
if err != nil {
|
||||
switch err.(type) {
|
||||
default:
|
||||
log.Warningf("[%s/%s] Problem solving dependencies: %v", pkg.FullRepo, info.Pkgbase, err)
|
||||
case MultiplePKGBUILDError:
|
||||
log.Infof("Skipped %s: Multiple PKGBUILDs for dependency found: %v", info.Pkgbase, err)
|
||||
dbPkg = dbPkg.Update().SetStatus(dbpackage.StatusSkipped).SetSkipReason("multiple PKGBUILD for dep. found").SaveX(context.Background())
|
||||
b.repoPurge[pkg.FullRepo] <- pkg
|
||||
b.parseWG.Done()
|
||||
continue
|
||||
case UnableToSatisfyError:
|
||||
log.Infof("Skipped %s: unable to resolve dependencies: %v", info.Pkgbase, err)
|
||||
dbPkg = dbPkg.Update().SetStatus(dbpackage.StatusSkipped).SetSkipReason("unable to resolve dependencies").SaveX(context.Background())
|
||||
b.repoPurge[pkg.FullRepo] <- pkg
|
||||
b.parseWG.Done()
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
dbPkg = dbPkg.Update().SetStatus(dbpackage.StatusQueued).SaveX(context.Background())
|
||||
|
||||
if !isLatest {
|
||||
if local != nil {
|
||||
log.Infof("Delayed %s: not all dependencies are up to date (local: %s==%s, sync: %s==%s)", info.Pkgbase, local.Name(), local.Version(), local.Name(), syncVersion)
|
||||
dbPkg.Update().SetSkipReason(fmt.Sprintf("waiting for %s==%s", local.Name(), syncVersion)).ExecX(context.Background())
|
||||
} else {
|
||||
log.Infof("Delayed %s: not all dependencies are up to date or resolvable", info.Pkgbase)
|
||||
dbPkg.Update().SetSkipReason("waiting for mirror").ExecX(context.Background())
|
||||
}
|
||||
|
||||
// Purge delayed packages in case delay is caused by inconsistencies in svn2git.
|
||||
// Worst case would be clients downloading a package update twice, once from their official mirror,
|
||||
// and then after build from ALHP. Best case we prevent a not buildable package from staying in the repos
|
||||
// in an outdated version.
|
||||
b.repoPurge[pkg.FullRepo] <- pkg
|
||||
b.parseWG.Done()
|
||||
continue
|
||||
}
|
||||
|
||||
b.parseWG.Done()
|
||||
b.build <- pkg
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *BuildManager) htmlWorker() {
|
||||
type Pkg struct {
|
||||
Pkgbase string
|
||||
Status string
|
||||
Class string
|
||||
Skip string
|
||||
Version string
|
||||
Svn2GitVersion string
|
||||
BuildDate string
|
||||
BuildDuration time.Duration
|
||||
Checked string
|
||||
Log string
|
||||
}
|
||||
|
||||
type Repo struct {
|
||||
Name string
|
||||
Packages []Pkg
|
||||
}
|
||||
|
||||
type March struct {
|
||||
Name string
|
||||
Repos []Repo
|
||||
}
|
||||
|
||||
type tpl struct {
|
||||
March []March
|
||||
Generated string
|
||||
}
|
||||
|
||||
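// regenerate packages.html from the database once per minute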
for {
|
||||
gen := &tpl{}
|
||||
|
||||
for _, march := range conf.March {
|
||||
addMarch := March{
|
||||
Name: march,
|
||||
}
|
||||
|
||||
for _, repo := range conf.Repos {
|
||||
addRepo := Repo{
|
||||
Name: repo,
|
||||
}
|
||||
|
||||
pkgs := db.DbPackage.Query().Order(ent.Asc(dbpackage.FieldPkgbase)).Where(dbpackage.MarchEQ(march), dbpackage.RepositoryEQ(dbpackage.Repository(repo))).AllX(context.Background())
|
||||
|
||||
for _, pkg := range pkgs {
|
||||
status, class := statusId2string(pkg.Status)
|
||||
|
||||
addPkg := Pkg{
|
||||
Pkgbase: pkg.Pkgbase,
|
||||
Status: status,
|
||||
Class: class,
|
||||
Skip: pkg.SkipReason,
|
||||
Version: pkg.RepoVersion,
|
||||
Svn2GitVersion: pkg.Version,
|
||||
}
|
||||
|
||||
if !pkg.BuildTimeEnd.IsZero() && !pkg.BuildTimeStart.IsZero() {
|
||||
addPkg.BuildDuration = pkg.BuildTimeEnd.Sub(pkg.BuildTimeStart)
|
||||
}
|
||||
|
||||
if !pkg.BuildTimeStart.IsZero() {
|
||||
addPkg.BuildDate = pkg.BuildTimeStart.UTC().Format(time.RFC1123)
|
||||
}
|
||||
|
||||
if !pkg.Updated.IsZero() {
|
||||
addPkg.Checked = pkg.Updated.UTC().Format(time.RFC1123)
|
||||
}
|
||||
|
||||
if pkg.Status == dbpackage.StatusFailed {
|
||||
addPkg.Log = fmt.Sprintf("logs/%s.log", pkg.Pkgbase)
|
||||
}
|
||||
|
||||
addRepo.Packages = append(addRepo.Packages, addPkg)
|
||||
}
|
||||
addMarch.Repos = append(addMarch.Repos, addRepo)
|
||||
}
|
||||
gen.March = append(gen.March, addMarch)
|
||||
}
|
||||
|
||||
gen.Generated = time.Now().UTC().Format(time.RFC1123)
|
||||
|
||||
statusTpl, err := template.ParseFiles("tpl/packages.html")
|
||||
check(err)
|
||||
|
||||
f, err := os.OpenFile(filepath.Join(conf.Basedir.Repo, "packages.html"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
|
||||
check(statusTpl.Execute(f, gen))
|
||||
check(f.Close())
|
||||
|
||||
time.Sleep(time.Minute)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *BuildManager) repoWorker(repo string) {
|
||||
for {
|
||||
select {
|
||||
case pkg := <-b.repoAdd[repo]:
|
||||
args := []string{"-s", "-v", "-p", "-n", filepath.Join(conf.Basedir.Repo, pkg.FullRepo, "os", conf.Arch, pkg.FullRepo) + ".db.tar.xz"}
|
||||
args = append(args, pkg.PkgFiles...)
|
||||
cmd := exec.Command("repo-add", args...)
|
||||
res, err := cmd.CombinedOutput()
|
||||
log.Debug(string(res))
|
||||
if err != nil && cmd.ProcessState.ExitCode() != 1 {
|
||||
log.Panicf("%s while repo-add: %v", string(res), err)
|
||||
}
|
||||
|
||||
dbPkg := getDbPackage(pkg)
|
||||
dbPkg = dbPkg.Update().SetStatus(dbpackage.StatusLatest).SetSkipReason("").SetRepoVersion(pkg.Version).SetHash(pkg.Hash).SaveX(context.Background())
|
||||
|
||||
cmd = exec.Command("paccache",
|
||||
"-rc", filepath.Join(conf.Basedir.Repo, pkg.FullRepo, "os", conf.Arch),
|
||||
"-k", "1")
|
||||
res, err = cmd.CombinedOutput()
|
||||
log.Debug(string(res))
|
||||
check(err)
|
||||
b.buildWG.Done()
|
||||
case pkg := <-b.repoPurge[repo]:
|
||||
if _, err := os.Stat(filepath.Join(conf.Basedir.Repo, pkg.FullRepo, "os", conf.Arch, pkg.FullRepo) + ".db.tar.xz"); err != nil {
|
||||
continue
|
||||
}
|
||||
if len(pkg.PkgFiles) == 0 {
|
||||
findPkgFiles(pkg)
|
||||
if len(pkg.PkgFiles) == 0 {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
var realPkgs []string
|
||||
for _, realPkg := range pkg.Srcinfo.Packages {
|
||||
realPkgs = append(realPkgs, realPkg.Pkgname)
|
||||
}
|
||||
|
||||
b.repoWG.Add(1)
|
||||
args := []string{"-s", "-v", filepath.Join(conf.Basedir.Repo, pkg.FullRepo, "os", conf.Arch, pkg.FullRepo) + ".db.tar.xz"}
|
||||
args = append(args, realPkgs...)
|
||||
cmd := exec.Command("repo-remove", args...)
|
||||
res, err := cmd.CombinedOutput()
|
||||
log.Debug(string(res))
|
||||
if err != nil && cmd.ProcessState.ExitCode() == 1 {
|
||||
log.Debugf("Deleteing package %s failed: Package not found in database", pkg.Pkgbase)
|
||||
b.repoWG.Done()
|
||||
continue
|
||||
}
|
||||
|
||||
dbPkg := getDbPackage(pkg)
|
||||
dbPkg = dbPkg.Update().SetRepoVersion("").SaveX(context.Background())
|
||||
|
||||
for _, file := range pkg.PkgFiles {
|
||||
check(os.Remove(file))
|
||||
check(os.Remove(file + ".sig"))
|
||||
}
|
||||
b.repoWG.Done()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *BuildManager) syncWorker() {
|
||||
check(os.MkdirAll(conf.Basedir.Upstream, 0755))
|
||||
|
||||
for i := 0; i < conf.Build.Worker; i++ {
|
||||
go b.buildWorker(i)
|
||||
}
|
||||
|
||||
for i := 0; i < runtime.NumCPU(); i++ {
|
||||
go b.parseWorker()
|
||||
}
|
||||
|
||||
for {
|
||||
b.buildWG.Wait()
|
||||
|
||||
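// clone the svn2git mirrors on first run, otherwise clean, reset and pull them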
for gitDir, gitURL := range conf.Svn2git {
|
||||
gitPath := filepath.Join(conf.Basedir.Upstream, gitDir)
|
||||
|
||||
if _, err := os.Stat(gitPath); os.IsNotExist(err) {
|
||||
cmd := exec.Command("git", "clone", "--depth=1", gitURL, gitPath)
|
||||
res, err := cmd.CombinedOutput()
|
||||
log.Debug(string(res))
|
||||
check(err)
|
||||
} else if err == nil {
|
||||
cmd := exec.Command("sudo", "git_clean.sh", gitPath)
|
||||
res, err := cmd.CombinedOutput()
|
||||
log.Debug(string(res))
|
||||
if err != nil {
|
||||
log.Warningf("Failed to execute %s: %v", cmd.String(), err)
|
||||
}
|
||||
|
||||
cmd = exec.Command("sh", "-c", "cd "+gitPath+" && git reset --hard")
|
||||
res, err = cmd.CombinedOutput()
|
||||
log.Debug(string(res))
|
||||
check(err)
|
||||
|
||||
cmd = exec.Command("sh", "-c", "cd "+gitPath+" && git pull")
|
||||
res, err = cmd.CombinedOutput()
|
||||
log.Debug(string(res))
|
||||
check(err)
|
||||
}
|
||||
}
|
||||
|
||||
// fetch updates between sync runs
|
||||
b.alpmMutex.Lock()
|
||||
check(alpmHandle.Release())
|
||||
err := setupChroot()
|
||||
for err != nil {
|
||||
log.Warningf("Unable to upgrade chroot, trying again later.")
|
||||
time.Sleep(time.Minute)
|
||||
err = setupChroot()
|
||||
}
|
||||
|
||||
alpmHandle, err = initALPM(filepath.Join(conf.Basedir.Chroot, pristineChroot), filepath.Join(conf.Basedir.Chroot, pristineChroot, "/var/lib/pacman"))
|
||||
check(err)
|
||||
b.alpmMutex.Unlock()
|
||||
|
||||
pkgBuilds, err := Glob(filepath.Join(conf.Basedir.Upstream, "/**/PKGBUILD"))
|
||||
check(err)
|
||||
|
||||
for _, pkgbuild := range pkgBuilds {
|
||||
if b.exit {
|
||||
return
|
||||
}
|
||||
|
||||
sPkgbuild := strings.Split(pkgbuild, "/")
|
||||
repo := sPkgbuild[len(sPkgbuild)-2]
|
||||
|
||||
if repo == "trunk" || !contains(conf.Repos, strings.Split(repo, "-")[0]) || containsSubStr(repo, conf.Blacklist.Repo) {
|
||||
continue
|
||||
}
|
||||
|
||||
// compare b3sum of PKGBUILD file to hash in database, only proceed if hash differs
|
||||
// significantly reduces the number of PKGBUILDs that need to be parsed with makepkg, which is _really_ slow
|
||||
dbPkg, dbErr := db.DbPackage.Query().Where(dbpackage.And(
|
||||
dbpackage.Pkgbase(sPkgbuild[len(sPkgbuild)-4]),
|
||||
dbpackage.RepositoryEQ(dbpackage.Repository(strings.Split(repo, "-")[0]))),
|
||||
).Only(context.Background())
|
||||
|
||||
if dbErr != nil {
|
||||
switch dbErr.(type) {
|
||||
case *ent.NotFoundError:
|
||||
log.Debugf("[%s/%s] Package not found in database", strings.Split(repo, "-")[0], sPkgbuild[len(sPkgbuild)-4])
|
||||
break
|
||||
default:
|
||||
log.Errorf("[%s/%s] Problem querying db for package: %v", strings.Split(repo, "-")[0], sPkgbuild[len(sPkgbuild)-4], dbErr)
|
||||
}
|
||||
}
|
||||
|
||||
b3s, err := b3sum(pkgbuild)
|
||||
check(err)
|
||||
|
||||
if dbPkg != nil && b3s == dbPkg.Hash {
|
||||
log.Debugf("[%s/%s] Skipped: PKGBUILD hash matches db (%s)", strings.Split(repo, "-")[0], sPkgbuild[len(sPkgbuild)-4], b3s)
|
||||
continue
|
||||
}
|
||||
|
||||
// send to parse
|
||||
for _, march := range conf.March {
|
||||
b.parseWG.Add(1)
|
||||
b.parse <- &BuildPackage{
|
||||
Pkgbuild: pkgbuild,
|
||||
Pkgbase: sPkgbuild[len(sPkgbuild)-4],
|
||||
Repo: dbpackage.Repository(strings.Split(repo, "-")[0]),
|
||||
March: march,
|
||||
FullRepo: strings.Split(repo, "-")[0] + "-" + march,
|
||||
Hash: b3s,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
b.parseWG.Wait()
|
||||
time.Sleep(time.Duration(*checkInterval) * time.Minute)
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
killSignals := make(chan os.Signal, 1)
|
||||
signal.Notify(killSignals, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
reloadSignals := make(chan os.Signal, 1)
|
||||
signal.Notify(reloadSignals, syscall.SIGUSR1)
|
||||
|
||||
flag.Parse()
|
||||
|
||||
confStr, err := os.ReadFile("config.yaml")
|
||||
check(err)
|
||||
confStr, err := os.ReadFile(*configFile)
|
||||
if err != nil {
|
||||
log.Fatalf("error reading config file: %v", err)
|
||||
}
|
||||
|
||||
err = yaml.Unmarshal(confStr, &conf)
|
||||
check(err)
|
||||
if err != nil {
|
||||
log.Fatalf("error parsing config file: %v", err)
|
||||
}
|
||||
|
||||
lvl, err := log.ParseLevel(conf.Logging.Level)
|
||||
check(err)
|
||||
if err != nil {
|
||||
log.Fatalf("error parsing log level from config: %v", err)
|
||||
}
|
||||
log.SetLevel(lvl)
|
||||
if *journalLog {
|
||||
journalhook.Enable()
|
||||
@@ -589,66 +60,94 @@ func main() {
|
||||
|
||||
err = syscall.Setpriority(syscall.PRIO_PROCESS, 0, 5)
|
||||
if err != nil {
|
||||
log.Warningf("Failed to drop priority: %v", err)
|
||||
log.Infof("failed to drop priority: %v", err)
|
||||
}
|
||||
|
||||
err = os.MkdirAll(conf.Basedir.Repo, 0755)
|
||||
check(err)
|
||||
err = os.MkdirAll(conf.Basedir.Repo, 0o755)
|
||||
if err != nil {
|
||||
log.Fatalf("error creating repo dir: %v", err)
|
||||
}
|
||||
|
||||
if conf.Db.Driver == "pgx" {
|
||||
pdb, err := sql.Open("pgx", conf.Db.ConnectTo)
|
||||
if conf.DB.Driver == "pgx" {
|
||||
pdb, err := sql.Open("pgx", conf.DB.ConnectTo)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to open database %s: %v", conf.Db.ConnectTo, err)
|
||||
log.Fatalf("failed to open database %s: %v", conf.DB.ConnectTo, err)
|
||||
}
|
||||
|
||||
drv := sql.OpenDB(dialect.Postgres, pdb.DB())
|
||||
db = ent.NewClient(ent.Driver(drv))
|
||||
} else {
|
||||
db, err = ent.Open(conf.Db.Driver, conf.Db.ConnectTo)
|
||||
db, err = ent.Open(conf.DB.Driver, conf.DB.ConnectTo)
|
||||
if err != nil {
|
||||
log.Panicf("Failed to open database %s: %v", conf.Db.ConnectTo, err)
|
||||
log.Panicf("failed to open database %s: %v", conf.DB.ConnectTo, err)
|
||||
}
|
||||
defer func(Client *ent.Client) {
|
||||
_ = Client.Close()
|
||||
}(db)
|
||||
}
|
||||
|
||||
if err := db.Schema.Create(context.Background(), migrate.WithDropIndex(true), migrate.WithDropColumn(true)); err != nil {
|
||||
log.Panicf("Automigrate failed: %v", err)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
if err := db.Schema.Create(ctx, migrate.WithDropIndex(true), migrate.WithDropColumn(true)); err != nil {
|
||||
log.Panicf("automigrate failed: %v", err)
|
||||
}
|
||||
|
||||
buildManager = &BuildManager{
|
||||
build: make(chan *BuildPackage, 10000),
|
||||
parse: make(chan *BuildPackage, 10000),
|
||||
repoPurge: make(map[string]chan *BuildPackage),
|
||||
repoAdd: make(map[string]chan *BuildPackage),
|
||||
exit: false,
|
||||
repoPurge: make(map[string]chan []*ProtoPackage),
|
||||
repoAdd: make(map[string]chan []*ProtoPackage),
|
||||
queueSignal: make(chan struct{}),
|
||||
alpmMutex: new(sync.RWMutex),
|
||||
building: []*ProtoPackage{},
|
||||
buildingLock: new(sync.RWMutex),
|
||||
repoWG: new(sync.WaitGroup),
|
||||
}
|
||||
|
||||
err = setupChroot()
|
||||
buildManager.setupMetrics(conf.Metrics.Port)
|
||||
|
||||
err = setupChroot(ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("Unable to setup chroot: %v", err)
|
||||
log.Panicf("unable to setup chroot: %v", err)
|
||||
}
|
||||
syncMarchs()
|
||||
|
||||
alpmHandle, err = initALPM(filepath.Join(conf.Basedir.Chroot, pristineChroot), filepath.Join(conf.Basedir.Chroot, pristineChroot, "/var/lib/pacman"))
|
||||
check(err)
|
||||
|
||||
go buildManager.syncWorker()
|
||||
go buildManager.htmlWorker()
|
||||
|
||||
<-killSignals
|
||||
|
||||
buildManager.exit = true
|
||||
buildManager.buildProcMutex.RLock()
|
||||
for _, p := range buildManager.buildProcesses {
|
||||
pgid, err := syscall.Getpgid(p.Pid)
|
||||
check(err)
|
||||
|
||||
check(syscall.Kill(-pgid, syscall.SIGTERM))
|
||||
err = syncMarchs(ctx)
|
||||
if err != nil {
|
||||
log.Panicf("error syncing marchs: %v", err)
|
||||
}
|
||||
buildManager.buildProcMutex.RUnlock()
|
||||
buildManager.buildWG.Wait()
|
||||
|
||||
alpmHandle, err = initALPM(filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot),
|
||||
filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot, "/var/lib/pacman"))
|
||||
if err != nil {
|
||||
log.Panicf("error while ALPM-init: %v", err)
|
||||
}
|
||||
|
||||
go func() {
|
||||
_ = buildManager.syncWorker(ctx)
|
||||
}()
|
||||
|
||||
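// run until SIGINT/SIGTERM; SIGUSR1 reloads the config and log level without a restart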
killLoop:
|
||||
for {
|
||||
select {
|
||||
case <-killSignals:
|
||||
break killLoop
|
||||
case <-reloadSignals:
|
||||
confStr, err := os.ReadFile(*configFile)
|
||||
if err != nil {
|
||||
log.Panicf("unable to open config: %v", err)
|
||||
}
|
||||
|
||||
err = yaml.Unmarshal(confStr, &conf)
|
||||
if err != nil {
|
||||
log.Panicf("unable to parse config: %v", err)
|
||||
}
|
||||
|
||||
lvl, err := log.ParseLevel(conf.Logging.Level)
|
||||
if err != nil {
|
||||
log.Panicf("failure setting logging level: %v", err)
|
||||
}
|
||||
log.SetLevel(lvl)
|
||||
log.Infof("config reloaded")
|
||||
}
|
||||
}
|
||||
|
||||
cancel()
|
||||
buildManager.repoWG.Wait()
|
||||
check(alpmHandle.Release())
|
||||
_ = alpmHandle.Release()
|
||||
}
|
||||
metrics.go (new file, 26 lines)
@@ -0,0 +1,26 @@
package main

import (
	"fmt"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	log "github.com/sirupsen/logrus"
	"net/http"
)

func (b *BuildManager) setupMetrics(port uint32) {
	b.metrics.queueSize = promauto.NewGaugeVec(prometheus.GaugeOpts{
		Name: "build_queue_size",
		Help: "Build queue size",
	}, []string{"repository", "status"})

	mux := http.NewServeMux()
	mux.Handle("/", promhttp.Handler())
	go func() {
		err := http.ListenAndServe(fmt.Sprintf(":%d", port), mux) //nolint:gosec
		if err != nil {
			log.Errorf("failed to start metrics server: %v", err)
		}
	}()
}
package.go (new file, 94 lines)
@@ -0,0 +1,94 @@
package main

import (
	"context"
	"entgo.io/ent/dialect/sql"
	"entgo.io/ent/dialect/sql/sqljson"
	"fmt"
	log "github.com/sirupsen/logrus"
	"os/exec"
	"path/filepath"
	"somegit.dev/ALHP/ALHP.GO/ent"
	"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
	"strings"
)

type Package string

// Name returns package's name
func (pkg Package) Name() string {
	fNameSplit := strings.Split(filepath.Base(string(pkg)), "-")
	return strings.Join(fNameSplit[:len(fNameSplit)-3], "-")
}

// MArch returns package's march
func (pkg Package) MArch() *string {
	splitPath := strings.Split(string(pkg), string(filepath.Separator))
	res := strings.Join(strings.Split(splitPath[len(splitPath)-4], "-")[1:], "-")
	return &res
}

// Repo returns package's dbpackage.Repository
func (pkg Package) Repo() dbpackage.Repository {
	splitPath := strings.Split(string(pkg), string(filepath.Separator))
	return dbpackage.Repository(strings.Split(splitPath[len(splitPath)-4], "-")[0])
}

// FullRepo returns package's dbpackage.Repository-march
func (pkg Package) FullRepo() *string {
	splitPath := strings.Split(string(pkg), string(filepath.Separator))
	return &splitPath[len(splitPath)-4]
}

// Version returns version extracted from package
func (pkg Package) Version() string {
	fNameSplit := strings.Split(filepath.Base(string(pkg)), "-")
	return strings.Join(fNameSplit[len(fNameSplit)-3:len(fNameSplit)-1], "-")
}

// Arch returns package's Architecture
func (pkg Package) Arch() *string {
	fNameSplit := strings.Split(filepath.Base(string(pkg)), "-")
	fNameSplit = strings.Split(fNameSplit[len(fNameSplit)-1], ".")
	return &fNameSplit[0]
}

// HasValidSignature returns if package has valid detached signature file
func (pkg Package) HasValidSignature() (bool, error) {
	cmd := exec.Command("gpg", "--verify", string(pkg)+".sig") //nolint:gosec
	res, err := cmd.CombinedOutput()
	switch {
	case cmd.ProcessState.ExitCode() == 2 || cmd.ProcessState.ExitCode() == 1:
		return false, nil
	case cmd.ProcessState.ExitCode() == 0:
		return true, nil
	case err != nil:
		return false, fmt.Errorf("error checking signature: %w (%s)", err, res)
	}

	return false, nil
}

// DBPackage returns ent.DBPackage for package
func (pkg Package) DBPackage(ctx context.Context, db *ent.Client) (*ent.DBPackage, error) {
	return pkg.DBPackageIsolated(ctx, *pkg.MArch(), pkg.Repo(), db)
}

// DBPackageIsolated returns ent.DBPackage like DBPackage, but not relying on the path for march and repo
func (pkg Package) DBPackageIsolated(ctx context.Context, march string, repo dbpackage.Repository, db *ent.Client) (*ent.DBPackage, error) {
	dbPkg, err := db.DBPackage.Query().Where(func(s *sql.Selector) {
		s.Where(
			sql.And(
				sqljson.ValueContains(dbpackage.FieldPackages, pkg.Name()),
				sql.EQ(dbpackage.FieldMarch, march),
				sql.EQ(dbpackage.FieldRepository, repo)),
		)
	}).Only(ctx)
	if ent.IsNotFound(err) {
		log.Debugf("not found in database: %s", pkg.Name())
		return nil, err
	} else if err != nil {
		return nil, err
	}
	return dbPkg, nil
}
proto_package.go (new file, 780 lines)
@@ -0,0 +1,780 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/Jguer/go-alpm/v2"
|
||||
"github.com/Morganamilo/go-srcinfo"
|
||||
"github.com/c2h5oh/datasize"
|
||||
"github.com/google/uuid"
|
||||
"github.com/otiai10/copy"
|
||||
"github.com/sethvargo/go-retry"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"somegit.dev/ALHP/ALHP.GO/ent"
|
||||
"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
type ProtoPackage struct {
|
||||
Pkgbase string
|
||||
Srcinfo *srcinfo.Srcinfo
|
||||
Arch string
|
||||
PkgFiles []string
|
||||
Repo dbpackage.Repository
|
||||
March string
|
||||
FullRepo string
|
||||
Version string
|
||||
DBPackage *ent.DBPackage
|
||||
Pkgbuild string
|
||||
State *StateInfo
|
||||
}
|
||||
|
||||
var (
|
||||
ErrorNotEligible = errors.New("package is not eligible")
|
||||
)
|
||||
|
||||
func (p *ProtoPackage) isEligible(ctx context.Context) bool {
|
||||
skipping := false
|
||||
switch {
|
||||
case p.Arch == "any":
|
||||
log.Debugf("skipped %s: any-package", p.Pkgbase)
|
||||
p.DBPackage.SkipReason = "arch = any"
|
||||
p.DBPackage.Status = dbpackage.StatusSkipped
|
||||
skipping = true
|
||||
case MatchGlobList(p.Pkgbase, conf.Blacklist.Packages):
|
||||
log.Debugf("skipped %s: package on no-build list", p.Pkgbase)
|
||||
p.DBPackage.SkipReason = "blacklisted"
|
||||
p.DBPackage.Status = dbpackage.StatusSkipped
|
||||
skipping = true
|
||||
case p.DBPackage.MaxRss != nil && datasize.ByteSize(*p.DBPackage.MaxRss)*datasize.KB > conf.Build.MemoryLimit: //nolint:gosec
|
||||
log.Debugf("skipped %s: memory limit exceeded (%s)", p.Pkgbase, datasize.ByteSize(*p.DBPackage.MaxRss)*datasize.KB) //nolint:gosec
|
||||
p.DBPackage.SkipReason = "memory limit exceeded"
|
||||
p.DBPackage.Status = dbpackage.StatusSkipped
|
||||
skipping = true
|
||||
case p.isPkgFailed():
|
||||
log.Debugf("skipped %s: failed build", p.Pkgbase)
|
||||
skipping = true
|
||||
case p.Srcinfo != nil:
|
||||
// skip haskell packages, since they cannot be optimized currently (no -O3 & march has no effect as far as I know)
|
||||
if Contains(p.Srcinfo.MakeDepends, "ghc") || Contains(p.Srcinfo.MakeDepends, "haskell-ghc") ||
|
||||
Contains(p.Srcinfo.Depends, "ghc") || Contains(p.Srcinfo.Depends, "haskell-ghc") {
|
||||
log.Debugf("skipped %s: haskell", p.Pkgbase)
|
||||
p.DBPackage.SkipReason = "haskell"
|
||||
p.DBPackage.Status = dbpackage.StatusSkipped
|
||||
skipping = true
|
||||
}
|
||||
}
|
||||
|
||||
if skipping {
|
||||
p.DBPackage = p.DBPackage.Update().SetUpdated(time.Now()).SetVersion(p.Version).SetStatus(p.DBPackage.Status).
|
||||
SetSkipReason(p.DBPackage.SkipReason).SetTagRev(p.State.TagRev).SaveX(ctx)
|
||||
return false
|
||||
}
|
||||
p.DBPackage = p.DBPackage.Update().SetUpdated(time.Now()).SetVersion(p.Version).SaveX(ctx)
|
||||
|
||||
if Contains(conf.Blacklist.LTO, p.Pkgbase) && p.DBPackage.Lto != dbpackage.LtoDisabled {
|
||||
p.DBPackage = p.DBPackage.Update().SetLto(dbpackage.LtoDisabled).SaveX(ctx)
|
||||
}
|
||||
|
||||
repoVer, err := p.repoVersion()
|
||||
if err != nil {
|
||||
p.DBPackage = p.DBPackage.Update().ClearRepoVersion().SaveX(ctx)
|
||||
} else if alpm.VerCmp(repoVer, p.Version) > 0 {
|
||||
log.Debugf("skipped %s: version in repo higher than in PKGBUILD (%s < %s)", p.Pkgbase, p.Version, repoVer)
|
||||
p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusLatest).ClearSkipReason().SetTagRev(p.State.TagRev).SaveX(ctx)
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {
|
||||
start := time.Now().UTC()
|
||||
chroot := "build_" + uuid.New().String()
|
||||
|
||||
buildFolder, err := p.setupBuildDir(ctx)
|
||||
if err != nil {
|
||||
return time.Since(start), fmt.Errorf("error setting up build folder: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
chroot := chroot
|
||||
log.Debugf("removing chroot %s", chroot)
|
||||
err := cleanBuildDir(buildFolder, filepath.Join(conf.Basedir.Work, chrootDir, chroot))
|
||||
if err != nil {
|
||||
log.Errorf("error removing builddir/chroot %s/%s: %v", buildDir, chroot, err)
|
||||
}
|
||||
}()
|
||||
|
||||
err = p.genSrcinfo()
|
||||
if err != nil {
|
||||
return time.Since(start), fmt.Errorf("error generating srcinfo: %w", err)
|
||||
}
|
||||
p.Version = constructVersion(p.Srcinfo.Pkgver, p.Srcinfo.Pkgrel, p.Srcinfo.Epoch)
|
||||
p.DBPackage = p.DBPackage.Update().SetPackages(packages2slice(p.Srcinfo.Packages)).SaveX(ctx)
|
||||
|
||||
// skip haskell packages, since they cannot be optimized currently (no -O3 & march has no effect as far as I know)
|
||||
if Contains(p.Srcinfo.MakeDepends, "ghc") || Contains(p.Srcinfo.MakeDepends, "haskell-ghc") ||
|
||||
Contains(p.Srcinfo.Depends, "ghc") || Contains(p.Srcinfo.Depends, "haskell-ghc") {
|
||||
p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusSkipped).SetSkipReason("haskell").SetTagRev(p.State.TagRev).SaveX(ctx)
|
||||
buildManager.repoPurge[p.FullRepo] <- []*ProtoPackage{p}
|
||||
return time.Since(start), ErrorNotEligible
|
||||
}
|
||||
|
||||
isLatest, local, syncVersion, err := p.isMirrorLatest(alpmHandle)
|
||||
if err != nil {
|
||||
var multipleStateFilesError MultipleStateFilesError
|
||||
var unableToSatisfyError UnableToSatisfyError
|
||||
switch {
|
||||
default:
|
||||
return time.Since(start), fmt.Errorf("error solving deps: %w", err)
|
||||
case errors.As(err, &multipleStateFilesError):
|
||||
log.Infof("skipped %s: multiple PKGBUILDs for dependency found: %v", p.Srcinfo.Pkgbase, err)
|
||||
p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusSkipped).SetSkipReason("multiple PKGBUILD for dep. found").SaveX(ctx)
|
||||
return time.Since(start), err
|
||||
case errors.As(err, &unableToSatisfyError):
|
||||
log.Infof("skipped %s: unable to resolve dependencies: %v", p.Srcinfo.Pkgbase, err)
|
||||
p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusSkipped).SetSkipReason("unable to resolve dependencies").SaveX(ctx)
|
||||
return time.Since(start), ErrorNotEligible
|
||||
}
|
||||
}
|
||||
|
||||
if !isLatest {
|
||||
if local != nil {
|
||||
log.Infof("delayed %s: not all dependencies are up to date (local: %s==%s, sync: %s==%s)",
|
||||
p.Srcinfo.Pkgbase, local.Name(), local.Version(), local.Name(), syncVersion)
|
||||
p.DBPackage.Update().SetStatus(dbpackage.StatusDelayed).
|
||||
SetSkipReason(fmt.Sprintf("waiting for %s==%s", local.Name(), syncVersion)).ExecX(ctx)
|
||||
|
||||
// Returning an error here causes the package to be purged.
|
||||
// Purge delayed packages in case delay is caused by inconsistencies in state.
|
||||
// Worst case would be clients downloading a package update twice, once from their official mirror,
|
||||
// and then after build from ALHP. Best case we prevent a not buildable package from staying in the repos
|
||||
// in an outdated version.
|
||||
if time.Since(local.BuildDate()).Hours() >= 48 && p.DBPackage.RepoVersion != "" {
|
||||
return time.Since(start), errors.New("overdue package waiting")
|
||||
}
|
||||
} else {
|
||||
log.Infof("delayed %s: not all dependencies are up to date or resolvable", p.Srcinfo.Pkgbase)
|
||||
p.DBPackage.Update().SetStatus(dbpackage.StatusDelayed).SetSkipReason("waiting for mirror").ExecX(ctx)
|
||||
}
|
||||
|
||||
return time.Since(start), ErrorNotEligible
|
||||
}
|
||||
|
||||
log.Infof("[P] build starting: %s->%s->%s", p.FullRepo, p.Pkgbase, p.Version)
|
||||
|
||||
p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusBuilding).ClearSkipReason().SaveX(ctx)
|
||||
|
||||
err = p.importKeys()
|
||||
if err != nil {
|
||||
log.Warningf("[P] failed to import pgp keys for %s->%s->%s: %v", p.FullRepo, p.Pkgbase, p.Version, err)
|
||||
}
|
||||
|
||||
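// continue the rebuild counter from lastVersionBuild so repeated builds of the same version get a higher pkgrel suffix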
buildNo := 1
|
||||
versionSlice := strings.Split(p.DBPackage.LastVersionBuild, ".")
|
||||
if strings.Join(versionSlice[:len(versionSlice)-1], ".") == p.Version {
|
||||
buildNo, err = strconv.Atoi(versionSlice[len(versionSlice)-1])
|
||||
if err != nil {
|
||||
return time.Since(start), fmt.Errorf("error while reading buildNo from pkgrel: %w", err)
|
||||
}
|
||||
buildNo++
|
||||
}
|
||||
|
||||
err = p.increasePkgRel(buildNo)
|
||||
if err != nil {
|
||||
return time.Since(start), fmt.Errorf("error while increasing pkgrel: %w", err)
|
||||
}
|
||||
|
||||
p.PkgFiles = []string{}
|
||||
|
||||
// default to LTO
|
||||
makepkgFile := makepkg
|
||||
if p.DBPackage.Lto == dbpackage.LtoDisabled || p.DBPackage.Lto == dbpackage.LtoAutoDisabled {
|
||||
// use non-lto makepkg.conf if LTO is blacklisted for this package
|
||||
makepkgFile = makepkgLTO
|
||||
}
|
||||
cmd := exec.CommandContext(ctx, "makechrootpkg", "-c", "-D", filepath.Join(conf.Basedir.Work, makepkgDir), //nolint:gosec
|
||||
"-l", chroot, "-r", filepath.Join(conf.Basedir.Work, chrootDir), "--", "-m", "--noprogressbar", "--config",
|
||||
filepath.Join(conf.Basedir.Work, makepkgDir, fmt.Sprintf(makepkgFile, p.March)))
|
||||
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
|
||||
cmd.Dir = filepath.Dir(p.Pkgbuild)
|
||||
var out bytes.Buffer
|
||||
cmd.Stdout = &out
|
||||
cmd.Stderr = &out
|
||||
|
||||
if err = cmd.Start(); err != nil {
|
||||
return time.Since(start), fmt.Errorf("error starting build: %w", err)
|
||||
}
|
||||
|
||||
pgid, err := syscall.Getpgid(cmd.Process.Pid)
|
||||
if err != nil {
|
||||
log.Errorf("error getting PGID: %v", err)
|
||||
}
|
||||
|
||||
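// track peak memory usage of the build process group while makechrootpkg runs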
done := make(chan bool)
|
||||
result := make(chan int64)
|
||||
go pollMemoryUsage(pgid, 1*time.Second, done, result)
|
||||
|
||||
err = cmd.Wait()
|
||||
close(done)
|
||||
peakMem := <-result
|
||||
close(result)
|
||||
|
||||
Rusage, ok := cmd.ProcessState.SysUsage().(*syscall.Rusage)
|
||||
if !ok {
|
||||
log.Panicf("rusage is not of type *syscall.Rusage, are we running on unix-like?")
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
if ctx.Err() != nil {
|
||||
return time.Since(start), ctx.Err()
|
||||
}
|
||||
|
||||
if p.DBPackage.Lto != dbpackage.LtoAutoDisabled && p.DBPackage.Lto != dbpackage.LtoDisabled &&
|
||||
(reLdError.MatchString(out.String()) || reRustLTOError.MatchString(out.String())) {
|
||||
p.DBPackage.Update().SetStatus(dbpackage.StatusQueued).SetSkipReason("non-LTO rebuild").SetLto(dbpackage.LtoAutoDisabled).ExecX(ctx)
|
||||
return time.Since(start), errors.New("ld/lto-incompatibility error detected, LTO disabled")
|
||||
}
|
||||
|
||||
if reDownloadError.MatchString(out.String()) || reDownloadError2.MatchString(out.String()) ||
|
||||
rePortError.MatchString(out.String()) || reSigError.MatchString(out.String()) {
|
||||
p.DBPackage.Update().SetStatus(dbpackage.StatusQueued).ExecX(ctx)
|
||||
return time.Since(start), errors.New("known build error detected")
|
||||
}
|
||||
|
||||
err = os.MkdirAll(filepath.Join(conf.Basedir.Repo, logDir, p.March), 0o755)
|
||||
if err != nil {
|
||||
return time.Since(start), fmt.Errorf("error creating logdir: %w", err)
|
||||
}
|
||||
err = os.WriteFile(filepath.Join(conf.Basedir.Repo, logDir, p.March, p.Pkgbase+".log"), //nolint:gosec
|
||||
[]byte(strings.ToValidUTF8(out.String(), "")), 0o644)
|
||||
if err != nil {
|
||||
return time.Since(start), fmt.Errorf("error warting to logdir: %w", err)
|
||||
}
|
||||
|
||||
p.DBPackage.Update().
|
||||
SetStatus(dbpackage.StatusFailed).
|
||||
ClearSkipReason().
|
||||
SetBuildTimeStart(start).
|
||||
ClearMaxRss().
|
||||
ClearLastVersionBuild().
|
||||
ClearIoOut().
|
||||
ClearIoIn().
|
||||
ClearUTime().
|
||||
ClearSTime().
|
||||
SetTagRev(p.State.TagRev).
|
||||
ExecX(ctx)
|
||||
return time.Since(start), fmt.Errorf("build failed: exit code %d", cmd.ProcessState.ExitCode())
|
||||
}
|
||||
|
||||
pkgFiles, err := filepath.Glob(filepath.Join(filepath.Dir(p.Pkgbuild), "*.pkg.tar.zst"))
|
||||
if err != nil {
|
||||
return time.Since(start), fmt.Errorf("error scanning builddir for artifacts: %w", err)
|
||||
}
|
||||
|
||||
if len(pkgFiles) == 0 {
|
||||
return time.Since(start), errors.New("no build-artifacts found")
|
||||
}
|
||||
|
||||
for _, file := range pkgFiles {
|
||||
cmd = exec.Command("gpg", "--batch", "--detach-sign", file)
|
||||
res, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return time.Since(start), fmt.Errorf("error while signing artifact: %w (%s)", err, string(res))
|
||||
}
|
||||
}
|
||||
|
||||
copyFiles, err := filepath.Glob(filepath.Join(filepath.Dir(p.Pkgbuild), "*.pkg.tar.zst*"))
|
||||
if err != nil {
|
||||
return time.Since(start), fmt.Errorf("error scanning builddir for artifacts: %w", err)
|
||||
}
|
||||
|
||||
holdingDir := filepath.Join(conf.Basedir.Work, waitingDir, p.FullRepo)
|
||||
for _, file := range copyFiles {
|
||||
err = os.MkdirAll(holdingDir, 0o755)
|
||||
if err != nil {
|
||||
return time.Since(start), fmt.Errorf("error creating %s: %w", holdingDir, err)
|
||||
}
|
||||
err = copy.Copy(file, filepath.Join(holdingDir, filepath.Base(file)))
|
||||
if err != nil {
|
||||
return time.Since(start), fmt.Errorf("error while copying file to %s: %w", filepath.Join(holdingDir, filepath.Base(file)), err)
|
||||
}
|
||||
|
||||
if filepath.Ext(file) != ".sig" {
|
||||
p.PkgFiles = append(p.PkgFiles, filepath.Join(holdingDir, filepath.Base(file)))
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := os.Stat(filepath.Join(conf.Basedir.Repo, logDir, p.March, p.Pkgbase+".log")); err == nil {
|
||||
err := os.Remove(filepath.Join(conf.Basedir.Repo, logDir, p.March, p.Pkgbase+".log"))
|
||||
if err != nil {
|
||||
return time.Since(start), fmt.Errorf("error removing log: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
updatePkg := p.DBPackage.Update().
|
||||
SetStatus(dbpackage.StatusBuilt).
|
||||
SetLto(dbpackage.LtoEnabled).
|
||||
SetBuildTimeStart(start).
|
||||
SetLastVersionBuild(p.Version).
|
||||
SetTagRev(p.State.TagRev).
|
||||
SetMaxRss(peakMem).
|
||||
SetIoOut(Rusage.Oublock).
|
||||
SetIoIn(Rusage.Inblock).
|
||||
SetUTime(Rusage.Utime.Sec).
|
||||
SetSTime(Rusage.Stime.Sec)
|
||||
|
||||
if p.DBPackage.Lto != dbpackage.LtoDisabled && p.DBPackage.Lto != dbpackage.LtoAutoDisabled {
|
||||
updatePkg.SetLto(dbpackage.LtoEnabled)
|
||||
}
|
||||
|
||||
updatePkg.ExecX(ctx)
|
||||
|
||||
return time.Since(start), nil
|
||||
}
|
||||
|
||||
func (p *ProtoPackage) setupBuildDir(ctx context.Context) (string, error) {
|
||||
buildDir := filepath.Join(conf.Basedir.Work, buildDir, p.March, p.Pkgbase+"-"+p.Version)
|
||||
|
||||
err := cleanBuildDir(buildDir, "")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("removing old builddir failed: %w", err)
|
||||
}
|
||||
|
||||
err = os.MkdirAll(buildDir, 0o755)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
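// Assuming the re* patterns implement Arch Linux's GitLab project-name conversion, this maps a
// pkgbase to its packaging repo path, for example (illustrative, not exhaustive):
//   "dvd+rw-tools" -> "dvd-rw-tools"   (a single '+' between other characters becomes '-')
//   "mysql++"      -> "mysqlplusplus"  (remaining '+' become "plus")
//   "tree"         -> "unix-tree"      (avoids GitLab's reserved /tree route)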
gitlabPath := reReplaceSinglePlus.ReplaceAllString(p.Pkgbase, "$1-$2")
|
||||
gitlabPath = reReplaceRemainingPlus.ReplaceAllString(gitlabPath, "plus")
|
||||
gitlabPath = reReplaceSpecialChars.ReplaceAllString(gitlabPath, "-")
|
||||
gitlabPath = reReplaceUnderscore.ReplaceAllString(gitlabPath, "-")
|
||||
gitlabPath = reReplaceTree.ReplaceAllString(gitlabPath, "unix-tree")
|
||||
|
||||
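// Clone the pinned packaging tag shallowly; transient GitLab failures are retried with Fibonacci
// backoff (starting at 10s, up to conf.MaxCloneRetries retries).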
gr := retry.NewFibonacci(10 * time.Second)
|
||||
gr = retry.WithMaxRetries(conf.MaxCloneRetries, gr)
|
||||
|
||||
if err := retry.Do(ctx, gr, func(ctx context.Context) error {
|
||||
cmd := exec.CommandContext(ctx, "git", "clone", "--depth", "1", "--branch", p.State.TagVer, //nolint:gosec
|
||||
fmt.Sprintf("https://gitlab.archlinux.org/archlinux/packaging/packages/%s.git", gitlabPath), buildDir)
|
||||
res, err := cmd.CombinedOutput()
|
||||
log.Debug(string(res))
|
||||
if err != nil {
|
||||
return retry.RetryableError(err)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return "", err
|
||||
}
|
||||
p.Pkgbuild = filepath.Join(buildDir, "PKGBUILD")
|
||||
|
||||
return buildDir, nil
|
||||
}
|
||||
|
||||
func (p *ProtoPackage) repoVersion() (string, error) {
|
||||
if err := p.findPkgFiles(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if len(p.PkgFiles) == 0 {
|
||||
return "", errors.New("not found")
|
||||
}
|
||||
|
||||
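// Package files are named "<pkgname>-<pkgver>-<pkgrel>-<arch>.pkg.tar.zst", so joining the third-
// and second-to-last '-'-separated fields yields "<pkgver>-<pkgrel>",
// e.g. "gnome-todo-41.0+r69+ga9a5b7cd-1.1-x86_64.pkg.tar.zst" -> "41.0+r69+ga9a5b7cd-1.1".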
fNameSplit := strings.Split(p.PkgFiles[0], "-")
|
||||
return fNameSplit[len(fNameSplit)-3] + "-" + fNameSplit[len(fNameSplit)-2], nil
|
||||
}
|
||||
|
||||
func (p *ProtoPackage) increasePkgRel(buildNo int) error {
|
||||
if p.Srcinfo == nil {
|
||||
err := p.genSrcinfo()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error generating srcinfo: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if p.Version == "" {
|
||||
p.Version = constructVersion(p.Srcinfo.Pkgver, p.Srcinfo.Pkgrel, p.Srcinfo.Epoch)
|
||||
}
|
||||
|
||||
f, err := os.OpenFile(p.Pkgbuild, os.O_RDWR, 0o644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func(f *os.File) {
|
||||
err := f.Close()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}(f)
|
||||
|
||||
fStr, err := io.ReadAll(f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// increase the build number: if pkgrel already carries one (e.g. "1.1"), bump it; otherwise append one (e.g. "1" -> "1.1")
|
||||
var nStr string
|
||||
if strings.Contains(p.Srcinfo.Pkgrel, ".") {
|
||||
pkgRelSplit := strings.Split(p.Srcinfo.Pkgrel, ".")
|
||||
pkgRelBuildNo, err := strconv.Atoi(pkgRelSplit[len(pkgRelSplit)-1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nStr = rePkgRel.ReplaceAllLiteralString(string(fStr), "pkgrel="+pkgRelSplit[0]+"."+strconv.Itoa(buildNo+pkgRelBuildNo))
|
||||
versionSplit := strings.Split(p.Version, "-")
|
||||
versionSplit[len(versionSplit)-1] = pkgRelSplit[0] + "." + strconv.Itoa(buildNo+pkgRelBuildNo)
|
||||
p.Version = strings.Join(versionSplit, "-")
|
||||
} else {
|
||||
nStr = rePkgRel.ReplaceAllLiteralString(string(fStr), "pkgrel="+p.Srcinfo.Pkgrel+"."+strconv.Itoa(buildNo))
|
||||
p.Version += "." + strconv.Itoa(buildNo)
|
||||
}
|
||||
|
||||
_, err = f.Seek(0, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = f.Truncate(0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = f.WriteString(nStr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *ProtoPackage) importKeys() error {
|
||||
if p.Srcinfo == nil {
|
||||
err := p.genSrcinfo()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error generating srcinfo: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if p.Srcinfo.ValidPGPKeys != nil {
|
||||
args := []string{"--keyserver", "keyserver.ubuntu.com", "--recv-keys"}
|
||||
args = append(args, p.Srcinfo.ValidPGPKeys...)
|
||||
cmd := exec.Command("gpg", args...)
|
||||
_, err := cmd.CombinedOutput()
|
||||
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *ProtoPackage) isAvailable(ctx context.Context, h *alpm.Handle) bool {
|
||||
dbs, err := h.SyncDBs()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
buildManager.alpmMutex.Lock()
|
||||
defer buildManager.alpmMutex.Unlock()
|
||||
|
||||
var pkg alpm.IPackage
|
||||
switch {
|
||||
case p.Srcinfo != nil:
|
||||
pkg, err = dbs.FindSatisfier(p.Srcinfo.Packages[0].Pkgname)
|
||||
case p.DBPackage != nil && len(p.DBPackage.Packages) > 0:
|
||||
pkg, err = dbs.FindSatisfier(p.DBPackage.Packages[0])
|
||||
default:
|
||||
cmd := exec.CommandContext(ctx, "unbuffer", "pacsift", "--exact", "--base="+p.Pkgbase, "--repo="+p.Repo.String(), //nolint:gosec
|
||||
"--sysroot="+filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot))
|
||||
var res []byte
|
||||
res, err = cmd.Output()
|
||||
if err != nil {
|
||||
log.Warningf("error getting packages from pacsift for %s: %v", p.Pkgbase, err)
|
||||
return false
|
||||
} else if len(res) == 0 {
|
||||
log.Warningf("error getting packages from pacsift for %s", p.Pkgbase)
|
||||
return false
|
||||
}
|
||||
|
||||
if len(strings.Split(strings.TrimSpace(string(res)), "\n")) > 0 {
|
||||
pacsiftLines := strings.Split(strings.TrimSpace(string(res)), "\n")
|
||||
|
||||
var splitPkgs []string
|
||||
for _, line := range pacsiftLines {
|
||||
splitPkgs = append(splitPkgs, strings.Split(line, "/")[1])
|
||||
}
|
||||
|
||||
if p.DBPackage != nil {
|
||||
p.DBPackage, err = p.DBPackage.Update().SetPackages(splitPkgs).Save(ctx)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
}
|
||||
pkg, err = dbs.FindSatisfier(splitPkgs[0])
|
||||
} else {
|
||||
log.Warningf("error getting packages from pacsift for %s", p.Pkgbase)
|
||||
return false
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
log.Debugf("error resolving %s: %v", p.Pkgbase, err)
|
||||
return false
|
||||
}
|
||||
|
||||
if pkg.DB().Name() != p.Repo.String() || pkg.Base() != p.Pkgbase {
|
||||
log.Debugf("%s: repo (%s!=%s) or pkgbase (%s!=%s) does not match", p.Pkgbase, pkg.DB().Name(), p.Repo.String(), pkg.Base(), p.Pkgbase)
|
||||
return false
|
||||
}
|
||||
|
||||
if p.Srcinfo != nil && (!Contains(p.Srcinfo.Arch, pkg.Architecture()) || p.Srcinfo.Pkgbase != pkg.Base()) {
|
||||
log.Debugf("%s: arch (%s!=%s) or pkgbase (%s!=%s) does not match", p.Pkgbase, p.Srcinfo.Arch[0],
|
||||
pkg.Architecture(), pkg.Base(), p.Pkgbase)
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (p *ProtoPackage) GitVersion(h *alpm.Handle) (string, error) {
|
||||
if p.Pkgbase == "" {
|
||||
return "", errors.New("invalid arguments")
|
||||
}
|
||||
|
||||
stateFiles, _ := Glob(filepath.Join(conf.Basedir.Work, stateDir, "**/"+p.Pkgbase))
|
||||
|
||||
var fStateFiles []string
|
||||
for _, stateFile := range stateFiles {
|
||||
_, subRepo, _, err := stateFileMeta(stateFile)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if subRepo != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if !Contains(fStateFiles, stateFile) {
|
||||
fStateFiles = append(fStateFiles, stateFile)
|
||||
}
|
||||
}
|
||||
|
||||
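// A pkgbase can show up in more than one repo's state (e.g. while moving between repos); in that
// case ask the sync DBs which repo currently ships it and keep only the matching statefile.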
if len(fStateFiles) > 1 {
|
||||
log.Infof("%s: multiple statefiles found, try resolving from mirror", p.Pkgbase)
|
||||
dbs, err := h.SyncDBs()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
buildManager.alpmMutex.Lock()
|
||||
iPackage, err := dbs.FindSatisfier(p.Pkgbase)
|
||||
buildManager.alpmMutex.Unlock()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
for _, stateFile := range fStateFiles {
|
||||
repo, _, _, err := stateFileMeta(stateFile)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if iPackage.DB().Name() == repo {
|
||||
fStateFiles = []string{stateFile}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(fStateFiles) > 1 {
|
||||
return "", MultipleStateFilesError{fmt.Errorf("%s: multiple statefiles found: %s", p.Pkgbase, fStateFiles)}
|
||||
}
|
||||
log.Infof("%s: resolving successful: MirrorRepo=%s; statefile chosen: %s", p.Pkgbase, iPackage.DB().Name(), fStateFiles[0])
|
||||
} else if len(fStateFiles) == 0 {
|
||||
return "", fmt.Errorf("%s: no matching statefile found (searched: %s, canidates: %s)", p.Pkgbase,
|
||||
filepath.Join(conf.Basedir.Work, stateDir, "**/"+p.Pkgbase), stateFiles)
|
||||
}
|
||||
|
||||
rawState, err := os.ReadFile(fStateFiles[0])
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error reading statefile %s: %w", fStateFiles[0], err)
|
||||
}
|
||||
state, err := parseState(string(rawState))
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error parsing statefile: %w", err)
|
||||
}
|
||||
|
||||
return state.PkgVer, nil
|
||||
}
|
||||
|
||||
func (p *ProtoPackage) isPkgFailed() bool {
|
||||
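// A recorded failure only counts if it refers to the current (or a newer) version; failures for
// older versions are ignored so the package gets rebuilt.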
if p.DBPackage.Version == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
if alpm.VerCmp(p.DBPackage.Version, p.Version) < 0 {
|
||||
return false
|
||||
}
|
||||
return p.DBPackage.Status == dbpackage.StatusFailed
|
||||
}
|
||||
|
||||
func (p *ProtoPackage) genSrcinfo() error {
|
||||
if p.Srcinfo != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
cmd := exec.Command("makepkg", "--printsrcinfo", "-p", filepath.Base(p.Pkgbuild)) //nolint:gosec
|
||||
cmd.Dir = filepath.Dir(p.Pkgbuild)
|
||||
res, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("makepkg exit non-zero (PKGBUILD: %s): %w (%s)", p.Pkgbuild, err, string(res))
|
||||
}
|
||||
|
||||
info, err := srcinfo.Parse(string(res))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.Srcinfo = info
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *ProtoPackage) findPkgFiles() error {
|
||||
pkgs, err := os.ReadDir(filepath.Join(conf.Basedir.Repo, p.FullRepo, "os", conf.Arch))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if p.DBPackage == nil && p.Srcinfo == nil {
|
||||
return errors.New("unable to find pkgfiles without dbpkg or srcinfo present")
|
||||
}
|
||||
|
||||
var realPkgs []string
|
||||
if p.DBPackage != nil {
|
||||
realPkgs = append(realPkgs, p.DBPackage.Packages...)
|
||||
} else {
|
||||
for i := range p.Srcinfo.Packages {
|
||||
realPkgs = append(realPkgs, p.Srcinfo.Packages[i].Pkgname)
|
||||
}
|
||||
}
|
||||
|
||||
var fPkg []string
|
||||
for _, file := range pkgs {
|
||||
if !file.IsDir() && !strings.HasSuffix(file.Name(), ".sig") {
|
||||
matches := rePkgFile.FindStringSubmatch(file.Name())
|
||||
|
||||
if len(matches) > 1 && Contains(realPkgs, matches[1]) {
|
||||
fPkg = append(fPkg, filepath.Join(conf.Basedir.Repo, p.FullRepo, "os", conf.Arch, file.Name()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
p.PkgFiles = fPkg
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *ProtoPackage) toDBPackage(ctx context.Context, create bool) error {
|
||||
if p.DBPackage != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
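// Look up the DB row keyed by pkgbase, march and repository; if none exists and create is set,
// insert one.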
dbPkg, err := db.DBPackage.Query().Where(
|
||||
dbpackage.Pkgbase(p.Pkgbase),
|
||||
dbpackage.March(p.March),
|
||||
dbpackage.RepositoryEQ(p.Repo),
|
||||
).Only(ctx)
|
||||
if err != nil && ent.IsNotFound(err) && create {
|
||||
dbPkg, err = db.DBPackage.Create().
|
||||
SetPkgbase(p.Pkgbase).
|
||||
SetMarch(p.March).
|
||||
SetRepository(p.Repo).
|
||||
Save(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else if err != nil && !ent.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
p.DBPackage = dbPkg
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *ProtoPackage) exists() (bool, error) {
|
||||
dbPkg, err := db.DBPackage.Query().Where(dbpackage.And(dbpackage.Pkgbase(p.Pkgbase), dbpackage.March(p.March))).Exist(context.Background())
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return dbPkg, nil
|
||||
}
|
||||
|
||||
func (p *ProtoPackage) isMirrorLatest(h *alpm.Handle) (latest bool, foundPkg *alpm.Package, version string, err error) {
|
||||
dbs, err := h.SyncDBs()
|
||||
if err != nil {
|
||||
return false, nil, "", err
|
||||
}
|
||||
|
||||
allDepends := p.Srcinfo.Depends
|
||||
allDepends = append(allDepends, p.Srcinfo.MakeDepends...)
|
||||
// add gcc to the dependencies, since we can't know for sure if it's in use
|
||||
// prevents issues like #111
|
||||
allDepends = append(allDepends, srcinfo.ArchString{
|
||||
Arch: "x86_64",
|
||||
Value: "gcc",
|
||||
})
|
||||
|
||||
for _, dep := range allDepends {
|
||||
buildManager.alpmMutex.Lock()
|
||||
pkg, err := dbs.FindSatisfier(dep.Value)
|
||||
buildManager.alpmMutex.Unlock()
|
||||
if err != nil {
|
||||
return false, nil, "", UnableToSatisfyError{err}
|
||||
}
|
||||
|
||||
svn2gitVer, err := (&ProtoPackage{ //nolint:exhaustruct,exhaustivestruct
|
||||
Pkgbase: pkg.Base(),
|
||||
March: p.March,
|
||||
}).GitVersion(h)
|
||||
if err != nil {
|
||||
return false, nil, "", err
|
||||
} else if svn2gitVer == "" {
|
||||
return false, nil, "", errors.New("no svn2git version")
|
||||
}
|
||||
|
||||
if alpm.VerCmp(svn2gitVer, pkg.Version()) > 0 {
|
||||
switch v := pkg.(type) {
|
||||
case *alpm.Package:
|
||||
return false, v, svn2gitVer, nil
|
||||
default:
|
||||
return false, nil, "", fmt.Errorf("invalid package type: %T", pkg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil, "", nil
|
||||
}
|
||||
|
||||
func (p *ProtoPackage) PkgbaseEquals(p2 *ProtoPackage, marchSensitive bool) bool {
|
||||
return (marchSensitive && (p.Pkgbase == p2.Pkgbase && p.FullRepo == p2.FullRepo)) || (!marchSensitive && p.Pkgbase == p2.Pkgbase)
|
||||
}
|
||||
|
||||
func (p *ProtoPackage) IsBuilt() (bool, error) {
|
||||
if p.DBPackage == nil || len(p.DBPackage.Packages) == 0 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
matches, err := filepath.Glob(filepath.Join(conf.Basedir.Work, waitingDir, p.FullRepo, p.DBPackage.Packages[0]+"*-x86_64.pkg.tar.zst"))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return len(matches) > 0, nil
|
||||
}
|
187
proto_package_test.go
Normal file
@@ -0,0 +1,187 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const PkgbuildTest = `# Maintainer: Jan Alexander Steffens (heftig) <heftig@archlinux.org>
|
||||
|
||||
pkgname=gnome-todo
|
||||
pkgver=41.0+r69+ga9a5b7cd
|
||||
pkgrel=1
|
||||
pkgdesc="Task manager for GNOME"
|
||||
url="https://wiki.gnome.org/Apps/Todo"
|
||||
arch=(x86_64)
|
||||
license=(GPL)
|
||||
depends=(evolution-data-server libpeas python gtk4 libportal-gtk4 libadwaita)
|
||||
makedepends=(gobject-introspection appstream-glib git meson yelp-tools)
|
||||
groups=(gnome-extra)
|
||||
_commit=a9a5b7cdde0244331d2d49220f04018be60c018e # master
|
||||
source=("git+https://gitlab.gnome.org/GNOME/gnome-todo.git#commit=$_commit")
|
||||
sha256sums=('SKIP')
|
||||
|
||||
pkgver() {
|
||||
cd $pkgname
|
||||
git describe --tags | sed 's/^GNOME_TODO_//;s/_/./g;s/[^-]*-g/r&/;s/-/+/g'
|
||||
|
||||
}
|
||||
|
||||
prepare() {
|
||||
cd $pkgname
|
||||
}
|
||||
|
||||
build() {
|
||||
arch-meson $pkgname build
|
||||
meson compile -C build
|
||||
}
|
||||
|
||||
check() (
|
||||
glib-compile-schemas "${GSETTINGS_SCHEMA_DIR:=$PWD/$pkgname/data}"
|
||||
export GSETTINGS_SCHEMA_DIR
|
||||
|
||||
meson test -C build --print-errorlogs
|
||||
)
|
||||
|
||||
package() {
|
||||
meson install -C build --destdir "$pkgdir"
|
||||
}
|
||||
|
||||
# vim:set sw=2 et:
|
||||
`
|
||||
|
||||
const PkgbuildTestWithPkgrelSub = `# Maintainer: Jan Alexander Steffens (heftig) <heftig@archlinux.org>
|
||||
|
||||
pkgname=gnome-todo
|
||||
pkgver=41.0+r69+ga9a5b7cd
|
||||
pkgrel=1.1
|
||||
pkgdesc="Task manager for GNOME"
|
||||
url="https://wiki.gnome.org/Apps/Todo"
|
||||
arch=(x86_64)
|
||||
license=(GPL)
|
||||
depends=(evolution-data-server libpeas python gtk4 libportal-gtk4 libadwaita)
|
||||
makedepends=(gobject-introspection appstream-glib git meson yelp-tools)
|
||||
groups=(gnome-extra)
|
||||
_commit=a9a5b7cdde0244331d2d49220f04018be60c018e # master
|
||||
source=("git+https://gitlab.gnome.org/GNOME/gnome-todo.git#commit=$_commit")
|
||||
sha256sums=('SKIP')
|
||||
|
||||
pkgver() {
|
||||
cd $pkgname
|
||||
git describe --tags | sed 's/^GNOME_TODO_//;s/_/./g;s/[^-]*-g/r&/;s/-/+/g'
|
||||
|
||||
}
|
||||
|
||||
prepare() {
|
||||
cd $pkgname
|
||||
}
|
||||
|
||||
build() {
|
||||
arch-meson $pkgname build
|
||||
meson compile -C build
|
||||
}
|
||||
|
||||
check() (
|
||||
glib-compile-schemas "${GSETTINGS_SCHEMA_DIR:=$PWD/$pkgname/data}"
|
||||
export GSETTINGS_SCHEMA_DIR
|
||||
|
||||
meson test -C build --print-errorlogs
|
||||
)
|
||||
|
||||
package() {
|
||||
meson install -C build --destdir "$pkgdir"
|
||||
}
|
||||
|
||||
# vim:set sw=2 et:
|
||||
`
|
||||
|
||||
func TestIncreasePkgRel(t *testing.T) { //nolint:paralleltest
|
||||
pkgbuild, err := os.CreateTemp(t.TempDir(), "")
|
||||
if err != nil {
|
||||
t.Fatal("unable to setup temp. PKGBUILD")
|
||||
}
|
||||
defer func(name string) {
|
||||
_ = os.Remove(name)
|
||||
}(pkgbuild.Name())
|
||||
|
||||
_, err = pkgbuild.WriteString(PkgbuildTest)
|
||||
if err != nil {
|
||||
t.Fatal("unable to write to temp. PKGBUILD")
|
||||
}
|
||||
_ = pkgbuild.Close()
|
||||
|
||||
buildPkg := &ProtoPackage{
|
||||
Pkgbase: "gnome-todo",
|
||||
Pkgbuild: pkgbuild.Name(),
|
||||
}
|
||||
|
||||
err = buildPkg.increasePkgRel(1)
|
||||
if err != nil {
|
||||
t.Logf("increasePkgRel: %v", err)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
versionSplit := strings.Split(buildPkg.Version, "-")
|
||||
if versionSplit[len(versionSplit)-1] != "1.1" {
|
||||
t.Logf("increasePkgRel: expected 1.1 pkgrel, got: %s", buildPkg.Version)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
buildPkg.Srcinfo = nil
|
||||
err = buildPkg.genSrcinfo()
|
||||
if err != nil {
|
||||
t.Logf("increasePkgRel: %v", err)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if buildPkg.Srcinfo.Pkgrel != "1.1" {
|
||||
t.Logf("increasePkgRel: expected 1.1 pkgrel, got: %s", buildPkg.Srcinfo.Pkgrel)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
func TestIncreasePkgRelWithPkgSub(t *testing.T) { //nolint:paralleltest
|
||||
pkgbuild, err := os.CreateTemp(t.TempDir(), "")
|
||||
if err != nil {
|
||||
t.Fatal("unable to setup temp. PKGBUILD")
|
||||
}
|
||||
defer func(name string) {
|
||||
_ = os.Remove(name)
|
||||
}(pkgbuild.Name())
|
||||
|
||||
_, err = pkgbuild.WriteString(PkgbuildTestWithPkgrelSub)
|
||||
if err != nil {
|
||||
t.Fatal("unable to write to temp. PKGBUILD")
|
||||
}
|
||||
_ = pkgbuild.Close()
|
||||
|
||||
buildPkg := &ProtoPackage{
|
||||
Pkgbase: "gnome-todo",
|
||||
Pkgbuild: pkgbuild.Name(),
|
||||
}
|
||||
|
||||
err = buildPkg.increasePkgRel(1)
|
||||
if err != nil {
|
||||
t.Logf("increasePkgRel: %v", err)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
versionSplit := strings.Split(buildPkg.Version, "-")
|
||||
if versionSplit[len(versionSplit)-1] != "1.2" {
|
||||
t.Logf("increasePkgRel: expected 1.2 pkgrel, got: %s", buildPkg.Version)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
buildPkg.Srcinfo = nil
|
||||
err = buildPkg.genSrcinfo()
|
||||
if err != nil {
|
||||
t.Logf("increasePkgRel: %v", err)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if buildPkg.Srcinfo.Pkgrel != "1.2" {
|
||||
t.Logf("increasePkgRel: expected 1.2 pkgrel, got: %s", buildPkg.Srcinfo.Pkgrel)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
18
rm_chroot.py
Normal file
@@ -0,0 +1,18 @@
|
||||
#!/usr/bin/env python3

import shutil
import sys
from pathlib import Path

# Removes a build chroot, refusing anything outside SAVE_PATH; typically run with elevated
# privileges (e.g. via sudo) by the build service.
SAVE_PATH = "/path/to/workdir"

try:
    # Resolve symlinks and fail early if the argument is missing or the path does not exist.
    chroot_abs = Path(sys.argv[1]).resolve(strict=True)
except (IndexError, OSError):
    print("path does not resolve")
    sys.exit(1)

if str(chroot_abs).startswith(SAVE_PATH):
    # Delete the chroot tree directly instead of shelling out to "rm -rf".
    shutil.rmtree(chroot_abs)
else:
    sys.exit(2)
|
@@ -1,135 +0,0 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8"/>
|
||||
<meta content="width=device-width, initial-scale=1" name="viewport"/>
|
||||
|
||||
<!-- Bootstrap CSS -->
|
||||
<link crossorigin="anonymous" href="https://cdn.jsdelivr.net/npm/bootstrap@5.1.0/dist/css/bootstrap.min.css"
|
||||
integrity="sha384-KyZXEAg3QhqLMpG8r+8fhAXLRk2vvoC2f3B09zVXn8CA5QIVfZOJ3BCsw2P0p/We" rel="stylesheet"/>
|
||||
|
||||
<link href="https://cdn.jsdelivr.net/npm/bootstrap-icons@1.5.0/font/bootstrap-icons.css" rel="stylesheet"/>
|
||||
<style>
|
||||
.accordion:last-child {
|
||||
padding-bottom: 8vh;
|
||||
}
|
||||
</style>
|
||||
|
||||
<title>ALHP Status</title>
|
||||
</head>
|
||||
<body>
|
||||
<nav class="navbar navbar-expand-lg sticky-top navbar-light bg-info mb-5">
|
||||
<div class="container">
|
||||
<div class="d-flex justify-content-start">
|
||||
<span class="navbar-brand align-middle">ALHP Status</span>
|
||||
<span class="navbar-text">
|
||||
<a href="https://git.harting.dev/anonfunc/ALHP.GO" class="align-middle position-absolute"><i
|
||||
class="bi bi-github h4"></i></a>
|
||||
</span>
|
||||
</div>
|
||||
|
||||
<div class="d-flex justify-content-end">
|
||||
<input type="search" placeholder="Search for packages.." class="form-control" id="table-sort-input"
|
||||
title="Type in a name"/>
|
||||
</div>
|
||||
</div>
|
||||
</nav>
|
||||
|
||||
<div class="container">
|
||||
{{range $march := .March}}
|
||||
<h3>{{$march.Name}}</h3>
|
||||
<div class="accordion" id="accordion-{{$march.Name}}">
|
||||
{{range $repo := $march.Repos}}
|
||||
<div class="accordion-item">
|
||||
<h2 class="accordion-header" id="heading-{{$march.Name}}-{{$repo.Name}}">
|
||||
<button aria-controls="collapse-{{$march.Name}}-{{$repo.Name}}" aria-expanded="false"
|
||||
class="accordion-button"
|
||||
data-bs-target="#collapse-{{$march.Name}}-{{$repo.Name}}"
|
||||
data-bs-toggle="collapse"
|
||||
type="button">{{$repo.Name}}-{{$march.Name}}
|
||||
</button>
|
||||
</h2>
|
||||
<div aria-labelledby="heading-{{$march.Name}}-{{$repo.Name}}" class="accordion-collapse collapse show"
|
||||
data-bs-parent="#accordion-{{$march.Name}}" id="collapse-{{$march.Name}}-{{$repo.Name}}">
|
||||
<div class="accordion-body overflow-auto">
|
||||
<table class="table table-sorted">
|
||||
<thead>
|
||||
<tr>
|
||||
<th scope="col">Pkgbase</th>
|
||||
<th scope="col">Status</th>
|
||||
<th scope="col">Reason</th>
|
||||
<th scope="col">SVN2GIT Version</th>
|
||||
<th scope="col">{{$repo.Name}}-{{$march.Name}} Version</th>
|
||||
<th scope="col">Info</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{range $pkg := $repo.Packages}}
|
||||
<tr class="{{$pkg.Class}}" id="{{$repo.Name}}-{{$march.Name}}-{{$pkg.Pkgbase}}">
|
||||
<td>{{$pkg.Pkgbase}}</td>
|
||||
<td>{{$pkg.Status}}</td>
|
||||
<td>{{$pkg.Skip}}</td>
|
||||
<td>{{$pkg.Svn2GitVersion}}</td>
|
||||
<td>{{$pkg.Version}}</td>
|
||||
<td class="text-center">
|
||||
{{with $pkg.Log}}<a href="{{.}}"
|
||||
><i class="bi bi-file-text-fill"></i></a
|
||||
>{{end}}
|
||||
<a data-bs-html="true" data-bs-placement="bottom" data-bs-toggle="tooltip"
|
||||
href="#{{$repo.Name}}-{{$march.Name}}-{{$pkg.Pkgbase}}"
|
||||
title="{{if $pkg.BuildDate}}Build Date: {{$pkg.BuildDate}} {{end}}{{if $pkg.BuildDuration}}Build Duration: {{$pkg.BuildDuration}} {{end}}Check date: {{$pkg.Checked}}">
|
||||
<i class="bi bi-info-circle-fill"></i></a>
|
||||
</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{{end}}
|
||||
</div>
|
||||
{{end}}
|
||||
</div>
|
||||
|
||||
<footer class="text-center text-lg-start bg-dark text-muted mt-3 fixed-bottom">
|
||||
<div class="text-center p-2" style="background-color: rgba(0, 0, 0, 0.05)">
|
||||
{{.Generated}}
|
||||
</div>
|
||||
</footer>
|
||||
|
||||
<script crossorigin="anonymous" integrity="sha384-U1DAWAznBHeqEIlVSCgzq+c9gqGAJn5c/t99JyeKa9xxaYpSvHU5awsuZVVFIhvj"
|
||||
src="https://cdn.jsdelivr.net/npm/bootstrap@5.1.0/dist/js/bootstrap.bundle.min.js"></script>
|
||||
<script>
|
||||
let input = document.getElementById('table-sort-input');
|
||||
let timeout = null;
|
||||
|
||||
input.addEventListener('keyup', function (e) {
|
||||
clearTimeout(timeout);
|
||||
|
||||
timeout = setTimeout(searchFilter, 200);
|
||||
});
|
||||
|
||||
function searchFilter() {
|
||||
let input, filter, table, tr, td, i, txtValue;
|
||||
input = document.getElementById('table-sort-input')
|
||||
filter = input.value.toUpperCase()
|
||||
const tables = document.getElementsByClassName('table-sorted');
|
||||
for (let j = 0; j < tables.length; j++) {
|
||||
tr = tables[j].getElementsByTagName('tr')
|
||||
for (i = 0; i < tr.length; i++) {
|
||||
td = tr[i].getElementsByTagName('td')[0]
|
||||
if (td) {
|
||||
txtValue = td.textContent || td.innerText
|
||||
if (txtValue.toUpperCase().indexOf(filter) > -1) {
|
||||
tr[i].style.display = ''
|
||||
} else {
|
||||
tr[i].style.display = 'none'
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
</script>
|
||||
</body>
|
||||
</html>
|