Compare commits

30 Commits
main ... main

Author SHA1 Message Date
c191c12237 update deps 2026-04-10 19:40:02 +02:00
e6833548a6 update deps 2025-12-19 20:42:48 +01:00
ed979ced99 update deps & regen ent 2025-10-17 20:42:22 +02:00
9c2fa9bc2d remove pacsift workaround 2025-05-09 21:46:12 +02:00
01404adad5 fix double convert to bytes 2025-03-22 23:49:07 +01:00
d057e18453 fix channel read 2025-03-22 23:37:52 +01:00
bcfaccfec5 switch result pointer with channel 2025-03-22 23:13:15 +01:00
f4f64e1999 add nil check 2025-03-22 21:58:38 +01:00
48c66d429a fix memory val not geeting updated 2025-03-22 21:46:51 +01:00
daf6f13542 try a different memory tracking approach 2025-03-22 21:21:58 +01:00
b73f1c8f60 use task childs instead of pgids to track build process 2025-03-16 21:25:17 +01:00
70a09fbc7d better errors 2025-03-16 21:06:19 +01:00
a933dee30f better process memory reading error handling 2025-03-16 20:49:26 +01:00
9369f36c29 add more debugging 2025-03-16 20:33:37 +01:00
cc754cf4c7 stop polling after build 2025-03-16 18:18:30 +01:00
185837bd3c switched to polling process tree rss usage to track mem usage 2025-03-16 18:16:01 +01:00
26d33f20d3 use promauto to add metric 2025-02-24 14:00:36 +01:00
362c5d379c use correct conf var 2025-02-24 12:10:01 +01:00
8784e63a9d minor context refactor; add metrics 2025-02-22 03:05:00 +01:00
1c90e20a10 fix check in housekeeping not checking the actual no-build list 2025-01-26 20:14:16 +01:00
2e080c8268 handle makepkg.conf.d files 2025-01-26 14:43:14 +01:00
1b76a0fcf3 use t.TempDir in tests 2025-01-26 13:33:57 +01:00
7ed9dac855 add housekeeping check for packages removed from the no-build list 2025-01-26 13:33:28 +01:00
9aa5ed719e update deps 2025-01-22 23:11:54 +01:00
3cfebda5e2 update deps 2024-12-15 02:18:53 +01:00
d4dbfdea1c revert back to yaml v2, fix envs with yaml lists 2024-12-15 02:11:28 +01:00
ab238cf897 update yaml to v3 2024-12-15 01:16:38 +01:00
7c8626dfcb update deps 2024-12-15 01:05:52 +01:00
9695d146c5 minor code cleanup 2024-12-15 01:03:39 +01:00
286134674b update deps 2024-11-20 23:04:35 +01:00
21 changed files with 1445 additions and 1174 deletions

View File

@@ -14,7 +14,7 @@ linters-settings:
disabled-checks: disabled-checks:
- whyNoLint - whyNoLint
- filepathJoin - filepathJoin
gomnd: mnd:
checks: checks:
- argument - argument
- case - case
@@ -91,3 +91,4 @@ linters:
- errname - errname
- goerr113 - goerr113
- depguard - depguard
- noctx

View File

@@ -5,6 +5,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"github.com/c2h5oh/datasize" "github.com/c2h5oh/datasize"
"github.com/prometheus/client_golang/prometheus"
"github.com/sethvargo/go-retry" "github.com/sethvargo/go-retry"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"os" "os"
@@ -26,6 +27,9 @@ type BuildManager struct {
building []*ProtoPackage building []*ProtoPackage
buildingLock *sync.RWMutex buildingLock *sync.RWMutex
queueSignal chan struct{} queueSignal chan struct{}
metrics struct {
queueSize *prometheus.GaugeVec
}
} }
func (b *BuildManager) buildQueue(ctx context.Context, queue []*ProtoPackage) error { func (b *BuildManager) buildQueue(ctx context.Context, queue []*ProtoPackage) error {
@@ -74,12 +78,13 @@ func (b *BuildManager) buildQueue(ctx context.Context, queue []*ProtoPackage) er
} }
// check if package can be built with current memory limit // check if package can be built with current memory limit
if datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB > conf.Build.MemoryLimit { if datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB > conf.Build.MemoryLimit { //nolint:gosec
log.Warningf("[Q] %s->%s exeeds memory limit: %s->%s", pkg.FullRepo, pkg.Pkgbase, log.Warningf("[Q] %s->%s exeeds memory limit: %s->%s", pkg.FullRepo, pkg.Pkgbase,
datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB, conf.Build.MemoryLimit) datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB, conf.Build.MemoryLimit) //nolint:gosec
doneQLock.Lock() doneQLock.Lock()
doneQ = append(doneQ, pkg) doneQ = append(doneQ, pkg)
doneQLock.Unlock() doneQLock.Unlock()
b.metrics.queueSize.WithLabelValues(pkg.FullRepo, "queued").Dec()
continue continue
} }
@@ -88,9 +93,9 @@ func (b *BuildManager) buildQueue(ctx context.Context, queue []*ProtoPackage) er
b.buildingLock.RUnlock() b.buildingLock.RUnlock()
// check if package can be build right now // check if package can be build right now
if currentMemLoad+(datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB) > conf.Build.MemoryLimit { if currentMemLoad+(datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB) > conf.Build.MemoryLimit { //nolint:gosec
log.Debugf("[Q] skipped package with max_rss %s while load %s: %s->%s", log.Debugf("[Q] skipped package with max_rss %s while load %s: %s->%s",
datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB, currentMemLoad, pkg.Pkgbase, pkg.March) datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB, currentMemLoad, pkg.Pkgbase, pkg.March) //nolint:gosec
continue continue
} }
} else { } else {
@@ -106,14 +111,18 @@ func (b *BuildManager) buildQueue(ctx context.Context, queue []*ProtoPackage) er
b.building = append(b.building, pkg) b.building = append(b.building, pkg)
b.buildingLock.Unlock() b.buildingLock.Unlock()
queueNoMatch = false queueNoMatch = false
b.metrics.queueSize.WithLabelValues(pkg.FullRepo, "queued").Dec()
b.metrics.queueSize.WithLabelValues(pkg.FullRepo, "building").Inc()
go func(pkg *ProtoPackage) { go func(pkg *ProtoPackage) {
dur, err := pkg.build(ctx) dur, err := pkg.build(ctx)
b.metrics.queueSize.WithLabelValues(pkg.FullRepo, "building").Dec()
if err != nil && !errors.Is(err, ErrorNotEligible) { if err != nil && !errors.Is(err, ErrorNotEligible) {
log.Warningf("[Q] error building package %s->%s in %s: %s", pkg.FullRepo, pkg.Pkgbase, dur, err) log.Warningf("[Q] error building package %s->%s in %s: %s", pkg.FullRepo, pkg.Pkgbase, dur, err)
b.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg} b.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
} else if err == nil { } else if err == nil {
log.Infof("[Q] build successful: %s->%s (%s)", pkg.FullRepo, pkg.Pkgbase, dur) log.Infof("[Q] build successful: %s->%s (%s)", pkg.FullRepo, pkg.Pkgbase, dur)
b.metrics.queueSize.WithLabelValues(pkg.FullRepo, "built").Inc()
} }
doneQLock.Lock() doneQLock.Lock()
b.buildingLock.Lock() b.buildingLock.Lock()
@@ -147,7 +156,7 @@ func (b *BuildManager) buildQueue(ctx context.Context, queue []*ProtoPackage) er
return nil return nil
} }
func (b *BuildManager) repoWorker(repo string) { func (b *BuildManager) repoWorker(ctx context.Context, repo string) {
for { for {
select { select {
case pkgL := <-b.repoAdd[repo]: case pkgL := <-b.repoAdd[repo]:
@@ -159,7 +168,7 @@ func (b *BuildManager) repoWorker(repo string) {
args := []string{"-s", "-v", "-p", "-n", filepath.Join(conf.Basedir.Repo, repo, "os", conf.Arch, repo) + ".db.tar.xz"} args := []string{"-s", "-v", "-p", "-n", filepath.Join(conf.Basedir.Repo, repo, "os", conf.Arch, repo) + ".db.tar.xz"}
args = append(args, toAdd...) args = append(args, toAdd...)
cmd := exec.Command("repo-add", args...) cmd := exec.CommandContext(ctx, "repo-add", args...)
res, err := cmd.CombinedOutput() res, err := cmd.CombinedOutput()
log.Debug(string(res)) log.Debug(string(res))
if err != nil && cmd.ProcessState.ExitCode() != 1 { if err != nil && cmd.ProcessState.ExitCode() != 1 {
@@ -167,7 +176,7 @@ func (b *BuildManager) repoWorker(repo string) {
} }
for _, pkg := range pkgL { for _, pkg := range pkgL {
err = pkg.toDBPackage(true) err = pkg.toDBPackage(ctx, true)
if err != nil { if err != nil {
log.Warningf("error getting db entry for %s: %v", pkg.Pkgbase, err) log.Warningf("error getting db entry for %s: %v", pkg.Pkgbase, err)
continue continue
@@ -185,10 +194,12 @@ func (b *BuildManager) repoWorker(repo string) {
} else { } else {
pkgUpd = pkgUpd.SetDebugSymbols(dbpackage.DebugSymbolsNotAvailable) pkgUpd = pkgUpd.SetDebugSymbols(dbpackage.DebugSymbolsNotAvailable)
} }
pkg.DBPackage = pkgUpd.SaveX(context.Background()) if pkg.DBPackage, err = pkgUpd.Save(ctx); err != nil {
log.Error(err)
}
} }
cmd = exec.Command("paccache", "-rc", filepath.Join(conf.Basedir.Repo, repo, "os", conf.Arch), "-k", "1") //nolint:gosec cmd = exec.CommandContext(ctx, "paccache", "-rc", filepath.Join(conf.Basedir.Repo, repo, "os", conf.Arch), "-k", "1") //nolint:gosec
res, err = cmd.CombinedOutput() res, err = cmd.CombinedOutput()
log.Debug(string(res)) log.Debug(string(res))
if err != nil { if err != nil {
@@ -211,7 +222,10 @@ func (b *BuildManager) repoWorker(repo string) {
continue continue
} else if len(pkg.PkgFiles) == 0 { } else if len(pkg.PkgFiles) == 0 {
if pkg.DBPackage != nil { if pkg.DBPackage != nil {
_ = pkg.DBPackage.Update().ClearRepoVersion().ClearTagRev().Exec(context.Background()) err = pkg.DBPackage.Update().ClearRepoVersion().ClearTagRev().Exec(ctx)
if err != nil {
log.Error(err)
}
} }
continue continue
} }
@@ -231,7 +245,7 @@ func (b *BuildManager) repoWorker(repo string) {
b.repoWG.Add(1) b.repoWG.Add(1)
args := []string{"-s", "-v", filepath.Join(conf.Basedir.Repo, pkg.FullRepo, "os", conf.Arch, pkg.FullRepo) + ".db.tar.xz"} args := []string{"-s", "-v", filepath.Join(conf.Basedir.Repo, pkg.FullRepo, "os", conf.Arch, pkg.FullRepo) + ".db.tar.xz"}
args = append(args, realPkgs...) args = append(args, realPkgs...)
cmd := exec.Command("repo-remove", args...) cmd := exec.CommandContext(ctx, "repo-remove", args...)
res, err := cmd.CombinedOutput() res, err := cmd.CombinedOutput()
log.Debug(string(res)) log.Debug(string(res))
if err != nil && cmd.ProcessState.ExitCode() == 1 { if err != nil && cmd.ProcessState.ExitCode() == 1 {
@@ -239,7 +253,10 @@ func (b *BuildManager) repoWorker(repo string) {
} }
if pkg.DBPackage != nil { if pkg.DBPackage != nil {
_ = pkg.DBPackage.Update().ClearRepoVersion().ClearTagRev().Exec(context.Background()) err = pkg.DBPackage.Update().ClearRepoVersion().ClearTagRev().Exec(ctx)
if err != nil {
log.Error(err)
}
} }
for _, file := range pkg.PkgFiles { for _, file := range pkg.PkgFiles {
@@ -265,14 +282,14 @@ func (b *BuildManager) syncWorker(ctx context.Context) error {
gitPath := filepath.Join(conf.Basedir.Work, stateDir) gitPath := filepath.Join(conf.Basedir.Work, stateDir)
for { for {
if _, err := os.Stat(gitPath); os.IsNotExist(err) { if _, err := os.Stat(gitPath); os.IsNotExist(err) {
cmd := exec.Command("git", "clone", "--depth=1", conf.StateRepo, gitPath) //nolint:gosec cmd := exec.CommandContext(ctx, "git", "clone", "--depth=1", conf.StateRepo, gitPath) //nolint:gosec
res, err := cmd.CombinedOutput() res, err := cmd.CombinedOutput()
log.Debug(string(res)) log.Debug(string(res))
if err != nil { if err != nil {
log.Fatalf("error cloning state repo: %v", err) log.Fatalf("error cloning state repo: %v", err)
} }
} else if err == nil { } else if err == nil {
cmd := exec.Command("git", "reset", "--hard") cmd := exec.CommandContext(ctx, "git", "reset", "--hard")
cmd.Dir = gitPath cmd.Dir = gitPath
res, err := cmd.CombinedOutput() res, err := cmd.CombinedOutput()
log.Debug(string(res)) log.Debug(string(res))
@@ -280,7 +297,7 @@ func (b *BuildManager) syncWorker(ctx context.Context) error {
log.Fatalf("error reseting state repo: %v", err) log.Fatalf("error reseting state repo: %v", err)
} }
cmd = exec.Command("git", "pull") cmd = exec.CommandContext(ctx, "git", "pull")
cmd.Dir = gitPath cmd.Dir = gitPath
res, err = cmd.CombinedOutput() res, err = cmd.CombinedOutput()
log.Debug(string(res)) log.Debug(string(res))
@@ -294,8 +311,8 @@ func (b *BuildManager) syncWorker(ctx context.Context) error {
for _, repo := range repos { for _, repo := range repos {
wg.Add(1) wg.Add(1)
splitRepo := strings.Split(repo, "-") splitRepo := strings.Split(repo, "-")
go func() { //nolint:contextcheck go func() {
err := housekeeping(splitRepo[0], strings.Join(splitRepo[1:], "-"), wg) err := housekeeping(ctx, splitRepo[0], strings.Join(splitRepo[1:], "-"), wg)
if err != nil { if err != nil {
log.Warningf("[%s] housekeeping failed: %v", repo, err) log.Warningf("[%s] housekeeping failed: %v", repo, err)
} }
@@ -303,7 +320,7 @@ func (b *BuildManager) syncWorker(ctx context.Context) error {
} }
wg.Wait() wg.Wait()
err := logHK() //nolint:contextcheck err := logHK(ctx)
if err != nil { if err != nil {
log.Warningf("log-housekeeping failed: %v", err) log.Warningf("log-housekeeping failed: %v", err)
} }
@@ -317,7 +334,7 @@ func (b *BuildManager) syncWorker(ctx context.Context) error {
} }
if err := retry.Fibonacci(ctx, 1*time.Second, func(_ context.Context) error { if err := retry.Fibonacci(ctx, 1*time.Second, func(_ context.Context) error {
if err := setupChroot(); err != nil { if err := setupChroot(ctx); err != nil {
log.Warningf("unable to upgrade chroot, trying again later") log.Warningf("unable to upgrade chroot, trying again later")
return retry.RetryableError(err) return retry.RetryableError(err)
} }
@@ -333,20 +350,21 @@ func (b *BuildManager) syncWorker(ctx context.Context) error {
} }
b.alpmMutex.Unlock() b.alpmMutex.Unlock()
queue, err := b.genQueue() //nolint:contextcheck queue, err := b.genQueue(ctx)
if err != nil { if err != nil {
log.Errorf("error building queue: %v", err) log.Errorf("error building queue: %v", err)
} else { return err
log.Debugf("build-queue with %d items", len(queue)) }
err = b.buildQueue(ctx, queue)
if err != nil { log.Debugf("build-queue with %d items", len(queue))
return err err = b.buildQueue(ctx, queue)
} if err != nil {
return err
} }
if ctx.Err() == nil { if ctx.Err() == nil {
for _, repo := range repos { for _, repo := range repos {
err = movePackagesLive(repo) //nolint:contextcheck err = movePackagesLive(ctx, repo)
if err != nil { if err != nil {
log.Errorf("[%s] error moving packages live: %v", repo, err) log.Errorf("[%s] error moving packages live: %v", repo, err)
} }
@@ -355,12 +373,13 @@ func (b *BuildManager) syncWorker(ctx context.Context) error {
return ctx.Err() return ctx.Err()
} }
b.metrics.queueSize.Reset()
log.Debugf("build-cycle finished") log.Debugf("build-cycle finished")
time.Sleep(time.Duration(*checkInterval) * time.Minute) time.Sleep(time.Duration(*checkInterval) * time.Minute)
} }
} }
func (b *BuildManager) genQueue() ([]*ProtoPackage, error) { func (b *BuildManager) genQueue(ctx context.Context) ([]*ProtoPackage, error) {
stateFiles, err := Glob(filepath.Join(conf.Basedir.Work, stateDir, "**/*")) stateFiles, err := Glob(filepath.Join(conf.Basedir.Work, stateDir, "**/*"))
if err != nil { if err != nil {
return nil, fmt.Errorf("error scanning for state-files: %w", err) return nil, fmt.Errorf("error scanning for state-files: %w", err)
@@ -406,13 +425,13 @@ func (b *BuildManager) genQueue() ([]*ProtoPackage, error) {
Arch: arch, Arch: arch,
} }
err = pkg.toDBPackage(false) err = pkg.toDBPackage(ctx, false)
if err != nil { if err != nil {
log.Warningf("[QG] error getting/creating dbpackage %s: %v", state.Pkgbase, err) log.Warningf("[QG] error getting/creating dbpackage %s: %v", state.Pkgbase, err)
continue continue
} }
if !pkg.isAvailable(alpmHandle) { if !pkg.isAvailable(ctx, alpmHandle) {
log.Debugf("[QG] %s->%s not available on mirror, skipping build", pkg.FullRepo, pkg.Pkgbase) log.Debugf("[QG] %s->%s not available on mirror, skipping build", pkg.FullRepo, pkg.Pkgbase)
continue continue
} }
@@ -427,7 +446,7 @@ func (b *BuildManager) genQueue() ([]*ProtoPackage, error) {
} }
if pkg.DBPackage == nil { if pkg.DBPackage == nil {
err = pkg.toDBPackage(true) err = pkg.toDBPackage(ctx, true)
if err != nil { if err != nil {
log.Warningf("[QG] error getting/creating dbpackage %s: %v", state.Pkgbase, err) log.Warningf("[QG] error getting/creating dbpackage %s: %v", state.Pkgbase, err)
continue continue
@@ -438,18 +457,22 @@ func (b *BuildManager) genQueue() ([]*ProtoPackage, error) {
continue continue
} }
// try download srcinfo from repo // try download .SRCINFO from repo
srcInfo, err := downloadSRCINFO(pkg.DBPackage.Pkgbase, state.TagRev) srcInfo, err := downloadSRCINFO(pkg.DBPackage.Pkgbase, state.TagRev)
if err == nil { if err == nil {
pkg.Srcinfo = srcInfo pkg.Srcinfo = srcInfo
} }
if !pkg.isEligible(context.Background()) { if !pkg.isEligible(ctx) {
continue continue
} }
pkg.DBPackage = pkg.DBPackage.Update().SetStatus(dbpackage.StatusQueued).SaveX(context.Background()) pkg.DBPackage, err = pkg.DBPackage.Update().SetStatus(dbpackage.StatusQueued).Save(ctx)
if err != nil {
log.Warningf("[QG] error updating dbpackage %s: %v", state.Pkgbase, err)
}
pkgbuilds = append(pkgbuilds, pkg) pkgbuilds = append(pkgbuilds, pkg)
b.metrics.queueSize.WithLabelValues(pkg.FullRepo, "queued").Inc()
} }
} }

View File

@@ -45,3 +45,6 @@ build:
logging: logging:
level: INFO level: INFO
metrics:
port: 9568

View File

@@ -251,8 +251,8 @@ func (c *DBPackageClient) Update() *DBPackageUpdate {
} }
// UpdateOne returns an update builder for the given entity. // UpdateOne returns an update builder for the given entity.
func (c *DBPackageClient) UpdateOne(dp *DBPackage) *DBPackageUpdateOne { func (c *DBPackageClient) UpdateOne(_m *DBPackage) *DBPackageUpdateOne {
mutation := newDBPackageMutation(c.config, OpUpdateOne, withDBPackage(dp)) mutation := newDBPackageMutation(c.config, OpUpdateOne, withDBPackage(_m))
return &DBPackageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} return &DBPackageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
} }
@@ -269,8 +269,8 @@ func (c *DBPackageClient) Delete() *DBPackageDelete {
} }
// DeleteOne returns a builder for deleting the given entity. // DeleteOne returns a builder for deleting the given entity.
func (c *DBPackageClient) DeleteOne(dp *DBPackage) *DBPackageDeleteOne { func (c *DBPackageClient) DeleteOne(_m *DBPackage) *DBPackageDeleteOne {
return c.DeleteOneID(dp.ID) return c.DeleteOneID(_m.ID)
} }
// DeleteOneID returns a builder for deleting the given entity by its id. // DeleteOneID returns a builder for deleting the given entity by its id.

View File

@@ -83,7 +83,7 @@ func (*DBPackage) scanValues(columns []string) ([]any, error) {
// assignValues assigns the values that were returned from sql.Rows (after scanning) // assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the DBPackage fields. // to the DBPackage fields.
func (dp *DBPackage) assignValues(columns []string, values []any) error { func (_m *DBPackage) assignValues(columns []string, values []any) error {
if m, n := len(values), len(columns); m < n { if m, n := len(values), len(columns); m < n {
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
} }
@@ -94,18 +94,18 @@ func (dp *DBPackage) assignValues(columns []string, values []any) error {
if !ok { if !ok {
return fmt.Errorf("unexpected type %T for field id", value) return fmt.Errorf("unexpected type %T for field id", value)
} }
dp.ID = int(value.Int64) _m.ID = int(value.Int64)
case dbpackage.FieldPkgbase: case dbpackage.FieldPkgbase:
if value, ok := values[i].(*sql.NullString); !ok { if value, ok := values[i].(*sql.NullString); !ok {
return fmt.Errorf("unexpected type %T for field pkgbase", values[i]) return fmt.Errorf("unexpected type %T for field pkgbase", values[i])
} else if value.Valid { } else if value.Valid {
dp.Pkgbase = value.String _m.Pkgbase = value.String
} }
case dbpackage.FieldPackages: case dbpackage.FieldPackages:
if value, ok := values[i].(*[]byte); !ok { if value, ok := values[i].(*[]byte); !ok {
return fmt.Errorf("unexpected type %T for field packages", values[i]) return fmt.Errorf("unexpected type %T for field packages", values[i])
} else if value != nil && len(*value) > 0 { } else if value != nil && len(*value) > 0 {
if err := json.Unmarshal(*value, &dp.Packages); err != nil { if err := json.Unmarshal(*value, &_m.Packages); err != nil {
return fmt.Errorf("unmarshal field packages: %w", err) return fmt.Errorf("unmarshal field packages: %w", err)
} }
} }
@@ -113,118 +113,118 @@ func (dp *DBPackage) assignValues(columns []string, values []any) error {
if value, ok := values[i].(*sql.NullString); !ok { if value, ok := values[i].(*sql.NullString); !ok {
return fmt.Errorf("unexpected type %T for field status", values[i]) return fmt.Errorf("unexpected type %T for field status", values[i])
} else if value.Valid { } else if value.Valid {
dp.Status = dbpackage.Status(value.String) _m.Status = dbpackage.Status(value.String)
} }
case dbpackage.FieldSkipReason: case dbpackage.FieldSkipReason:
if value, ok := values[i].(*sql.NullString); !ok { if value, ok := values[i].(*sql.NullString); !ok {
return fmt.Errorf("unexpected type %T for field skip_reason", values[i]) return fmt.Errorf("unexpected type %T for field skip_reason", values[i])
} else if value.Valid { } else if value.Valid {
dp.SkipReason = value.String _m.SkipReason = value.String
} }
case dbpackage.FieldRepository: case dbpackage.FieldRepository:
if value, ok := values[i].(*sql.NullString); !ok { if value, ok := values[i].(*sql.NullString); !ok {
return fmt.Errorf("unexpected type %T for field repository", values[i]) return fmt.Errorf("unexpected type %T for field repository", values[i])
} else if value.Valid { } else if value.Valid {
dp.Repository = dbpackage.Repository(value.String) _m.Repository = dbpackage.Repository(value.String)
} }
case dbpackage.FieldMarch: case dbpackage.FieldMarch:
if value, ok := values[i].(*sql.NullString); !ok { if value, ok := values[i].(*sql.NullString); !ok {
return fmt.Errorf("unexpected type %T for field march", values[i]) return fmt.Errorf("unexpected type %T for field march", values[i])
} else if value.Valid { } else if value.Valid {
dp.March = value.String _m.March = value.String
} }
case dbpackage.FieldVersion: case dbpackage.FieldVersion:
if value, ok := values[i].(*sql.NullString); !ok { if value, ok := values[i].(*sql.NullString); !ok {
return fmt.Errorf("unexpected type %T for field version", values[i]) return fmt.Errorf("unexpected type %T for field version", values[i])
} else if value.Valid { } else if value.Valid {
dp.Version = value.String _m.Version = value.String
} }
case dbpackage.FieldRepoVersion: case dbpackage.FieldRepoVersion:
if value, ok := values[i].(*sql.NullString); !ok { if value, ok := values[i].(*sql.NullString); !ok {
return fmt.Errorf("unexpected type %T for field repo_version", values[i]) return fmt.Errorf("unexpected type %T for field repo_version", values[i])
} else if value.Valid { } else if value.Valid {
dp.RepoVersion = value.String _m.RepoVersion = value.String
} }
case dbpackage.FieldBuildTimeStart: case dbpackage.FieldBuildTimeStart:
if value, ok := values[i].(*sql.NullTime); !ok { if value, ok := values[i].(*sql.NullTime); !ok {
return fmt.Errorf("unexpected type %T for field build_time_start", values[i]) return fmt.Errorf("unexpected type %T for field build_time_start", values[i])
} else if value.Valid { } else if value.Valid {
dp.BuildTimeStart = value.Time _m.BuildTimeStart = value.Time
} }
case dbpackage.FieldUpdated: case dbpackage.FieldUpdated:
if value, ok := values[i].(*sql.NullTime); !ok { if value, ok := values[i].(*sql.NullTime); !ok {
return fmt.Errorf("unexpected type %T for field updated", values[i]) return fmt.Errorf("unexpected type %T for field updated", values[i])
} else if value.Valid { } else if value.Valid {
dp.Updated = value.Time _m.Updated = value.Time
} }
case dbpackage.FieldLto: case dbpackage.FieldLto:
if value, ok := values[i].(*sql.NullString); !ok { if value, ok := values[i].(*sql.NullString); !ok {
return fmt.Errorf("unexpected type %T for field lto", values[i]) return fmt.Errorf("unexpected type %T for field lto", values[i])
} else if value.Valid { } else if value.Valid {
dp.Lto = dbpackage.Lto(value.String) _m.Lto = dbpackage.Lto(value.String)
} }
case dbpackage.FieldLastVersionBuild: case dbpackage.FieldLastVersionBuild:
if value, ok := values[i].(*sql.NullString); !ok { if value, ok := values[i].(*sql.NullString); !ok {
return fmt.Errorf("unexpected type %T for field last_version_build", values[i]) return fmt.Errorf("unexpected type %T for field last_version_build", values[i])
} else if value.Valid { } else if value.Valid {
dp.LastVersionBuild = value.String _m.LastVersionBuild = value.String
} }
case dbpackage.FieldLastVerified: case dbpackage.FieldLastVerified:
if value, ok := values[i].(*sql.NullTime); !ok { if value, ok := values[i].(*sql.NullTime); !ok {
return fmt.Errorf("unexpected type %T for field last_verified", values[i]) return fmt.Errorf("unexpected type %T for field last_verified", values[i])
} else if value.Valid { } else if value.Valid {
dp.LastVerified = value.Time _m.LastVerified = value.Time
} }
case dbpackage.FieldDebugSymbols: case dbpackage.FieldDebugSymbols:
if value, ok := values[i].(*sql.NullString); !ok { if value, ok := values[i].(*sql.NullString); !ok {
return fmt.Errorf("unexpected type %T for field debug_symbols", values[i]) return fmt.Errorf("unexpected type %T for field debug_symbols", values[i])
} else if value.Valid { } else if value.Valid {
dp.DebugSymbols = dbpackage.DebugSymbols(value.String) _m.DebugSymbols = dbpackage.DebugSymbols(value.String)
} }
case dbpackage.FieldMaxRss: case dbpackage.FieldMaxRss:
if value, ok := values[i].(*sql.NullInt64); !ok { if value, ok := values[i].(*sql.NullInt64); !ok {
return fmt.Errorf("unexpected type %T for field max_rss", values[i]) return fmt.Errorf("unexpected type %T for field max_rss", values[i])
} else if value.Valid { } else if value.Valid {
dp.MaxRss = new(int64) _m.MaxRss = new(int64)
*dp.MaxRss = value.Int64 *_m.MaxRss = value.Int64
} }
case dbpackage.FieldUTime: case dbpackage.FieldUTime:
if value, ok := values[i].(*sql.NullInt64); !ok { if value, ok := values[i].(*sql.NullInt64); !ok {
return fmt.Errorf("unexpected type %T for field u_time", values[i]) return fmt.Errorf("unexpected type %T for field u_time", values[i])
} else if value.Valid { } else if value.Valid {
dp.UTime = new(int64) _m.UTime = new(int64)
*dp.UTime = value.Int64 *_m.UTime = value.Int64
} }
case dbpackage.FieldSTime: case dbpackage.FieldSTime:
if value, ok := values[i].(*sql.NullInt64); !ok { if value, ok := values[i].(*sql.NullInt64); !ok {
return fmt.Errorf("unexpected type %T for field s_time", values[i]) return fmt.Errorf("unexpected type %T for field s_time", values[i])
} else if value.Valid { } else if value.Valid {
dp.STime = new(int64) _m.STime = new(int64)
*dp.STime = value.Int64 *_m.STime = value.Int64
} }
case dbpackage.FieldIoIn: case dbpackage.FieldIoIn:
if value, ok := values[i].(*sql.NullInt64); !ok { if value, ok := values[i].(*sql.NullInt64); !ok {
return fmt.Errorf("unexpected type %T for field io_in", values[i]) return fmt.Errorf("unexpected type %T for field io_in", values[i])
} else if value.Valid { } else if value.Valid {
dp.IoIn = new(int64) _m.IoIn = new(int64)
*dp.IoIn = value.Int64 *_m.IoIn = value.Int64
} }
case dbpackage.FieldIoOut: case dbpackage.FieldIoOut:
if value, ok := values[i].(*sql.NullInt64); !ok { if value, ok := values[i].(*sql.NullInt64); !ok {
return fmt.Errorf("unexpected type %T for field io_out", values[i]) return fmt.Errorf("unexpected type %T for field io_out", values[i])
} else if value.Valid { } else if value.Valid {
dp.IoOut = new(int64) _m.IoOut = new(int64)
*dp.IoOut = value.Int64 *_m.IoOut = value.Int64
} }
case dbpackage.FieldTagRev: case dbpackage.FieldTagRev:
if value, ok := values[i].(*sql.NullString); !ok { if value, ok := values[i].(*sql.NullString); !ok {
return fmt.Errorf("unexpected type %T for field tag_rev", values[i]) return fmt.Errorf("unexpected type %T for field tag_rev", values[i])
} else if value.Valid { } else if value.Valid {
dp.TagRev = new(string) _m.TagRev = new(string)
*dp.TagRev = value.String *_m.TagRev = value.String
} }
default: default:
dp.selectValues.Set(columns[i], values[i]) _m.selectValues.Set(columns[i], values[i])
} }
} }
return nil return nil
@@ -232,101 +232,101 @@ func (dp *DBPackage) assignValues(columns []string, values []any) error {
// Value returns the ent.Value that was dynamically selected and assigned to the DBPackage. // Value returns the ent.Value that was dynamically selected and assigned to the DBPackage.
// This includes values selected through modifiers, order, etc. // This includes values selected through modifiers, order, etc.
func (dp *DBPackage) Value(name string) (ent.Value, error) { func (_m *DBPackage) Value(name string) (ent.Value, error) {
return dp.selectValues.Get(name) return _m.selectValues.Get(name)
} }
// Update returns a builder for updating this DBPackage. // Update returns a builder for updating this DBPackage.
// Note that you need to call DBPackage.Unwrap() before calling this method if this DBPackage // Note that you need to call DBPackage.Unwrap() before calling this method if this DBPackage
// was returned from a transaction, and the transaction was committed or rolled back. // was returned from a transaction, and the transaction was committed or rolled back.
func (dp *DBPackage) Update() *DBPackageUpdateOne { func (_m *DBPackage) Update() *DBPackageUpdateOne {
return NewDBPackageClient(dp.config).UpdateOne(dp) return NewDBPackageClient(_m.config).UpdateOne(_m)
} }
// Unwrap unwraps the DBPackage entity that was returned from a transaction after it was closed, // Unwrap unwraps the DBPackage entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction. // so that all future queries will be executed through the driver which created the transaction.
func (dp *DBPackage) Unwrap() *DBPackage { func (_m *DBPackage) Unwrap() *DBPackage {
_tx, ok := dp.config.driver.(*txDriver) _tx, ok := _m.config.driver.(*txDriver)
if !ok { if !ok {
panic("ent: DBPackage is not a transactional entity") panic("ent: DBPackage is not a transactional entity")
} }
dp.config.driver = _tx.drv _m.config.driver = _tx.drv
return dp return _m
} }
// String implements the fmt.Stringer. // String implements the fmt.Stringer.
func (dp *DBPackage) String() string { func (_m *DBPackage) String() string {
var builder strings.Builder var builder strings.Builder
builder.WriteString("DBPackage(") builder.WriteString("DBPackage(")
builder.WriteString(fmt.Sprintf("id=%v, ", dp.ID)) builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
builder.WriteString("pkgbase=") builder.WriteString("pkgbase=")
builder.WriteString(dp.Pkgbase) builder.WriteString(_m.Pkgbase)
builder.WriteString(", ") builder.WriteString(", ")
builder.WriteString("packages=") builder.WriteString("packages=")
builder.WriteString(fmt.Sprintf("%v", dp.Packages)) builder.WriteString(fmt.Sprintf("%v", _m.Packages))
builder.WriteString(", ") builder.WriteString(", ")
builder.WriteString("status=") builder.WriteString("status=")
builder.WriteString(fmt.Sprintf("%v", dp.Status)) builder.WriteString(fmt.Sprintf("%v", _m.Status))
builder.WriteString(", ") builder.WriteString(", ")
builder.WriteString("skip_reason=") builder.WriteString("skip_reason=")
builder.WriteString(dp.SkipReason) builder.WriteString(_m.SkipReason)
builder.WriteString(", ") builder.WriteString(", ")
builder.WriteString("repository=") builder.WriteString("repository=")
builder.WriteString(fmt.Sprintf("%v", dp.Repository)) builder.WriteString(fmt.Sprintf("%v", _m.Repository))
builder.WriteString(", ") builder.WriteString(", ")
builder.WriteString("march=") builder.WriteString("march=")
builder.WriteString(dp.March) builder.WriteString(_m.March)
builder.WriteString(", ") builder.WriteString(", ")
builder.WriteString("version=") builder.WriteString("version=")
builder.WriteString(dp.Version) builder.WriteString(_m.Version)
builder.WriteString(", ") builder.WriteString(", ")
builder.WriteString("repo_version=") builder.WriteString("repo_version=")
builder.WriteString(dp.RepoVersion) builder.WriteString(_m.RepoVersion)
builder.WriteString(", ") builder.WriteString(", ")
builder.WriteString("build_time_start=") builder.WriteString("build_time_start=")
builder.WriteString(dp.BuildTimeStart.Format(time.ANSIC)) builder.WriteString(_m.BuildTimeStart.Format(time.ANSIC))
builder.WriteString(", ") builder.WriteString(", ")
builder.WriteString("updated=") builder.WriteString("updated=")
builder.WriteString(dp.Updated.Format(time.ANSIC)) builder.WriteString(_m.Updated.Format(time.ANSIC))
builder.WriteString(", ") builder.WriteString(", ")
builder.WriteString("lto=") builder.WriteString("lto=")
builder.WriteString(fmt.Sprintf("%v", dp.Lto)) builder.WriteString(fmt.Sprintf("%v", _m.Lto))
builder.WriteString(", ") builder.WriteString(", ")
builder.WriteString("last_version_build=") builder.WriteString("last_version_build=")
builder.WriteString(dp.LastVersionBuild) builder.WriteString(_m.LastVersionBuild)
builder.WriteString(", ") builder.WriteString(", ")
builder.WriteString("last_verified=") builder.WriteString("last_verified=")
builder.WriteString(dp.LastVerified.Format(time.ANSIC)) builder.WriteString(_m.LastVerified.Format(time.ANSIC))
builder.WriteString(", ") builder.WriteString(", ")
builder.WriteString("debug_symbols=") builder.WriteString("debug_symbols=")
builder.WriteString(fmt.Sprintf("%v", dp.DebugSymbols)) builder.WriteString(fmt.Sprintf("%v", _m.DebugSymbols))
builder.WriteString(", ") builder.WriteString(", ")
if v := dp.MaxRss; v != nil { if v := _m.MaxRss; v != nil {
builder.WriteString("max_rss=") builder.WriteString("max_rss=")
builder.WriteString(fmt.Sprintf("%v", *v)) builder.WriteString(fmt.Sprintf("%v", *v))
} }
builder.WriteString(", ") builder.WriteString(", ")
if v := dp.UTime; v != nil { if v := _m.UTime; v != nil {
builder.WriteString("u_time=") builder.WriteString("u_time=")
builder.WriteString(fmt.Sprintf("%v", *v)) builder.WriteString(fmt.Sprintf("%v", *v))
} }
builder.WriteString(", ") builder.WriteString(", ")
if v := dp.STime; v != nil { if v := _m.STime; v != nil {
builder.WriteString("s_time=") builder.WriteString("s_time=")
builder.WriteString(fmt.Sprintf("%v", *v)) builder.WriteString(fmt.Sprintf("%v", *v))
} }
builder.WriteString(", ") builder.WriteString(", ")
if v := dp.IoIn; v != nil { if v := _m.IoIn; v != nil {
builder.WriteString("io_in=") builder.WriteString("io_in=")
builder.WriteString(fmt.Sprintf("%v", *v)) builder.WriteString(fmt.Sprintf("%v", *v))
} }
builder.WriteString(", ") builder.WriteString(", ")
if v := dp.IoOut; v != nil { if v := _m.IoOut; v != nil {
builder.WriteString("io_out=") builder.WriteString("io_out=")
builder.WriteString(fmt.Sprintf("%v", *v)) builder.WriteString(fmt.Sprintf("%v", *v))
} }
builder.WriteString(", ") builder.WriteString(", ")
if v := dp.TagRev; v != nil { if v := _m.TagRev; v != nil {
builder.WriteString("tag_rev=") builder.WriteString("tag_rev=")
builder.WriteString(*v) builder.WriteString(*v)
} }

View File

@@ -21,267 +21,267 @@ type DBPackageCreate struct {
} }
// SetPkgbase sets the "pkgbase" field. // SetPkgbase sets the "pkgbase" field.
func (dpc *DBPackageCreate) SetPkgbase(s string) *DBPackageCreate { func (_c *DBPackageCreate) SetPkgbase(v string) *DBPackageCreate {
dpc.mutation.SetPkgbase(s) _c.mutation.SetPkgbase(v)
return dpc return _c
} }
// SetPackages sets the "packages" field. // SetPackages sets the "packages" field.
func (dpc *DBPackageCreate) SetPackages(s []string) *DBPackageCreate { func (_c *DBPackageCreate) SetPackages(v []string) *DBPackageCreate {
dpc.mutation.SetPackages(s) _c.mutation.SetPackages(v)
return dpc return _c
} }
// SetStatus sets the "status" field. // SetStatus sets the "status" field.
func (dpc *DBPackageCreate) SetStatus(d dbpackage.Status) *DBPackageCreate { func (_c *DBPackageCreate) SetStatus(v dbpackage.Status) *DBPackageCreate {
dpc.mutation.SetStatus(d) _c.mutation.SetStatus(v)
return dpc return _c
} }
// SetNillableStatus sets the "status" field if the given value is not nil. // SetNillableStatus sets the "status" field if the given value is not nil.
func (dpc *DBPackageCreate) SetNillableStatus(d *dbpackage.Status) *DBPackageCreate { func (_c *DBPackageCreate) SetNillableStatus(v *dbpackage.Status) *DBPackageCreate {
if d != nil { if v != nil {
dpc.SetStatus(*d) _c.SetStatus(*v)
} }
return dpc return _c
} }
// SetSkipReason sets the "skip_reason" field. // SetSkipReason sets the "skip_reason" field.
func (dpc *DBPackageCreate) SetSkipReason(s string) *DBPackageCreate { func (_c *DBPackageCreate) SetSkipReason(v string) *DBPackageCreate {
dpc.mutation.SetSkipReason(s) _c.mutation.SetSkipReason(v)
return dpc return _c
} }
// SetNillableSkipReason sets the "skip_reason" field if the given value is not nil. // SetNillableSkipReason sets the "skip_reason" field if the given value is not nil.
func (dpc *DBPackageCreate) SetNillableSkipReason(s *string) *DBPackageCreate { func (_c *DBPackageCreate) SetNillableSkipReason(v *string) *DBPackageCreate {
if s != nil { if v != nil {
dpc.SetSkipReason(*s) _c.SetSkipReason(*v)
} }
return dpc return _c
} }
// SetRepository sets the "repository" field. // SetRepository sets the "repository" field.
func (dpc *DBPackageCreate) SetRepository(d dbpackage.Repository) *DBPackageCreate { func (_c *DBPackageCreate) SetRepository(v dbpackage.Repository) *DBPackageCreate {
dpc.mutation.SetRepository(d) _c.mutation.SetRepository(v)
return dpc return _c
} }
// SetMarch sets the "march" field. // SetMarch sets the "march" field.
func (dpc *DBPackageCreate) SetMarch(s string) *DBPackageCreate { func (_c *DBPackageCreate) SetMarch(v string) *DBPackageCreate {
dpc.mutation.SetMarch(s) _c.mutation.SetMarch(v)
return dpc return _c
} }
// SetVersion sets the "version" field. // SetVersion sets the "version" field.
func (dpc *DBPackageCreate) SetVersion(s string) *DBPackageCreate { func (_c *DBPackageCreate) SetVersion(v string) *DBPackageCreate {
dpc.mutation.SetVersion(s) _c.mutation.SetVersion(v)
return dpc return _c
} }
// SetNillableVersion sets the "version" field if the given value is not nil. // SetNillableVersion sets the "version" field if the given value is not nil.
func (dpc *DBPackageCreate) SetNillableVersion(s *string) *DBPackageCreate { func (_c *DBPackageCreate) SetNillableVersion(v *string) *DBPackageCreate {
if s != nil { if v != nil {
dpc.SetVersion(*s) _c.SetVersion(*v)
} }
return dpc return _c
} }
// SetRepoVersion sets the "repo_version" field. // SetRepoVersion sets the "repo_version" field.
func (dpc *DBPackageCreate) SetRepoVersion(s string) *DBPackageCreate { func (_c *DBPackageCreate) SetRepoVersion(v string) *DBPackageCreate {
dpc.mutation.SetRepoVersion(s) _c.mutation.SetRepoVersion(v)
return dpc return _c
} }
// SetNillableRepoVersion sets the "repo_version" field if the given value is not nil. // SetNillableRepoVersion sets the "repo_version" field if the given value is not nil.
func (dpc *DBPackageCreate) SetNillableRepoVersion(s *string) *DBPackageCreate { func (_c *DBPackageCreate) SetNillableRepoVersion(v *string) *DBPackageCreate {
if s != nil { if v != nil {
dpc.SetRepoVersion(*s) _c.SetRepoVersion(*v)
} }
return dpc return _c
} }
// SetBuildTimeStart sets the "build_time_start" field. // SetBuildTimeStart sets the "build_time_start" field.
func (dpc *DBPackageCreate) SetBuildTimeStart(t time.Time) *DBPackageCreate { func (_c *DBPackageCreate) SetBuildTimeStart(v time.Time) *DBPackageCreate {
dpc.mutation.SetBuildTimeStart(t) _c.mutation.SetBuildTimeStart(v)
return dpc return _c
} }
// SetNillableBuildTimeStart sets the "build_time_start" field if the given value is not nil. // SetNillableBuildTimeStart sets the "build_time_start" field if the given value is not nil.
func (dpc *DBPackageCreate) SetNillableBuildTimeStart(t *time.Time) *DBPackageCreate { func (_c *DBPackageCreate) SetNillableBuildTimeStart(v *time.Time) *DBPackageCreate {
if t != nil { if v != nil {
dpc.SetBuildTimeStart(*t) _c.SetBuildTimeStart(*v)
} }
return dpc return _c
} }
// SetUpdated sets the "updated" field. // SetUpdated sets the "updated" field.
func (dpc *DBPackageCreate) SetUpdated(t time.Time) *DBPackageCreate { func (_c *DBPackageCreate) SetUpdated(v time.Time) *DBPackageCreate {
dpc.mutation.SetUpdated(t) _c.mutation.SetUpdated(v)
return dpc return _c
} }
// SetNillableUpdated sets the "updated" field if the given value is not nil. // SetNillableUpdated sets the "updated" field if the given value is not nil.
func (dpc *DBPackageCreate) SetNillableUpdated(t *time.Time) *DBPackageCreate { func (_c *DBPackageCreate) SetNillableUpdated(v *time.Time) *DBPackageCreate {
if t != nil { if v != nil {
dpc.SetUpdated(*t) _c.SetUpdated(*v)
} }
return dpc return _c
} }
// SetLto sets the "lto" field. // SetLto sets the "lto" field.
func (dpc *DBPackageCreate) SetLto(d dbpackage.Lto) *DBPackageCreate { func (_c *DBPackageCreate) SetLto(v dbpackage.Lto) *DBPackageCreate {
dpc.mutation.SetLto(d) _c.mutation.SetLto(v)
return dpc return _c
} }
// SetNillableLto sets the "lto" field if the given value is not nil. // SetNillableLto sets the "lto" field if the given value is not nil.
func (dpc *DBPackageCreate) SetNillableLto(d *dbpackage.Lto) *DBPackageCreate { func (_c *DBPackageCreate) SetNillableLto(v *dbpackage.Lto) *DBPackageCreate {
if d != nil { if v != nil {
dpc.SetLto(*d) _c.SetLto(*v)
} }
return dpc return _c
} }
// SetLastVersionBuild sets the "last_version_build" field. // SetLastVersionBuild sets the "last_version_build" field.
func (dpc *DBPackageCreate) SetLastVersionBuild(s string) *DBPackageCreate { func (_c *DBPackageCreate) SetLastVersionBuild(v string) *DBPackageCreate {
dpc.mutation.SetLastVersionBuild(s) _c.mutation.SetLastVersionBuild(v)
return dpc return _c
} }
// SetNillableLastVersionBuild sets the "last_version_build" field if the given value is not nil. // SetNillableLastVersionBuild sets the "last_version_build" field if the given value is not nil.
func (dpc *DBPackageCreate) SetNillableLastVersionBuild(s *string) *DBPackageCreate { func (_c *DBPackageCreate) SetNillableLastVersionBuild(v *string) *DBPackageCreate {
if s != nil { if v != nil {
dpc.SetLastVersionBuild(*s) _c.SetLastVersionBuild(*v)
} }
return dpc return _c
} }
// SetLastVerified sets the "last_verified" field. // SetLastVerified sets the "last_verified" field.
func (dpc *DBPackageCreate) SetLastVerified(t time.Time) *DBPackageCreate { func (_c *DBPackageCreate) SetLastVerified(v time.Time) *DBPackageCreate {
dpc.mutation.SetLastVerified(t) _c.mutation.SetLastVerified(v)
return dpc return _c
} }
// SetNillableLastVerified sets the "last_verified" field if the given value is not nil. // SetNillableLastVerified sets the "last_verified" field if the given value is not nil.
func (dpc *DBPackageCreate) SetNillableLastVerified(t *time.Time) *DBPackageCreate { func (_c *DBPackageCreate) SetNillableLastVerified(v *time.Time) *DBPackageCreate {
if t != nil { if v != nil {
dpc.SetLastVerified(*t) _c.SetLastVerified(*v)
} }
return dpc return _c
} }
// SetDebugSymbols sets the "debug_symbols" field. // SetDebugSymbols sets the "debug_symbols" field.
func (dpc *DBPackageCreate) SetDebugSymbols(ds dbpackage.DebugSymbols) *DBPackageCreate { func (_c *DBPackageCreate) SetDebugSymbols(v dbpackage.DebugSymbols) *DBPackageCreate {
dpc.mutation.SetDebugSymbols(ds) _c.mutation.SetDebugSymbols(v)
return dpc return _c
} }
// SetNillableDebugSymbols sets the "debug_symbols" field if the given value is not nil. // SetNillableDebugSymbols sets the "debug_symbols" field if the given value is not nil.
func (dpc *DBPackageCreate) SetNillableDebugSymbols(ds *dbpackage.DebugSymbols) *DBPackageCreate { func (_c *DBPackageCreate) SetNillableDebugSymbols(v *dbpackage.DebugSymbols) *DBPackageCreate {
if ds != nil { if v != nil {
dpc.SetDebugSymbols(*ds) _c.SetDebugSymbols(*v)
} }
return dpc return _c
} }
// SetMaxRss sets the "max_rss" field. // SetMaxRss sets the "max_rss" field.
func (dpc *DBPackageCreate) SetMaxRss(i int64) *DBPackageCreate { func (_c *DBPackageCreate) SetMaxRss(v int64) *DBPackageCreate {
dpc.mutation.SetMaxRss(i) _c.mutation.SetMaxRss(v)
return dpc return _c
} }
// SetNillableMaxRss sets the "max_rss" field if the given value is not nil. // SetNillableMaxRss sets the "max_rss" field if the given value is not nil.
func (dpc *DBPackageCreate) SetNillableMaxRss(i *int64) *DBPackageCreate { func (_c *DBPackageCreate) SetNillableMaxRss(v *int64) *DBPackageCreate {
if i != nil { if v != nil {
dpc.SetMaxRss(*i) _c.SetMaxRss(*v)
} }
return dpc return _c
} }
// SetUTime sets the "u_time" field. // SetUTime sets the "u_time" field.
func (dpc *DBPackageCreate) SetUTime(i int64) *DBPackageCreate { func (_c *DBPackageCreate) SetUTime(v int64) *DBPackageCreate {
dpc.mutation.SetUTime(i) _c.mutation.SetUTime(v)
return dpc return _c
} }
// SetNillableUTime sets the "u_time" field if the given value is not nil. // SetNillableUTime sets the "u_time" field if the given value is not nil.
func (dpc *DBPackageCreate) SetNillableUTime(i *int64) *DBPackageCreate { func (_c *DBPackageCreate) SetNillableUTime(v *int64) *DBPackageCreate {
if i != nil { if v != nil {
dpc.SetUTime(*i) _c.SetUTime(*v)
} }
return dpc return _c
} }
// SetSTime sets the "s_time" field. // SetSTime sets the "s_time" field.
func (dpc *DBPackageCreate) SetSTime(i int64) *DBPackageCreate { func (_c *DBPackageCreate) SetSTime(v int64) *DBPackageCreate {
dpc.mutation.SetSTime(i) _c.mutation.SetSTime(v)
return dpc return _c
} }
// SetNillableSTime sets the "s_time" field if the given value is not nil. // SetNillableSTime sets the "s_time" field if the given value is not nil.
func (dpc *DBPackageCreate) SetNillableSTime(i *int64) *DBPackageCreate { func (_c *DBPackageCreate) SetNillableSTime(v *int64) *DBPackageCreate {
if i != nil { if v != nil {
dpc.SetSTime(*i) _c.SetSTime(*v)
} }
return dpc return _c
} }
// SetIoIn sets the "io_in" field. // SetIoIn sets the "io_in" field.
func (dpc *DBPackageCreate) SetIoIn(i int64) *DBPackageCreate { func (_c *DBPackageCreate) SetIoIn(v int64) *DBPackageCreate {
dpc.mutation.SetIoIn(i) _c.mutation.SetIoIn(v)
return dpc return _c
} }
// SetNillableIoIn sets the "io_in" field if the given value is not nil. // SetNillableIoIn sets the "io_in" field if the given value is not nil.
func (dpc *DBPackageCreate) SetNillableIoIn(i *int64) *DBPackageCreate { func (_c *DBPackageCreate) SetNillableIoIn(v *int64) *DBPackageCreate {
if i != nil { if v != nil {
dpc.SetIoIn(*i) _c.SetIoIn(*v)
} }
return dpc return _c
} }
// SetIoOut sets the "io_out" field. // SetIoOut sets the "io_out" field.
func (dpc *DBPackageCreate) SetIoOut(i int64) *DBPackageCreate { func (_c *DBPackageCreate) SetIoOut(v int64) *DBPackageCreate {
dpc.mutation.SetIoOut(i) _c.mutation.SetIoOut(v)
return dpc return _c
} }
// SetNillableIoOut sets the "io_out" field if the given value is not nil. // SetNillableIoOut sets the "io_out" field if the given value is not nil.
func (dpc *DBPackageCreate) SetNillableIoOut(i *int64) *DBPackageCreate { func (_c *DBPackageCreate) SetNillableIoOut(v *int64) *DBPackageCreate {
if i != nil { if v != nil {
dpc.SetIoOut(*i) _c.SetIoOut(*v)
} }
return dpc return _c
} }
// SetTagRev sets the "tag_rev" field. // SetTagRev sets the "tag_rev" field.
func (dpc *DBPackageCreate) SetTagRev(s string) *DBPackageCreate { func (_c *DBPackageCreate) SetTagRev(v string) *DBPackageCreate {
dpc.mutation.SetTagRev(s) _c.mutation.SetTagRev(v)
return dpc return _c
} }
// SetNillableTagRev sets the "tag_rev" field if the given value is not nil. // SetNillableTagRev sets the "tag_rev" field if the given value is not nil.
func (dpc *DBPackageCreate) SetNillableTagRev(s *string) *DBPackageCreate { func (_c *DBPackageCreate) SetNillableTagRev(v *string) *DBPackageCreate {
if s != nil { if v != nil {
dpc.SetTagRev(*s) _c.SetTagRev(*v)
} }
return dpc return _c
} }
// Mutation returns the DBPackageMutation object of the builder. // Mutation returns the DBPackageMutation object of the builder.
func (dpc *DBPackageCreate) Mutation() *DBPackageMutation { func (_c *DBPackageCreate) Mutation() *DBPackageMutation {
return dpc.mutation return _c.mutation
} }
// Save creates the DBPackage in the database. // Save creates the DBPackage in the database.
func (dpc *DBPackageCreate) Save(ctx context.Context) (*DBPackage, error) { func (_c *DBPackageCreate) Save(ctx context.Context) (*DBPackage, error) {
dpc.defaults() _c.defaults()
return withHooks(ctx, dpc.sqlSave, dpc.mutation, dpc.hooks) return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks)
} }
// SaveX calls Save and panics if Save returns an error. // SaveX calls Save and panics if Save returns an error.
func (dpc *DBPackageCreate) SaveX(ctx context.Context) *DBPackage { func (_c *DBPackageCreate) SaveX(ctx context.Context) *DBPackage {
v, err := dpc.Save(ctx) v, err := _c.Save(ctx)
if err != nil { if err != nil {
panic(err) panic(err)
} }
@@ -289,71 +289,71 @@ func (dpc *DBPackageCreate) SaveX(ctx context.Context) *DBPackage {
} }
// Exec executes the query. // Exec executes the query.
func (dpc *DBPackageCreate) Exec(ctx context.Context) error { func (_c *DBPackageCreate) Exec(ctx context.Context) error {
_, err := dpc.Save(ctx) _, err := _c.Save(ctx)
return err return err
} }
// ExecX is like Exec, but panics if an error occurs. // ExecX is like Exec, but panics if an error occurs.
func (dpc *DBPackageCreate) ExecX(ctx context.Context) { func (_c *DBPackageCreate) ExecX(ctx context.Context) {
if err := dpc.Exec(ctx); err != nil { if err := _c.Exec(ctx); err != nil {
panic(err) panic(err)
} }
} }
// defaults sets the default values of the builder before save. // defaults sets the default values of the builder before save.
func (dpc *DBPackageCreate) defaults() { func (_c *DBPackageCreate) defaults() {
if _, ok := dpc.mutation.Status(); !ok { if _, ok := _c.mutation.Status(); !ok {
v := dbpackage.DefaultStatus v := dbpackage.DefaultStatus
dpc.mutation.SetStatus(v) _c.mutation.SetStatus(v)
} }
if _, ok := dpc.mutation.Lto(); !ok { if _, ok := _c.mutation.Lto(); !ok {
v := dbpackage.DefaultLto v := dbpackage.DefaultLto
dpc.mutation.SetLto(v) _c.mutation.SetLto(v)
} }
if _, ok := dpc.mutation.DebugSymbols(); !ok { if _, ok := _c.mutation.DebugSymbols(); !ok {
v := dbpackage.DefaultDebugSymbols v := dbpackage.DefaultDebugSymbols
dpc.mutation.SetDebugSymbols(v) _c.mutation.SetDebugSymbols(v)
} }
} }
// check runs all checks and user-defined validators on the builder. // check runs all checks and user-defined validators on the builder.
func (dpc *DBPackageCreate) check() error { func (_c *DBPackageCreate) check() error {
if _, ok := dpc.mutation.Pkgbase(); !ok { if _, ok := _c.mutation.Pkgbase(); !ok {
return &ValidationError{Name: "pkgbase", err: errors.New(`ent: missing required field "DBPackage.pkgbase"`)} return &ValidationError{Name: "pkgbase", err: errors.New(`ent: missing required field "DBPackage.pkgbase"`)}
} }
if v, ok := dpc.mutation.Pkgbase(); ok { if v, ok := _c.mutation.Pkgbase(); ok {
if err := dbpackage.PkgbaseValidator(v); err != nil { if err := dbpackage.PkgbaseValidator(v); err != nil {
return &ValidationError{Name: "pkgbase", err: fmt.Errorf(`ent: validator failed for field "DBPackage.pkgbase": %w`, err)} return &ValidationError{Name: "pkgbase", err: fmt.Errorf(`ent: validator failed for field "DBPackage.pkgbase": %w`, err)}
} }
} }
if v, ok := dpc.mutation.Status(); ok { if v, ok := _c.mutation.Status(); ok {
if err := dbpackage.StatusValidator(v); err != nil { if err := dbpackage.StatusValidator(v); err != nil {
return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "DBPackage.status": %w`, err)} return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "DBPackage.status": %w`, err)}
} }
} }
if _, ok := dpc.mutation.Repository(); !ok { if _, ok := _c.mutation.Repository(); !ok {
return &ValidationError{Name: "repository", err: errors.New(`ent: missing required field "DBPackage.repository"`)} return &ValidationError{Name: "repository", err: errors.New(`ent: missing required field "DBPackage.repository"`)}
} }
if v, ok := dpc.mutation.Repository(); ok { if v, ok := _c.mutation.Repository(); ok {
if err := dbpackage.RepositoryValidator(v); err != nil { if err := dbpackage.RepositoryValidator(v); err != nil {
return &ValidationError{Name: "repository", err: fmt.Errorf(`ent: validator failed for field "DBPackage.repository": %w`, err)} return &ValidationError{Name: "repository", err: fmt.Errorf(`ent: validator failed for field "DBPackage.repository": %w`, err)}
} }
} }
if _, ok := dpc.mutation.March(); !ok { if _, ok := _c.mutation.March(); !ok {
return &ValidationError{Name: "march", err: errors.New(`ent: missing required field "DBPackage.march"`)} return &ValidationError{Name: "march", err: errors.New(`ent: missing required field "DBPackage.march"`)}
} }
if v, ok := dpc.mutation.March(); ok { if v, ok := _c.mutation.March(); ok {
if err := dbpackage.MarchValidator(v); err != nil { if err := dbpackage.MarchValidator(v); err != nil {
return &ValidationError{Name: "march", err: fmt.Errorf(`ent: validator failed for field "DBPackage.march": %w`, err)} return &ValidationError{Name: "march", err: fmt.Errorf(`ent: validator failed for field "DBPackage.march": %w`, err)}
} }
} }
if v, ok := dpc.mutation.Lto(); ok { if v, ok := _c.mutation.Lto(); ok {
if err := dbpackage.LtoValidator(v); err != nil { if err := dbpackage.LtoValidator(v); err != nil {
return &ValidationError{Name: "lto", err: fmt.Errorf(`ent: validator failed for field "DBPackage.lto": %w`, err)} return &ValidationError{Name: "lto", err: fmt.Errorf(`ent: validator failed for field "DBPackage.lto": %w`, err)}
} }
} }
if v, ok := dpc.mutation.DebugSymbols(); ok { if v, ok := _c.mutation.DebugSymbols(); ok {
if err := dbpackage.DebugSymbolsValidator(v); err != nil { if err := dbpackage.DebugSymbolsValidator(v); err != nil {
return &ValidationError{Name: "debug_symbols", err: fmt.Errorf(`ent: validator failed for field "DBPackage.debug_symbols": %w`, err)} return &ValidationError{Name: "debug_symbols", err: fmt.Errorf(`ent: validator failed for field "DBPackage.debug_symbols": %w`, err)}
} }
@@ -361,12 +361,12 @@ func (dpc *DBPackageCreate) check() error {
return nil return nil
} }
func (dpc *DBPackageCreate) sqlSave(ctx context.Context) (*DBPackage, error) { func (_c *DBPackageCreate) sqlSave(ctx context.Context) (*DBPackage, error) {
if err := dpc.check(); err != nil { if err := _c.check(); err != nil {
return nil, err return nil, err
} }
_node, _spec := dpc.createSpec() _node, _spec := _c.createSpec()
if err := sqlgraph.CreateNode(ctx, dpc.driver, _spec); err != nil { if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil {
if sqlgraph.IsConstraintError(err) { if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err} err = &ConstraintError{msg: err.Error(), wrap: err}
} }
@@ -374,93 +374,93 @@ func (dpc *DBPackageCreate) sqlSave(ctx context.Context) (*DBPackage, error) {
} }
id := _spec.ID.Value.(int64) id := _spec.ID.Value.(int64)
_node.ID = int(id) _node.ID = int(id)
dpc.mutation.id = &_node.ID _c.mutation.id = &_node.ID
dpc.mutation.done = true _c.mutation.done = true
return _node, nil return _node, nil
} }
func (dpc *DBPackageCreate) createSpec() (*DBPackage, *sqlgraph.CreateSpec) { func (_c *DBPackageCreate) createSpec() (*DBPackage, *sqlgraph.CreateSpec) {
var ( var (
_node = &DBPackage{config: dpc.config} _node = &DBPackage{config: _c.config}
_spec = sqlgraph.NewCreateSpec(dbpackage.Table, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt)) _spec = sqlgraph.NewCreateSpec(dbpackage.Table, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt))
) )
if value, ok := dpc.mutation.Pkgbase(); ok { if value, ok := _c.mutation.Pkgbase(); ok {
_spec.SetField(dbpackage.FieldPkgbase, field.TypeString, value) _spec.SetField(dbpackage.FieldPkgbase, field.TypeString, value)
_node.Pkgbase = value _node.Pkgbase = value
} }
if value, ok := dpc.mutation.Packages(); ok { if value, ok := _c.mutation.Packages(); ok {
_spec.SetField(dbpackage.FieldPackages, field.TypeJSON, value) _spec.SetField(dbpackage.FieldPackages, field.TypeJSON, value)
_node.Packages = value _node.Packages = value
} }
if value, ok := dpc.mutation.Status(); ok { if value, ok := _c.mutation.Status(); ok {
_spec.SetField(dbpackage.FieldStatus, field.TypeEnum, value) _spec.SetField(dbpackage.FieldStatus, field.TypeEnum, value)
_node.Status = value _node.Status = value
} }
if value, ok := dpc.mutation.SkipReason(); ok { if value, ok := _c.mutation.SkipReason(); ok {
_spec.SetField(dbpackage.FieldSkipReason, field.TypeString, value) _spec.SetField(dbpackage.FieldSkipReason, field.TypeString, value)
_node.SkipReason = value _node.SkipReason = value
} }
if value, ok := dpc.mutation.Repository(); ok { if value, ok := _c.mutation.Repository(); ok {
_spec.SetField(dbpackage.FieldRepository, field.TypeEnum, value) _spec.SetField(dbpackage.FieldRepository, field.TypeEnum, value)
_node.Repository = value _node.Repository = value
} }
if value, ok := dpc.mutation.March(); ok { if value, ok := _c.mutation.March(); ok {
_spec.SetField(dbpackage.FieldMarch, field.TypeString, value) _spec.SetField(dbpackage.FieldMarch, field.TypeString, value)
_node.March = value _node.March = value
} }
if value, ok := dpc.mutation.Version(); ok { if value, ok := _c.mutation.Version(); ok {
_spec.SetField(dbpackage.FieldVersion, field.TypeString, value) _spec.SetField(dbpackage.FieldVersion, field.TypeString, value)
_node.Version = value _node.Version = value
} }
if value, ok := dpc.mutation.RepoVersion(); ok { if value, ok := _c.mutation.RepoVersion(); ok {
_spec.SetField(dbpackage.FieldRepoVersion, field.TypeString, value) _spec.SetField(dbpackage.FieldRepoVersion, field.TypeString, value)
_node.RepoVersion = value _node.RepoVersion = value
} }
if value, ok := dpc.mutation.BuildTimeStart(); ok { if value, ok := _c.mutation.BuildTimeStart(); ok {
_spec.SetField(dbpackage.FieldBuildTimeStart, field.TypeTime, value) _spec.SetField(dbpackage.FieldBuildTimeStart, field.TypeTime, value)
_node.BuildTimeStart = value _node.BuildTimeStart = value
} }
if value, ok := dpc.mutation.Updated(); ok { if value, ok := _c.mutation.Updated(); ok {
_spec.SetField(dbpackage.FieldUpdated, field.TypeTime, value) _spec.SetField(dbpackage.FieldUpdated, field.TypeTime, value)
_node.Updated = value _node.Updated = value
} }
if value, ok := dpc.mutation.Lto(); ok { if value, ok := _c.mutation.Lto(); ok {
_spec.SetField(dbpackage.FieldLto, field.TypeEnum, value) _spec.SetField(dbpackage.FieldLto, field.TypeEnum, value)
_node.Lto = value _node.Lto = value
} }
if value, ok := dpc.mutation.LastVersionBuild(); ok { if value, ok := _c.mutation.LastVersionBuild(); ok {
_spec.SetField(dbpackage.FieldLastVersionBuild, field.TypeString, value) _spec.SetField(dbpackage.FieldLastVersionBuild, field.TypeString, value)
_node.LastVersionBuild = value _node.LastVersionBuild = value
} }
if value, ok := dpc.mutation.LastVerified(); ok { if value, ok := _c.mutation.LastVerified(); ok {
_spec.SetField(dbpackage.FieldLastVerified, field.TypeTime, value) _spec.SetField(dbpackage.FieldLastVerified, field.TypeTime, value)
_node.LastVerified = value _node.LastVerified = value
} }
if value, ok := dpc.mutation.DebugSymbols(); ok { if value, ok := _c.mutation.DebugSymbols(); ok {
_spec.SetField(dbpackage.FieldDebugSymbols, field.TypeEnum, value) _spec.SetField(dbpackage.FieldDebugSymbols, field.TypeEnum, value)
_node.DebugSymbols = value _node.DebugSymbols = value
} }
if value, ok := dpc.mutation.MaxRss(); ok { if value, ok := _c.mutation.MaxRss(); ok {
_spec.SetField(dbpackage.FieldMaxRss, field.TypeInt64, value) _spec.SetField(dbpackage.FieldMaxRss, field.TypeInt64, value)
_node.MaxRss = &value _node.MaxRss = &value
} }
if value, ok := dpc.mutation.UTime(); ok { if value, ok := _c.mutation.UTime(); ok {
_spec.SetField(dbpackage.FieldUTime, field.TypeInt64, value) _spec.SetField(dbpackage.FieldUTime, field.TypeInt64, value)
_node.UTime = &value _node.UTime = &value
} }
if value, ok := dpc.mutation.STime(); ok { if value, ok := _c.mutation.STime(); ok {
_spec.SetField(dbpackage.FieldSTime, field.TypeInt64, value) _spec.SetField(dbpackage.FieldSTime, field.TypeInt64, value)
_node.STime = &value _node.STime = &value
} }
if value, ok := dpc.mutation.IoIn(); ok { if value, ok := _c.mutation.IoIn(); ok {
_spec.SetField(dbpackage.FieldIoIn, field.TypeInt64, value) _spec.SetField(dbpackage.FieldIoIn, field.TypeInt64, value)
_node.IoIn = &value _node.IoIn = &value
} }
if value, ok := dpc.mutation.IoOut(); ok { if value, ok := _c.mutation.IoOut(); ok {
_spec.SetField(dbpackage.FieldIoOut, field.TypeInt64, value) _spec.SetField(dbpackage.FieldIoOut, field.TypeInt64, value)
_node.IoOut = &value _node.IoOut = &value
} }
if value, ok := dpc.mutation.TagRev(); ok { if value, ok := _c.mutation.TagRev(); ok {
_spec.SetField(dbpackage.FieldTagRev, field.TypeString, value) _spec.SetField(dbpackage.FieldTagRev, field.TypeString, value)
_node.TagRev = &value _node.TagRev = &value
} }
@@ -475,16 +475,16 @@ type DBPackageCreateBulk struct {
} }
// Save creates the DBPackage entities in the database. // Save creates the DBPackage entities in the database.
func (dpcb *DBPackageCreateBulk) Save(ctx context.Context) ([]*DBPackage, error) { func (_c *DBPackageCreateBulk) Save(ctx context.Context) ([]*DBPackage, error) {
if dpcb.err != nil { if _c.err != nil {
return nil, dpcb.err return nil, _c.err
} }
specs := make([]*sqlgraph.CreateSpec, len(dpcb.builders)) specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
nodes := make([]*DBPackage, len(dpcb.builders)) nodes := make([]*DBPackage, len(_c.builders))
mutators := make([]Mutator, len(dpcb.builders)) mutators := make([]Mutator, len(_c.builders))
for i := range dpcb.builders { for i := range _c.builders {
func(i int, root context.Context) { func(i int, root context.Context) {
builder := dpcb.builders[i] builder := _c.builders[i]
builder.defaults() builder.defaults()
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*DBPackageMutation) mutation, ok := m.(*DBPackageMutation)
@@ -498,11 +498,11 @@ func (dpcb *DBPackageCreateBulk) Save(ctx context.Context) ([]*DBPackage, error)
var err error var err error
nodes[i], specs[i] = builder.createSpec() nodes[i], specs[i] = builder.createSpec()
if i < len(mutators)-1 { if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, dpcb.builders[i+1].mutation) _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
} else { } else {
spec := &sqlgraph.BatchCreateSpec{Nodes: specs} spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
// Invoke the actual operation on the latest mutation in the chain. // Invoke the actual operation on the latest mutation in the chain.
if err = sqlgraph.BatchCreate(ctx, dpcb.driver, spec); err != nil { if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
if sqlgraph.IsConstraintError(err) { if sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err} err = &ConstraintError{msg: err.Error(), wrap: err}
} }
@@ -526,7 +526,7 @@ func (dpcb *DBPackageCreateBulk) Save(ctx context.Context) ([]*DBPackage, error)
}(i, ctx) }(i, ctx)
} }
if len(mutators) > 0 { if len(mutators) > 0 {
if _, err := mutators[0].Mutate(ctx, dpcb.builders[0].mutation); err != nil { if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
return nil, err return nil, err
} }
} }
@@ -534,8 +534,8 @@ func (dpcb *DBPackageCreateBulk) Save(ctx context.Context) ([]*DBPackage, error)
} }
// SaveX is like Save, but panics if an error occurs. // SaveX is like Save, but panics if an error occurs.
func (dpcb *DBPackageCreateBulk) SaveX(ctx context.Context) []*DBPackage { func (_c *DBPackageCreateBulk) SaveX(ctx context.Context) []*DBPackage {
v, err := dpcb.Save(ctx) v, err := _c.Save(ctx)
if err != nil { if err != nil {
panic(err) panic(err)
} }
@@ -543,14 +543,14 @@ func (dpcb *DBPackageCreateBulk) SaveX(ctx context.Context) []*DBPackage {
} }
// Exec executes the query. // Exec executes the query.
func (dpcb *DBPackageCreateBulk) Exec(ctx context.Context) error { func (_c *DBPackageCreateBulk) Exec(ctx context.Context) error {
_, err := dpcb.Save(ctx) _, err := _c.Save(ctx)
return err return err
} }
// ExecX is like Exec, but panics if an error occurs. // ExecX is like Exec, but panics if an error occurs.
func (dpcb *DBPackageCreateBulk) ExecX(ctx context.Context) { func (_c *DBPackageCreateBulk) ExecX(ctx context.Context) {
if err := dpcb.Exec(ctx); err != nil { if err := _c.Exec(ctx); err != nil {
panic(err) panic(err)
} }
} }

View File

@@ -20,56 +20,56 @@ type DBPackageDelete struct {
} }
// Where appends a list predicates to the DBPackageDelete builder. // Where appends a list predicates to the DBPackageDelete builder.
func (dpd *DBPackageDelete) Where(ps ...predicate.DBPackage) *DBPackageDelete { func (_d *DBPackageDelete) Where(ps ...predicate.DBPackage) *DBPackageDelete {
dpd.mutation.Where(ps...) _d.mutation.Where(ps...)
return dpd return _d
} }
// Exec executes the deletion query and returns how many vertices were deleted. // Exec executes the deletion query and returns how many vertices were deleted.
func (dpd *DBPackageDelete) Exec(ctx context.Context) (int, error) { func (_d *DBPackageDelete) Exec(ctx context.Context) (int, error) {
return withHooks(ctx, dpd.sqlExec, dpd.mutation, dpd.hooks) return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
} }
// ExecX is like Exec, but panics if an error occurs. // ExecX is like Exec, but panics if an error occurs.
func (dpd *DBPackageDelete) ExecX(ctx context.Context) int { func (_d *DBPackageDelete) ExecX(ctx context.Context) int {
n, err := dpd.Exec(ctx) n, err := _d.Exec(ctx)
if err != nil { if err != nil {
panic(err) panic(err)
} }
return n return n
} }
func (dpd *DBPackageDelete) sqlExec(ctx context.Context) (int, error) { func (_d *DBPackageDelete) sqlExec(ctx context.Context) (int, error) {
_spec := sqlgraph.NewDeleteSpec(dbpackage.Table, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt)) _spec := sqlgraph.NewDeleteSpec(dbpackage.Table, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt))
if ps := dpd.mutation.predicates; len(ps) > 0 { if ps := _d.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) { _spec.Predicate = func(selector *sql.Selector) {
for i := range ps { for i := range ps {
ps[i](selector) ps[i](selector)
} }
} }
} }
affected, err := sqlgraph.DeleteNodes(ctx, dpd.driver, _spec) affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
if err != nil && sqlgraph.IsConstraintError(err) { if err != nil && sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err} err = &ConstraintError{msg: err.Error(), wrap: err}
} }
dpd.mutation.done = true _d.mutation.done = true
return affected, err return affected, err
} }
// DBPackageDeleteOne is the builder for deleting a single DBPackage entity. // DBPackageDeleteOne is the builder for deleting a single DBPackage entity.
type DBPackageDeleteOne struct { type DBPackageDeleteOne struct {
dpd *DBPackageDelete _d *DBPackageDelete
} }
// Where appends a list predicates to the DBPackageDelete builder. // Where appends a list predicates to the DBPackageDelete builder.
func (dpdo *DBPackageDeleteOne) Where(ps ...predicate.DBPackage) *DBPackageDeleteOne { func (_d *DBPackageDeleteOne) Where(ps ...predicate.DBPackage) *DBPackageDeleteOne {
dpdo.dpd.mutation.Where(ps...) _d._d.mutation.Where(ps...)
return dpdo return _d
} }
// Exec executes the deletion query. // Exec executes the deletion query.
func (dpdo *DBPackageDeleteOne) Exec(ctx context.Context) error { func (_d *DBPackageDeleteOne) Exec(ctx context.Context) error {
n, err := dpdo.dpd.Exec(ctx) n, err := _d._d.Exec(ctx)
switch { switch {
case err != nil: case err != nil:
return err return err
@@ -81,8 +81,8 @@ func (dpdo *DBPackageDeleteOne) Exec(ctx context.Context) error {
} }
// ExecX is like Exec, but panics if an error occurs. // ExecX is like Exec, but panics if an error occurs.
func (dpdo *DBPackageDeleteOne) ExecX(ctx context.Context) { func (_d *DBPackageDeleteOne) ExecX(ctx context.Context) {
if err := dpdo.Exec(ctx); err != nil { if err := _d.Exec(ctx); err != nil {
panic(err) panic(err)
} }
} }

View File

@@ -29,40 +29,40 @@ type DBPackageQuery struct {
} }
// Where adds a new predicate for the DBPackageQuery builder. // Where adds a new predicate for the DBPackageQuery builder.
func (dpq *DBPackageQuery) Where(ps ...predicate.DBPackage) *DBPackageQuery { func (_q *DBPackageQuery) Where(ps ...predicate.DBPackage) *DBPackageQuery {
dpq.predicates = append(dpq.predicates, ps...) _q.predicates = append(_q.predicates, ps...)
return dpq return _q
} }
// Limit the number of records to be returned by this query. // Limit the number of records to be returned by this query.
func (dpq *DBPackageQuery) Limit(limit int) *DBPackageQuery { func (_q *DBPackageQuery) Limit(limit int) *DBPackageQuery {
dpq.ctx.Limit = &limit _q.ctx.Limit = &limit
return dpq return _q
} }
// Offset to start from. // Offset to start from.
func (dpq *DBPackageQuery) Offset(offset int) *DBPackageQuery { func (_q *DBPackageQuery) Offset(offset int) *DBPackageQuery {
dpq.ctx.Offset = &offset _q.ctx.Offset = &offset
return dpq return _q
} }
// Unique configures the query builder to filter duplicate records on query. // Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method. // By default, unique is set to true, and can be disabled using this method.
func (dpq *DBPackageQuery) Unique(unique bool) *DBPackageQuery { func (_q *DBPackageQuery) Unique(unique bool) *DBPackageQuery {
dpq.ctx.Unique = &unique _q.ctx.Unique = &unique
return dpq return _q
} }
// Order specifies how the records should be ordered. // Order specifies how the records should be ordered.
func (dpq *DBPackageQuery) Order(o ...dbpackage.OrderOption) *DBPackageQuery { func (_q *DBPackageQuery) Order(o ...dbpackage.OrderOption) *DBPackageQuery {
dpq.order = append(dpq.order, o...) _q.order = append(_q.order, o...)
return dpq return _q
} }
// First returns the first DBPackage entity from the query. // First returns the first DBPackage entity from the query.
// Returns a *NotFoundError when no DBPackage was found. // Returns a *NotFoundError when no DBPackage was found.
func (dpq *DBPackageQuery) First(ctx context.Context) (*DBPackage, error) { func (_q *DBPackageQuery) First(ctx context.Context) (*DBPackage, error) {
nodes, err := dpq.Limit(1).All(setContextOp(ctx, dpq.ctx, ent.OpQueryFirst)) nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -73,8 +73,8 @@ func (dpq *DBPackageQuery) First(ctx context.Context) (*DBPackage, error) {
} }
// FirstX is like First, but panics if an error occurs. // FirstX is like First, but panics if an error occurs.
func (dpq *DBPackageQuery) FirstX(ctx context.Context) *DBPackage { func (_q *DBPackageQuery) FirstX(ctx context.Context) *DBPackage {
node, err := dpq.First(ctx) node, err := _q.First(ctx)
if err != nil && !IsNotFound(err) { if err != nil && !IsNotFound(err) {
panic(err) panic(err)
} }
@@ -83,9 +83,9 @@ func (dpq *DBPackageQuery) FirstX(ctx context.Context) *DBPackage {
// FirstID returns the first DBPackage ID from the query. // FirstID returns the first DBPackage ID from the query.
// Returns a *NotFoundError when no DBPackage ID was found. // Returns a *NotFoundError when no DBPackage ID was found.
func (dpq *DBPackageQuery) FirstID(ctx context.Context) (id int, err error) { func (_q *DBPackageQuery) FirstID(ctx context.Context) (id int, err error) {
var ids []int var ids []int
if ids, err = dpq.Limit(1).IDs(setContextOp(ctx, dpq.ctx, ent.OpQueryFirstID)); err != nil { if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
return return
} }
if len(ids) == 0 { if len(ids) == 0 {
@@ -96,8 +96,8 @@ func (dpq *DBPackageQuery) FirstID(ctx context.Context) (id int, err error) {
} }
// FirstIDX is like FirstID, but panics if an error occurs. // FirstIDX is like FirstID, but panics if an error occurs.
func (dpq *DBPackageQuery) FirstIDX(ctx context.Context) int { func (_q *DBPackageQuery) FirstIDX(ctx context.Context) int {
id, err := dpq.FirstID(ctx) id, err := _q.FirstID(ctx)
if err != nil && !IsNotFound(err) { if err != nil && !IsNotFound(err) {
panic(err) panic(err)
} }
@@ -107,8 +107,8 @@ func (dpq *DBPackageQuery) FirstIDX(ctx context.Context) int {
// Only returns a single DBPackage entity found by the query, ensuring it only returns one. // Only returns a single DBPackage entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one DBPackage entity is found. // Returns a *NotSingularError when more than one DBPackage entity is found.
// Returns a *NotFoundError when no DBPackage entities are found. // Returns a *NotFoundError when no DBPackage entities are found.
func (dpq *DBPackageQuery) Only(ctx context.Context) (*DBPackage, error) { func (_q *DBPackageQuery) Only(ctx context.Context) (*DBPackage, error) {
nodes, err := dpq.Limit(2).All(setContextOp(ctx, dpq.ctx, ent.OpQueryOnly)) nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -123,8 +123,8 @@ func (dpq *DBPackageQuery) Only(ctx context.Context) (*DBPackage, error) {
} }
// OnlyX is like Only, but panics if an error occurs. // OnlyX is like Only, but panics if an error occurs.
func (dpq *DBPackageQuery) OnlyX(ctx context.Context) *DBPackage { func (_q *DBPackageQuery) OnlyX(ctx context.Context) *DBPackage {
node, err := dpq.Only(ctx) node, err := _q.Only(ctx)
if err != nil { if err != nil {
panic(err) panic(err)
} }
@@ -134,9 +134,9 @@ func (dpq *DBPackageQuery) OnlyX(ctx context.Context) *DBPackage {
// OnlyID is like Only, but returns the only DBPackage ID in the query. // OnlyID is like Only, but returns the only DBPackage ID in the query.
// Returns a *NotSingularError when more than one DBPackage ID is found. // Returns a *NotSingularError when more than one DBPackage ID is found.
// Returns a *NotFoundError when no entities are found. // Returns a *NotFoundError when no entities are found.
func (dpq *DBPackageQuery) OnlyID(ctx context.Context) (id int, err error) { func (_q *DBPackageQuery) OnlyID(ctx context.Context) (id int, err error) {
var ids []int var ids []int
if ids, err = dpq.Limit(2).IDs(setContextOp(ctx, dpq.ctx, ent.OpQueryOnlyID)); err != nil { if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
return return
} }
switch len(ids) { switch len(ids) {
@@ -151,8 +151,8 @@ func (dpq *DBPackageQuery) OnlyID(ctx context.Context) (id int, err error) {
} }
// OnlyIDX is like OnlyID, but panics if an error occurs. // OnlyIDX is like OnlyID, but panics if an error occurs.
func (dpq *DBPackageQuery) OnlyIDX(ctx context.Context) int { func (_q *DBPackageQuery) OnlyIDX(ctx context.Context) int {
id, err := dpq.OnlyID(ctx) id, err := _q.OnlyID(ctx)
if err != nil { if err != nil {
panic(err) panic(err)
} }
@@ -160,18 +160,18 @@ func (dpq *DBPackageQuery) OnlyIDX(ctx context.Context) int {
} }
// All executes the query and returns a list of DBPackages. // All executes the query and returns a list of DBPackages.
func (dpq *DBPackageQuery) All(ctx context.Context) ([]*DBPackage, error) { func (_q *DBPackageQuery) All(ctx context.Context) ([]*DBPackage, error) {
ctx = setContextOp(ctx, dpq.ctx, ent.OpQueryAll) ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
if err := dpq.prepareQuery(ctx); err != nil { if err := _q.prepareQuery(ctx); err != nil {
return nil, err return nil, err
} }
qr := querierAll[[]*DBPackage, *DBPackageQuery]() qr := querierAll[[]*DBPackage, *DBPackageQuery]()
return withInterceptors[[]*DBPackage](ctx, dpq, qr, dpq.inters) return withInterceptors[[]*DBPackage](ctx, _q, qr, _q.inters)
} }
// AllX is like All, but panics if an error occurs. // AllX is like All, but panics if an error occurs.
func (dpq *DBPackageQuery) AllX(ctx context.Context) []*DBPackage { func (_q *DBPackageQuery) AllX(ctx context.Context) []*DBPackage {
nodes, err := dpq.All(ctx) nodes, err := _q.All(ctx)
if err != nil { if err != nil {
panic(err) panic(err)
} }
@@ -179,20 +179,20 @@ func (dpq *DBPackageQuery) AllX(ctx context.Context) []*DBPackage {
} }
// IDs executes the query and returns a list of DBPackage IDs. // IDs executes the query and returns a list of DBPackage IDs.
func (dpq *DBPackageQuery) IDs(ctx context.Context) (ids []int, err error) { func (_q *DBPackageQuery) IDs(ctx context.Context) (ids []int, err error) {
if dpq.ctx.Unique == nil && dpq.path != nil { if _q.ctx.Unique == nil && _q.path != nil {
dpq.Unique(true) _q.Unique(true)
} }
ctx = setContextOp(ctx, dpq.ctx, ent.OpQueryIDs) ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
if err = dpq.Select(dbpackage.FieldID).Scan(ctx, &ids); err != nil { if err = _q.Select(dbpackage.FieldID).Scan(ctx, &ids); err != nil {
return nil, err return nil, err
} }
return ids, nil return ids, nil
} }
// IDsX is like IDs, but panics if an error occurs. // IDsX is like IDs, but panics if an error occurs.
func (dpq *DBPackageQuery) IDsX(ctx context.Context) []int { func (_q *DBPackageQuery) IDsX(ctx context.Context) []int {
ids, err := dpq.IDs(ctx) ids, err := _q.IDs(ctx)
if err != nil { if err != nil {
panic(err) panic(err)
} }
@@ -200,17 +200,17 @@ func (dpq *DBPackageQuery) IDsX(ctx context.Context) []int {
} }
// Count returns the count of the given query. // Count returns the count of the given query.
func (dpq *DBPackageQuery) Count(ctx context.Context) (int, error) { func (_q *DBPackageQuery) Count(ctx context.Context) (int, error) {
ctx = setContextOp(ctx, dpq.ctx, ent.OpQueryCount) ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
if err := dpq.prepareQuery(ctx); err != nil { if err := _q.prepareQuery(ctx); err != nil {
return 0, err return 0, err
} }
return withInterceptors[int](ctx, dpq, querierCount[*DBPackageQuery](), dpq.inters) return withInterceptors[int](ctx, _q, querierCount[*DBPackageQuery](), _q.inters)
} }
// CountX is like Count, but panics if an error occurs. // CountX is like Count, but panics if an error occurs.
func (dpq *DBPackageQuery) CountX(ctx context.Context) int { func (_q *DBPackageQuery) CountX(ctx context.Context) int {
count, err := dpq.Count(ctx) count, err := _q.Count(ctx)
if err != nil { if err != nil {
panic(err) panic(err)
} }
@@ -218,9 +218,9 @@ func (dpq *DBPackageQuery) CountX(ctx context.Context) int {
} }
// Exist returns true if the query has elements in the graph. // Exist returns true if the query has elements in the graph.
func (dpq *DBPackageQuery) Exist(ctx context.Context) (bool, error) { func (_q *DBPackageQuery) Exist(ctx context.Context) (bool, error) {
ctx = setContextOp(ctx, dpq.ctx, ent.OpQueryExist) ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
switch _, err := dpq.FirstID(ctx); { switch _, err := _q.FirstID(ctx); {
case IsNotFound(err): case IsNotFound(err):
return false, nil return false, nil
case err != nil: case err != nil:
@@ -231,8 +231,8 @@ func (dpq *DBPackageQuery) Exist(ctx context.Context) (bool, error) {
} }
// ExistX is like Exist, but panics if an error occurs. // ExistX is like Exist, but panics if an error occurs.
func (dpq *DBPackageQuery) ExistX(ctx context.Context) bool { func (_q *DBPackageQuery) ExistX(ctx context.Context) bool {
exist, err := dpq.Exist(ctx) exist, err := _q.Exist(ctx)
if err != nil { if err != nil {
panic(err) panic(err)
} }
@@ -241,20 +241,20 @@ func (dpq *DBPackageQuery) ExistX(ctx context.Context) bool {
// Clone returns a duplicate of the DBPackageQuery builder, including all associated steps. It can be // Clone returns a duplicate of the DBPackageQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made. // used to prepare common query builders and use them differently after the clone is made.
func (dpq *DBPackageQuery) Clone() *DBPackageQuery { func (_q *DBPackageQuery) Clone() *DBPackageQuery {
if dpq == nil { if _q == nil {
return nil return nil
} }
return &DBPackageQuery{ return &DBPackageQuery{
config: dpq.config, config: _q.config,
ctx: dpq.ctx.Clone(), ctx: _q.ctx.Clone(),
order: append([]dbpackage.OrderOption{}, dpq.order...), order: append([]dbpackage.OrderOption{}, _q.order...),
inters: append([]Interceptor{}, dpq.inters...), inters: append([]Interceptor{}, _q.inters...),
predicates: append([]predicate.DBPackage{}, dpq.predicates...), predicates: append([]predicate.DBPackage{}, _q.predicates...),
// clone intermediate query. // clone intermediate query.
sql: dpq.sql.Clone(), sql: _q.sql.Clone(),
path: dpq.path, path: _q.path,
modifiers: append([]func(*sql.Selector){}, dpq.modifiers...), modifiers: append([]func(*sql.Selector){}, _q.modifiers...),
} }
} }
@@ -272,10 +272,10 @@ func (dpq *DBPackageQuery) Clone() *DBPackageQuery {
// GroupBy(dbpackage.FieldPkgbase). // GroupBy(dbpackage.FieldPkgbase).
// Aggregate(ent.Count()). // Aggregate(ent.Count()).
// Scan(ctx, &v) // Scan(ctx, &v)
func (dpq *DBPackageQuery) GroupBy(field string, fields ...string) *DBPackageGroupBy { func (_q *DBPackageQuery) GroupBy(field string, fields ...string) *DBPackageGroupBy {
dpq.ctx.Fields = append([]string{field}, fields...) _q.ctx.Fields = append([]string{field}, fields...)
grbuild := &DBPackageGroupBy{build: dpq} grbuild := &DBPackageGroupBy{build: _q}
grbuild.flds = &dpq.ctx.Fields grbuild.flds = &_q.ctx.Fields
grbuild.label = dbpackage.Label grbuild.label = dbpackage.Label
grbuild.scan = grbuild.Scan grbuild.scan = grbuild.Scan
return grbuild return grbuild
@@ -293,65 +293,65 @@ func (dpq *DBPackageQuery) GroupBy(field string, fields ...string) *DBPackageGro
// client.DBPackage.Query(). // client.DBPackage.Query().
// Select(dbpackage.FieldPkgbase). // Select(dbpackage.FieldPkgbase).
// Scan(ctx, &v) // Scan(ctx, &v)
func (dpq *DBPackageQuery) Select(fields ...string) *DBPackageSelect { func (_q *DBPackageQuery) Select(fields ...string) *DBPackageSelect {
dpq.ctx.Fields = append(dpq.ctx.Fields, fields...) _q.ctx.Fields = append(_q.ctx.Fields, fields...)
sbuild := &DBPackageSelect{DBPackageQuery: dpq} sbuild := &DBPackageSelect{DBPackageQuery: _q}
sbuild.label = dbpackage.Label sbuild.label = dbpackage.Label
sbuild.flds, sbuild.scan = &dpq.ctx.Fields, sbuild.Scan sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
return sbuild return sbuild
} }
// Aggregate returns a DBPackageSelect configured with the given aggregations. // Aggregate returns a DBPackageSelect configured with the given aggregations.
func (dpq *DBPackageQuery) Aggregate(fns ...AggregateFunc) *DBPackageSelect { func (_q *DBPackageQuery) Aggregate(fns ...AggregateFunc) *DBPackageSelect {
return dpq.Select().Aggregate(fns...) return _q.Select().Aggregate(fns...)
} }
func (dpq *DBPackageQuery) prepareQuery(ctx context.Context) error { func (_q *DBPackageQuery) prepareQuery(ctx context.Context) error {
for _, inter := range dpq.inters { for _, inter := range _q.inters {
if inter == nil { if inter == nil {
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
} }
if trv, ok := inter.(Traverser); ok { if trv, ok := inter.(Traverser); ok {
if err := trv.Traverse(ctx, dpq); err != nil { if err := trv.Traverse(ctx, _q); err != nil {
return err return err
} }
} }
} }
for _, f := range dpq.ctx.Fields { for _, f := range _q.ctx.Fields {
if !dbpackage.ValidColumn(f) { if !dbpackage.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
} }
} }
if dpq.path != nil { if _q.path != nil {
prev, err := dpq.path(ctx) prev, err := _q.path(ctx)
if err != nil { if err != nil {
return err return err
} }
dpq.sql = prev _q.sql = prev
} }
return nil return nil
} }
func (dpq *DBPackageQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*DBPackage, error) { func (_q *DBPackageQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*DBPackage, error) {
var ( var (
nodes = []*DBPackage{} nodes = []*DBPackage{}
_spec = dpq.querySpec() _spec = _q.querySpec()
) )
_spec.ScanValues = func(columns []string) ([]any, error) { _spec.ScanValues = func(columns []string) ([]any, error) {
return (*DBPackage).scanValues(nil, columns) return (*DBPackage).scanValues(nil, columns)
} }
_spec.Assign = func(columns []string, values []any) error { _spec.Assign = func(columns []string, values []any) error {
node := &DBPackage{config: dpq.config} node := &DBPackage{config: _q.config}
nodes = append(nodes, node) nodes = append(nodes, node)
return node.assignValues(columns, values) return node.assignValues(columns, values)
} }
if len(dpq.modifiers) > 0 { if len(_q.modifiers) > 0 {
_spec.Modifiers = dpq.modifiers _spec.Modifiers = _q.modifiers
} }
for i := range hooks { for i := range hooks {
hooks[i](ctx, _spec) hooks[i](ctx, _spec)
} }
if err := sqlgraph.QueryNodes(ctx, dpq.driver, _spec); err != nil { if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
return nil, err return nil, err
} }
if len(nodes) == 0 { if len(nodes) == 0 {
@@ -360,27 +360,27 @@ func (dpq *DBPackageQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*D
return nodes, nil return nodes, nil
} }
func (dpq *DBPackageQuery) sqlCount(ctx context.Context) (int, error) { func (_q *DBPackageQuery) sqlCount(ctx context.Context) (int, error) {
_spec := dpq.querySpec() _spec := _q.querySpec()
if len(dpq.modifiers) > 0 { if len(_q.modifiers) > 0 {
_spec.Modifiers = dpq.modifiers _spec.Modifiers = _q.modifiers
} }
_spec.Node.Columns = dpq.ctx.Fields _spec.Node.Columns = _q.ctx.Fields
if len(dpq.ctx.Fields) > 0 { if len(_q.ctx.Fields) > 0 {
_spec.Unique = dpq.ctx.Unique != nil && *dpq.ctx.Unique _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
} }
return sqlgraph.CountNodes(ctx, dpq.driver, _spec) return sqlgraph.CountNodes(ctx, _q.driver, _spec)
} }
func (dpq *DBPackageQuery) querySpec() *sqlgraph.QuerySpec { func (_q *DBPackageQuery) querySpec() *sqlgraph.QuerySpec {
_spec := sqlgraph.NewQuerySpec(dbpackage.Table, dbpackage.Columns, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt)) _spec := sqlgraph.NewQuerySpec(dbpackage.Table, dbpackage.Columns, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt))
_spec.From = dpq.sql _spec.From = _q.sql
if unique := dpq.ctx.Unique; unique != nil { if unique := _q.ctx.Unique; unique != nil {
_spec.Unique = *unique _spec.Unique = *unique
} else if dpq.path != nil { } else if _q.path != nil {
_spec.Unique = true _spec.Unique = true
} }
if fields := dpq.ctx.Fields; len(fields) > 0 { if fields := _q.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, dbpackage.FieldID) _spec.Node.Columns = append(_spec.Node.Columns, dbpackage.FieldID)
for i := range fields { for i := range fields {
@@ -389,20 +389,20 @@ func (dpq *DBPackageQuery) querySpec() *sqlgraph.QuerySpec {
} }
} }
} }
if ps := dpq.predicates; len(ps) > 0 { if ps := _q.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) { _spec.Predicate = func(selector *sql.Selector) {
for i := range ps { for i := range ps {
ps[i](selector) ps[i](selector)
} }
} }
} }
if limit := dpq.ctx.Limit; limit != nil { if limit := _q.ctx.Limit; limit != nil {
_spec.Limit = *limit _spec.Limit = *limit
} }
if offset := dpq.ctx.Offset; offset != nil { if offset := _q.ctx.Offset; offset != nil {
_spec.Offset = *offset _spec.Offset = *offset
} }
if ps := dpq.order; len(ps) > 0 { if ps := _q.order; len(ps) > 0 {
_spec.Order = func(selector *sql.Selector) { _spec.Order = func(selector *sql.Selector) {
for i := range ps { for i := range ps {
ps[i](selector) ps[i](selector)
@@ -412,45 +412,45 @@ func (dpq *DBPackageQuery) querySpec() *sqlgraph.QuerySpec {
return _spec return _spec
} }
func (dpq *DBPackageQuery) sqlQuery(ctx context.Context) *sql.Selector { func (_q *DBPackageQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(dpq.driver.Dialect()) builder := sql.Dialect(_q.driver.Dialect())
t1 := builder.Table(dbpackage.Table) t1 := builder.Table(dbpackage.Table)
columns := dpq.ctx.Fields columns := _q.ctx.Fields
if len(columns) == 0 { if len(columns) == 0 {
columns = dbpackage.Columns columns = dbpackage.Columns
} }
selector := builder.Select(t1.Columns(columns...)...).From(t1) selector := builder.Select(t1.Columns(columns...)...).From(t1)
if dpq.sql != nil { if _q.sql != nil {
selector = dpq.sql selector = _q.sql
selector.Select(selector.Columns(columns...)...) selector.Select(selector.Columns(columns...)...)
} }
if dpq.ctx.Unique != nil && *dpq.ctx.Unique { if _q.ctx.Unique != nil && *_q.ctx.Unique {
selector.Distinct() selector.Distinct()
} }
for _, m := range dpq.modifiers { for _, m := range _q.modifiers {
m(selector) m(selector)
} }
for _, p := range dpq.predicates { for _, p := range _q.predicates {
p(selector) p(selector)
} }
for _, p := range dpq.order { for _, p := range _q.order {
p(selector) p(selector)
} }
if offset := dpq.ctx.Offset; offset != nil { if offset := _q.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start // limit is mandatory for offset clause. We start
// with default value, and override it below if needed. // with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32) selector.Offset(*offset).Limit(math.MaxInt32)
} }
if limit := dpq.ctx.Limit; limit != nil { if limit := _q.ctx.Limit; limit != nil {
selector.Limit(*limit) selector.Limit(*limit)
} }
return selector return selector
} }
// Modify adds a query modifier for attaching custom logic to queries. // Modify adds a query modifier for attaching custom logic to queries.
func (dpq *DBPackageQuery) Modify(modifiers ...func(s *sql.Selector)) *DBPackageSelect { func (_q *DBPackageQuery) Modify(modifiers ...func(s *sql.Selector)) *DBPackageSelect {
dpq.modifiers = append(dpq.modifiers, modifiers...) _q.modifiers = append(_q.modifiers, modifiers...)
return dpq.Select() return _q.Select()
} }
// DBPackageGroupBy is the group-by builder for DBPackage entities. // DBPackageGroupBy is the group-by builder for DBPackage entities.
@@ -460,41 +460,41 @@ type DBPackageGroupBy struct {
} }
// Aggregate adds the given aggregation functions to the group-by query. // Aggregate adds the given aggregation functions to the group-by query.
func (dpgb *DBPackageGroupBy) Aggregate(fns ...AggregateFunc) *DBPackageGroupBy { func (_g *DBPackageGroupBy) Aggregate(fns ...AggregateFunc) *DBPackageGroupBy {
dpgb.fns = append(dpgb.fns, fns...) _g.fns = append(_g.fns, fns...)
return dpgb return _g
} }
// Scan applies the selector query and scans the result into the given value. // Scan applies the selector query and scans the result into the given value.
func (dpgb *DBPackageGroupBy) Scan(ctx context.Context, v any) error { func (_g *DBPackageGroupBy) Scan(ctx context.Context, v any) error {
ctx = setContextOp(ctx, dpgb.build.ctx, ent.OpQueryGroupBy) ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
if err := dpgb.build.prepareQuery(ctx); err != nil { if err := _g.build.prepareQuery(ctx); err != nil {
return err return err
} }
return scanWithInterceptors[*DBPackageQuery, *DBPackageGroupBy](ctx, dpgb.build, dpgb, dpgb.build.inters, v) return scanWithInterceptors[*DBPackageQuery, *DBPackageGroupBy](ctx, _g.build, _g, _g.build.inters, v)
} }
func (dpgb *DBPackageGroupBy) sqlScan(ctx context.Context, root *DBPackageQuery, v any) error { func (_g *DBPackageGroupBy) sqlScan(ctx context.Context, root *DBPackageQuery, v any) error {
selector := root.sqlQuery(ctx).Select() selector := root.sqlQuery(ctx).Select()
aggregation := make([]string, 0, len(dpgb.fns)) aggregation := make([]string, 0, len(_g.fns))
for _, fn := range dpgb.fns { for _, fn := range _g.fns {
aggregation = append(aggregation, fn(selector)) aggregation = append(aggregation, fn(selector))
} }
if len(selector.SelectedColumns()) == 0 { if len(selector.SelectedColumns()) == 0 {
columns := make([]string, 0, len(*dpgb.flds)+len(dpgb.fns)) columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
for _, f := range *dpgb.flds { for _, f := range *_g.flds {
columns = append(columns, selector.C(f)) columns = append(columns, selector.C(f))
} }
columns = append(columns, aggregation...) columns = append(columns, aggregation...)
selector.Select(columns...) selector.Select(columns...)
} }
selector.GroupBy(selector.Columns(*dpgb.flds...)...) selector.GroupBy(selector.Columns(*_g.flds...)...)
if err := selector.Err(); err != nil { if err := selector.Err(); err != nil {
return err return err
} }
rows := &sql.Rows{} rows := &sql.Rows{}
query, args := selector.Query() query, args := selector.Query()
if err := dpgb.build.driver.Query(ctx, query, args, rows); err != nil { if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
return err return err
} }
defer rows.Close() defer rows.Close()
@@ -508,27 +508,27 @@ type DBPackageSelect struct {
} }
// Aggregate adds the given aggregation functions to the selector query. // Aggregate adds the given aggregation functions to the selector query.
func (dps *DBPackageSelect) Aggregate(fns ...AggregateFunc) *DBPackageSelect { func (_s *DBPackageSelect) Aggregate(fns ...AggregateFunc) *DBPackageSelect {
dps.fns = append(dps.fns, fns...) _s.fns = append(_s.fns, fns...)
return dps return _s
} }
// Scan applies the selector query and scans the result into the given value. // Scan applies the selector query and scans the result into the given value.
func (dps *DBPackageSelect) Scan(ctx context.Context, v any) error { func (_s *DBPackageSelect) Scan(ctx context.Context, v any) error {
ctx = setContextOp(ctx, dps.ctx, ent.OpQuerySelect) ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
if err := dps.prepareQuery(ctx); err != nil { if err := _s.prepareQuery(ctx); err != nil {
return err return err
} }
return scanWithInterceptors[*DBPackageQuery, *DBPackageSelect](ctx, dps.DBPackageQuery, dps, dps.inters, v) return scanWithInterceptors[*DBPackageQuery, *DBPackageSelect](ctx, _s.DBPackageQuery, _s, _s.inters, v)
} }
func (dps *DBPackageSelect) sqlScan(ctx context.Context, root *DBPackageQuery, v any) error { func (_s *DBPackageSelect) sqlScan(ctx context.Context, root *DBPackageQuery, v any) error {
selector := root.sqlQuery(ctx) selector := root.sqlQuery(ctx)
aggregation := make([]string, 0, len(dps.fns)) aggregation := make([]string, 0, len(_s.fns))
for _, fn := range dps.fns { for _, fn := range _s.fns {
aggregation = append(aggregation, fn(selector)) aggregation = append(aggregation, fn(selector))
} }
switch n := len(*dps.selector.flds); { switch n := len(*_s.selector.flds); {
case n == 0 && len(aggregation) > 0: case n == 0 && len(aggregation) > 0:
selector.Select(aggregation...) selector.Select(aggregation...)
case n != 0 && len(aggregation) > 0: case n != 0 && len(aggregation) > 0:
@@ -536,7 +536,7 @@ func (dps *DBPackageSelect) sqlScan(ctx context.Context, root *DBPackageQuery, v
} }
rows := &sql.Rows{} rows := &sql.Rows{}
query, args := selector.Query() query, args := selector.Query()
if err := dps.driver.Query(ctx, query, args, rows); err != nil { if err := _s.driver.Query(ctx, query, args, rows); err != nil {
return err return err
} }
defer rows.Close() defer rows.Close()
@@ -544,7 +544,7 @@ func (dps *DBPackageSelect) sqlScan(ctx context.Context, root *DBPackageQuery, v
} }
// Modify adds a query modifier for attaching custom logic to queries. // Modify adds a query modifier for attaching custom logic to queries.
func (dps *DBPackageSelect) Modify(modifiers ...func(s *sql.Selector)) *DBPackageSelect { func (_s *DBPackageSelect) Modify(modifiers ...func(s *sql.Selector)) *DBPackageSelect {
dps.modifiers = append(dps.modifiers, modifiers...) _s.modifiers = append(_s.modifiers, modifiers...)
return dps return _s
} }

File diff suppressed because it is too large Load Diff

View File

@@ -70,13 +70,13 @@ var (
) )
// checkColumn checks if the column exists in the given table. // checkColumn checks if the column exists in the given table.
func checkColumn(table, column string) error { func checkColumn(t, c string) error {
initCheck.Do(func() { initCheck.Do(func() {
columnCheck = sql.NewColumnCheck(map[string]func(string) bool{ columnCheck = sql.NewColumnCheck(map[string]func(string) bool{
dbpackage.Table: dbpackage.ValidColumn, dbpackage.Table: dbpackage.ValidColumn,
}) })
}) })
return columnCheck(table, column) return columnCheck(t, c)
} }
// Asc applies the given fields in ASC order. // Asc applies the given fields in ASC order.

View File

@@ -5,6 +5,6 @@ package runtime
// The schema-stitching logic is generated in somegit.dev/ALHP/ALHP.GO/ent/runtime.go // The schema-stitching logic is generated in somegit.dev/ALHP/ALHP.GO/ent/runtime.go
const ( const (
Version = "v0.14.1" // Version of ent codegen. Version = "v0.14.6" // Version of ent codegen.
Sum = "h1:fUERL506Pqr92EPHJqr8EYxbPioflJo6PudkrEA8a/s=" // Sum of ent codegen. Sum = "h1:/f2696BpwuWAEEG6PVGWflg6+Inrpq4pRWuNlWz/Skk=" // Sum of ent codegen.
) )

View File

@@ -20,13 +20,19 @@ common:
packager: "ALHP $march$ <alhp@harting.dev>" packager: "ALHP $march$ <alhp@harting.dev>"
makeflags: "-j$buildproc$" makeflags: "-j$buildproc$"
# https://somegit.dev/ALHP/ALHP.GO/issues/110 # https://somegit.dev/ALHP/ALHP.GO/issues/110
rustflags: "-Copt-level=3 -Ctarget-cpu=$march$ -Clink-arg=-z -Clink-arg=pack-relative-relocs" rustflags:
- "-Copt-level=3"
- "-Ctarget-cpu=$march$"
- "-Clink-arg=-z"
- "-Clink-arg=pack-relative-relocs"
ltoflags: ltoflags:
- "-falign-functions=32" # https://github.com/InBetweenNames/gentooLTO/issues/164 - "-falign-functions=32" # https://github.com/InBetweenNames/gentooLTO/issues/164
kcflags: " -march=$march$ -O3" kcflags: " -march=$march$ -O3"
kcppflags: " -march=$march$ -O3" kcppflags: " -march=$march$ -O3"
fcflags: "$CFLAGS" fcflags: "$FFLAGS"
fflags: "$CFLAGS" fflags:
- "-O2": "-O3"
- "-march=$march$"
lto: lto:
rustflags: rustflags:

51
go.mod
View File

@@ -1,48 +1,57 @@
module somegit.dev/ALHP/ALHP.GO module somegit.dev/ALHP/ALHP.GO
go 1.23.0 go 1.25.0
toolchain go1.23.1
require ( require (
entgo.io/ent v0.14.1 entgo.io/ent v0.14.6
github.com/Jguer/go-alpm/v2 v2.2.2 github.com/Jguer/go-alpm/v2 v2.3.1
github.com/Morganamilo/go-pacmanconf v0.0.0-20210502114700-cff030e927a5 github.com/Morganamilo/go-pacmanconf v0.0.0-20210502114700-cff030e927a5
github.com/Morganamilo/go-srcinfo v1.0.0 github.com/Morganamilo/go-srcinfo v1.0.0
github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500 github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500
github.com/gobwas/glob v0.2.3 github.com/gobwas/glob v0.2.3
github.com/google/uuid v1.6.0 github.com/google/uuid v1.6.0
github.com/jackc/pgx/v4 v4.18.3 github.com/jackc/pgx/v4 v4.18.3
github.com/otiai10/copy v1.14.0 github.com/otiai10/copy v1.14.1
github.com/prometheus/client_golang v1.23.2
github.com/sethvargo/go-retry v0.3.0 github.com/sethvargo/go-retry v0.3.0
github.com/sirupsen/logrus v1.9.3 github.com/sirupsen/logrus v1.9.4
github.com/wercker/journalhook v0.0.0-20230927020745-64542ffa4117 github.com/wercker/journalhook v0.0.0-20230927020745-64542ffa4117
gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v2 v2.4.0
) )
require ( require (
ariga.io/atlas v0.27.0 // indirect ariga.io/atlas v1.2.0 // indirect
github.com/agext/levenshtein v1.2.3 // indirect github.com/agext/levenshtein v1.2.3 // indirect
github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bmatcuk/doublestar v1.3.4 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect
github.com/go-openapi/inflect v0.21.0 // indirect github.com/go-openapi/inflect v0.21.5 // indirect
github.com/google/go-cmp v0.6.0 // indirect github.com/google/go-cmp v0.7.0 // indirect
github.com/hashicorp/hcl/v2 v2.22.0 // indirect github.com/hashicorp/hcl/v2 v2.24.0 // indirect
github.com/jackc/chunkreader/v2 v2.0.1 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect
github.com/jackc/pgconn v1.14.3 // indirect github.com/jackc/pgconn v1.14.3 // indirect
github.com/jackc/pgio v1.0.0 // indirect github.com/jackc/pgio v1.0.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgproto3/v2 v2.3.3 // indirect github.com/jackc/pgproto3/v2 v2.3.3 // indirect
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
github.com/jackc/pgtype v1.14.3 // indirect github.com/jackc/pgtype v1.14.4 // indirect
github.com/kr/pretty v0.3.0 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/zclconf/go-cty v1.15.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
golang.org/x/crypto v0.27.0 // indirect github.com/otiai10/mint v1.6.3 // indirect
golang.org/x/mod v0.21.0 // indirect github.com/prometheus/client_model v0.6.2 // indirect
golang.org/x/sync v0.8.0 // indirect github.com/prometheus/common v0.67.5 // indirect
golang.org/x/sys v0.25.0 // indirect github.com/prometheus/procfs v0.20.1 // indirect
golang.org/x/text v0.18.0 // indirect github.com/zclconf/go-cty v1.18.0 // indirect
golang.org/x/tools v0.25.0 // indirect github.com/zclconf/go-cty-yaml v1.2.0 // indirect
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect go.yaml.in/yaml/v2 v2.4.4 // indirect
golang.org/x/crypto v0.50.0 // indirect
golang.org/x/mod v0.35.0 // indirect
golang.org/x/sync v0.20.0 // indirect
golang.org/x/sys v0.43.0 // indirect
golang.org/x/text v0.36.0 // indirect
golang.org/x/tools v0.44.0 // indirect
google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
) )

118
go.sum
View File

@@ -1,12 +1,12 @@
ariga.io/atlas v0.27.0 h1:UHUQMTRx2Vz8/acJxEfcC/zL2wY739/vkZzt7L8HL8I= ariga.io/atlas v1.2.0 h1:PJZzG1zTA8SzMR1giJ7XnQe4VjfDWEbJjHnIaK45BFg=
ariga.io/atlas v0.27.0/go.mod h1:KPLc7Zj+nzoXfWshrcY1RwlOh94dsATQEy4UPrF2RkM= ariga.io/atlas v1.2.0/go.mod h1:vg7qWSatkNqs04Y4Lheg7vR4bGX0wy51Wz4FIMFVr3U=
entgo.io/ent v0.14.1 h1:fUERL506Pqr92EPHJqr8EYxbPioflJo6PudkrEA8a/s= entgo.io/ent v0.14.6 h1:/f2696BpwuWAEEG6PVGWflg6+Inrpq4pRWuNlWz/Skk=
entgo.io/ent v0.14.1/go.mod h1:MH6XLG0KXpkcDQhKiHfANZSzR55TJyPL5IGNpI8wpco= entgo.io/ent v0.14.6/go.mod h1:z46QBUdGC+BATwsedbDuREfSS0oSCV+csdEYlL4p73s=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/Jguer/go-alpm/v2 v2.2.2 h1:sPwUoZp1X5Tw6K6Ba1lWvVJfcgVNEGVcxARLBttZnC0= github.com/Jguer/go-alpm/v2 v2.3.1 h1:6xgyL28aI4E4gng/cH+kvCnpa95f4Qgmddmh8A7iTGk=
github.com/Jguer/go-alpm/v2 v2.2.2/go.mod h1:lfe8gSe83F/KERaQvEfrSqQ4n+8bES+ZIyKWR/gm3MI= github.com/Jguer/go-alpm/v2 v2.3.1/go.mod h1:lfe8gSe83F/KERaQvEfrSqQ4n+8bES+ZIyKWR/gm3MI=
github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
github.com/Morganamilo/go-pacmanconf v0.0.0-20210502114700-cff030e927a5 h1:TMscPjkb1ThXN32LuFY5bEYIcXZx3YlwzhS1GxNpn/c= github.com/Morganamilo/go-pacmanconf v0.0.0-20210502114700-cff030e927a5 h1:TMscPjkb1ThXN32LuFY5bEYIcXZx3YlwzhS1GxNpn/c=
@@ -17,8 +17,14 @@ github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7l
github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY=
github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bmatcuk/doublestar v1.3.4 h1:gPypJ5xD31uhX6Tf54sDPUOBXTqKH4c9aPY66CyQrS0=
github.com/bmatcuk/doublestar v1.3.4/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE=
github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500 h1:6lhrsTEnloDPXyeZBvSYvQf8u86jbKehZPVDDlkgDl4= github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500 h1:6lhrsTEnloDPXyeZBvSYvQf8u86jbKehZPVDDlkgDl4=
github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M= github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
@@ -26,14 +32,13 @@ github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-openapi/inflect v0.21.0 h1:FoBjBTQEcbg2cJUWX6uwL9OyIW8eqc9k4KhN4lfbeYk= github.com/go-openapi/inflect v0.21.5 h1:M2RCq6PPS3YbIaL7CXosGL3BbzAcmfBAT0nC3YfesZA=
github.com/go-openapi/inflect v0.21.0/go.mod h1:INezMuUu7SJQc2AyR3WO0DqqYUJSj8Kb4hBd7WtjlAw= github.com/go-openapi/inflect v0.21.5/go.mod h1:GypUyi6bU880NYurWaEH2CmH84zFDNd+EhhmzroHmB4=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
@@ -41,13 +46,13 @@ github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hashicorp/hcl/v2 v2.22.0 h1:hkZ3nCtqeJsDhPRFz5EA9iwcG1hNWGePOTw6oyul12M= github.com/hashicorp/hcl/v2 v2.24.0 h1:2QJdZ454DSsYGoaE6QheQZjtKZSUs9Nh2izTWiwQxvE=
github.com/hashicorp/hcl/v2 v2.22.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= github.com/hashicorp/hcl/v2 v2.24.0/go.mod h1:oGoO1FIQYfn/AgyOhlg9qLC6/nOJPX3qGbkZpYAcqfM=
github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
@@ -86,8 +91,8 @@ github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCM
github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM=
github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
github.com/jackc/pgtype v1.14.3 h1:h6W9cPuHsRWQFTWUZMAKMgG5jSwQI0Zurzdvlx3Plus= github.com/jackc/pgtype v1.14.4 h1:fKuNiCumbKTAIxQwXfB/nsrnkEI6bPJrrSiMKgbJ2j8=
github.com/jackc/pgtype v1.14.3/go.mod h1:aKeozOde08iifGosdJpz9MBZonJOUJxqNpPBcMJTlVA= github.com/jackc/pgtype v1.14.4/go.mod h1:aKeozOde08iifGosdJpz9MBZonJOUJxqNpPBcMJTlVA=
github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
@@ -100,16 +105,20 @@ github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0f
github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
@@ -120,21 +129,31 @@ github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= github.com/mattn/go-sqlite3 v1.14.28 h1:ThEiQrnbtumT+QMknw63Befp/ce/nUPgBPMlRFEum7A=
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mattn/go-sqlite3 v1.14.28/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks= github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8=
github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I=
github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs=
github.com/otiai10/mint v1.6.3/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4=
github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw=
github.com/prometheus/procfs v0.20.1 h1:XwbrGOIplXW/AU3YhIhLODXMJYyC1isLFfYCsTEycfc=
github.com/prometheus/procfs v0.20.1/go.mod h1:o9EMBZGRyvDrSPH1RqdxhojkuXstoe4UlK79eF5TGGo=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
@@ -146,8 +165,8 @@ github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXY
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
@@ -161,20 +180,24 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/wercker/journalhook v0.0.0-20230927020745-64542ffa4117 h1:67A5tweHp3C7osHjrYsy6pQZ00bYkTTttZ7kiOwwHeA= github.com/wercker/journalhook v0.0.0-20230927020745-64542ffa4117 h1:67A5tweHp3C7osHjrYsy6pQZ00bYkTTttZ7kiOwwHeA=
github.com/wercker/journalhook v0.0.0-20230927020745-64542ffa4117/go.mod h1:XCsSkdKK4gwBMNrOCZWww0pX6AOt+2gYc5Z6jBRrNVg= github.com/wercker/journalhook v0.0.0-20230927020745-64542ffa4117/go.mod h1:XCsSkdKK4gwBMNrOCZWww0pX6AOt+2gYc5Z6jBRrNVg=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/zclconf/go-cty v1.15.0 h1:tTCRWxsexYUmtt/wVxgDClUe+uQusuI443uL6e+5sXQ= github.com/zclconf/go-cty v1.18.0 h1:pJ8+HNI4gFoyRNqVE37wWbJWVw43BZczFo7KUoRczaA=
github.com/zclconf/go-cty v1.15.0/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= github.com/zclconf/go-cty v1.18.0/go.mod h1:qpnV6EDNgC1sns/AleL1fvatHw72j+S+nS+MJ+T2CSg=
github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo=
github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM=
github.com/zclconf/go-cty-yaml v1.2.0 h1:GDyL4+e/Qe/S0B7YaecMLbVvAR/Mp21CXMOSiCTOi1M=
github.com/zclconf/go-cty-yaml v1.2.0/go.mod h1:9YLUH4g7lOhVWqUbctnVlZ5KLpg7JAprQNgxSZ1Gyxs=
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
@@ -182,6 +205,8 @@ go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9E
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.yaml.in/yaml/v2 v2.4.4 h1:tuyd0P+2Ont/d6e2rl3be67goVK4R6deVxCUX5vyPaQ=
go.yaml.in/yaml/v2 v2.4.4/go.mod h1:gMZqIpDtDqOfM0uNfy0SkpRhvUryYH0Z6wdMYcacYXQ=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -194,15 +219,15 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ=
golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= golang.org/x/crypto v0.50.0 h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI=
golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/crypto v0.50.0/go.mod h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= golang.org/x/mod v0.35.0 h1:Ww1D637e6Pg+Zb2KrWfHQUnH2dQRLBQyAtpr/haaJeM=
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/mod v0.35.0/go.mod h1:+GwiRhIInF8wPm+4AoT6L0FA1QWAad3OMdTRx4tFYlU=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -215,8 +240,8 @@ golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -230,13 +255,12 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI=
golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -252,8 +276,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg=
golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
@@ -265,17 +289,19 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= golang.org/x/tools v0.44.0 h1:UP4ajHPIcuMjT1GqzDWRlalUEoY+uzoZKnhOjbIPD2c=
golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/tools v0.44.0/go.mod h1:KA0AfVErSdxRZIsOVipbv3rQhVXTnlU6UhKxHd1seDI=
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

View File

@@ -13,7 +13,7 @@ import (
"time" "time"
) )
func housekeeping(repo, march string, wg *sync.WaitGroup) error { func housekeeping(ctx context.Context, repo, march string, wg *sync.WaitGroup) error {
defer wg.Done() defer wg.Done()
fullRepo := repo + "-" + march fullRepo := repo + "-" + march
log.Debugf("[%s] start housekeeping", fullRepo) log.Debugf("[%s] start housekeeping", fullRepo)
@@ -26,7 +26,7 @@ func housekeeping(repo, march string, wg *sync.WaitGroup) error {
for _, path := range packages { for _, path := range packages {
mPackage := Package(path) mPackage := Package(path)
dbPkg, err := mPackage.DBPackage(db) dbPkg, err := mPackage.DBPackage(ctx, db)
if ent.IsNotFound(err) { if ent.IsNotFound(err) {
log.Infof("[HK] removing orphan %s->%s", fullRepo, filepath.Base(path)) log.Infof("[HK] removing orphan %s->%s", fullRepo, filepath.Base(path))
pkg := &ProtoPackage{ pkg := &ProtoPackage{
@@ -50,12 +50,6 @@ func housekeeping(repo, march string, wg *sync.WaitGroup) error {
Arch: *mPackage.Arch(), Arch: *mPackage.Arch(),
} }
matchNoBuild, err := MatchGlobList(pkg.Pkgbase, conf.Blacklist.Packages)
if err != nil {
log.Errorf("[HK] %s->%s error parsing no-build glob: %v", pkg.FullRepo, mPackage.Name(), err)
continue
}
// check if package is still part of repo // check if package is still part of repo
dbs, err := alpmHandle.SyncDBs() dbs, err := alpmHandle.SyncDBs()
if err != nil { if err != nil {
@@ -69,7 +63,7 @@ func housekeeping(repo, march string, wg *sync.WaitGroup) error {
pkgResolved.DB().Name() != pkg.Repo.String() || pkgResolved.DB().Name() != pkg.Repo.String() ||
pkgResolved.Architecture() != pkg.Arch || pkgResolved.Architecture() != pkg.Arch ||
pkgResolved.Name() != mPackage.Name() || pkgResolved.Name() != mPackage.Name() ||
matchNoBuild { MatchGlobList(pkg.Pkgbase, conf.Blacklist.Packages) {
switch { switch {
case err != nil: case err != nil:
log.Infof("[HK] %s->%s not included in repo (resolve error: %v)", pkg.FullRepo, mPackage.Name(), err) log.Infof("[HK] %s->%s not included in repo (resolve error: %v)", pkg.FullRepo, mPackage.Name(), err)
@@ -85,7 +79,7 @@ func housekeeping(repo, march string, wg *sync.WaitGroup) error {
case pkgResolved.Name() != mPackage.Name(): case pkgResolved.Name() != mPackage.Name():
log.Infof("[HK] %s->%s not included in repo (name mismatch: repo:%s != pkg:%s)", pkg.FullRepo, log.Infof("[HK] %s->%s not included in repo (name mismatch: repo:%s != pkg:%s)", pkg.FullRepo,
mPackage.Name(), pkgResolved.Name(), mPackage.Name()) mPackage.Name(), pkgResolved.Name(), mPackage.Name())
case matchNoBuild: case MatchGlobList(pkg.Pkgbase, conf.Blacklist.Packages):
log.Infof("[HK] %s->%s not included in repo (blacklisted pkgbase %s)", pkg.FullRepo, mPackage.Name(), pkg.Pkgbase) log.Infof("[HK] %s->%s not included in repo (blacklisted pkgbase %s)", pkg.FullRepo, mPackage.Name(), pkg.Pkgbase)
} }
@@ -95,7 +89,7 @@ func housekeeping(repo, march string, wg *sync.WaitGroup) error {
log.Errorf("[HK] %s->%s unable to get pkg-files: %v", pkg.FullRepo, mPackage.Name(), err) log.Errorf("[HK] %s->%s unable to get pkg-files: %v", pkg.FullRepo, mPackage.Name(), err)
continue continue
} }
err = db.DBPackage.DeleteOne(pkg.DBPackage).Exec(context.Background()) err = db.DBPackage.DeleteOne(pkg.DBPackage).Exec(ctx)
pkg.DBPackage = nil pkg.DBPackage = nil
buildManager.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg} buildManager.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
if err != nil { if err != nil {
@@ -105,7 +99,7 @@ func housekeeping(repo, march string, wg *sync.WaitGroup) error {
} }
if pkg.DBPackage.LastVerified.Before(pkg.DBPackage.BuildTimeStart) { if pkg.DBPackage.LastVerified.Before(pkg.DBPackage.BuildTimeStart) {
err := pkg.DBPackage.Update().SetLastVerified(time.Now().UTC()).Exec(context.Background()) err := pkg.DBPackage.Update().SetLastVerified(time.Now().UTC()).Exec(ctx)
if err != nil { if err != nil {
return err return err
} }
@@ -153,7 +147,7 @@ func housekeeping(repo, march string, wg *sync.WaitGroup) error {
DBPackage: dbPkg, DBPackage: dbPkg,
} }
if !pkg.isAvailable(alpmHandle) { if !pkg.isAvailable(ctx, alpmHandle) {
log.Infof("[HK] %s->%s not found on mirror, removing", pkg.FullRepo, pkg.Pkgbase) log.Infof("[HK] %s->%s not found on mirror, removing", pkg.FullRepo, pkg.Pkgbase)
err = db.DBPackage.DeleteOne(dbPkg).Exec(context.Background()) err = db.DBPackage.DeleteOne(dbPkg).Exec(context.Background())
if err != nil { if err != nil {
@@ -167,9 +161,11 @@ func housekeeping(repo, march string, wg *sync.WaitGroup) error {
// check lastVersionBuild // check lastVersionBuild
if dbPkg.LastVersionBuild != dbPkg.RepoVersion { if dbPkg.LastVersionBuild != dbPkg.RepoVersion {
log.Infof("[HK] %s->%s updating lastVersionBuild %s -> %s", fullRepo, dbPkg.Pkgbase, dbPkg.LastVersionBuild, dbPkg.RepoVersion) log.Infof("[HK] %s->%s updating lastVersionBuild %s -> %s", fullRepo, dbPkg.Pkgbase, dbPkg.LastVersionBuild, dbPkg.RepoVersion)
dbPkg, err = dbPkg.Update().SetLastVersionBuild(dbPkg.RepoVersion).Save(context.Background()) nDBPkg, err := dbPkg.Update().SetLastVersionBuild(dbPkg.RepoVersion).Save(ctx)
if err != nil { if err != nil {
log.Warningf("[HK] error updating lastVersionBuild for %s->%s: %v", fullRepo, dbPkg.Pkgbase, err) log.Warningf("[HK] error updating lastVersionBuild for %s->%s: %v", fullRepo, dbPkg.Pkgbase, err)
} else {
dbPkg = nDBPkg
} }
} }
@@ -194,7 +190,7 @@ func housekeeping(repo, march string, wg *sync.WaitGroup) error {
ClearRepoVersion(). ClearRepoVersion().
ClearTagRev(). ClearTagRev().
SetStatus(dbpackage.StatusQueued). SetStatus(dbpackage.StatusQueued).
Save(context.Background()) Save(ctx)
if err != nil { if err != nil {
return err return err
} }
@@ -230,14 +226,14 @@ func housekeeping(repo, march string, wg *sync.WaitGroup) error {
if dbPkg.TagRev != nil && state.TagRev == *dbPkg.TagRev && state.PkgVer != dbPkg.Version { if dbPkg.TagRev != nil && state.TagRev == *dbPkg.TagRev && state.PkgVer != dbPkg.Version {
log.Infof("[HK] reseting package %s->%s with mismatched state information (%s!=%s)", log.Infof("[HK] reseting package %s->%s with mismatched state information (%s!=%s)",
fullRepo, dbPkg.Pkgbase, state.PkgVer, dbPkg.Version) fullRepo, dbPkg.Pkgbase, state.PkgVer, dbPkg.Version)
err = dbPkg.Update().SetStatus(dbpackage.StatusQueued).ClearTagRev().Exec(context.Background()) err = dbPkg.Update().SetStatus(dbpackage.StatusQueued).ClearTagRev().Exec(ctx)
if err != nil { if err != nil {
return err return err
} }
} }
case dbPkg.Status == dbpackage.StatusLatest && dbPkg.RepoVersion == "": case dbPkg.Status == dbpackage.StatusLatest && dbPkg.RepoVersion == "":
log.Infof("[HK] reseting missing package %s->%s with no repo version", fullRepo, dbPkg.Pkgbase) log.Infof("[HK] reseting missing package %s->%s with no repo version", fullRepo, dbPkg.Pkgbase)
err = dbPkg.Update().SetStatus(dbpackage.StatusQueued).ClearTagRev().ClearRepoVersion().Exec(context.Background()) err = dbPkg.Update().SetStatus(dbpackage.StatusQueued).ClearTagRev().ClearRepoVersion().Exec(ctx)
if err != nil { if err != nil {
return err return err
} }
@@ -249,6 +245,12 @@ func housekeeping(repo, march string, wg *sync.WaitGroup) error {
DBPackage: dbPkg, DBPackage: dbPkg,
} }
buildManager.repoPurge[fullRepo] <- []*ProtoPackage{pkg} buildManager.repoPurge[fullRepo] <- []*ProtoPackage{pkg}
case dbPkg.Status == dbpackage.StatusSkipped && dbPkg.SkipReason == "blacklisted" && !MatchGlobList(pkg.Pkgbase, conf.Blacklist.Packages):
log.Infof("[HK] requeue previously blacklisted package %s->%s", fullRepo, dbPkg.Pkgbase)
err = dbPkg.Update().SetStatus(dbpackage.StatusQueued).ClearSkipReason().ClearTagRev().Exec(context.Background())
if err != nil {
return err
}
case dbPkg.Status == dbpackage.StatusFailed && dbPkg.RepoVersion != "": case dbPkg.Status == dbpackage.StatusFailed && dbPkg.RepoVersion != "":
log.Infof("[HK] package %s->%s failed but still present in repo, removing", fullRepo, dbPkg.Pkgbase) log.Infof("[HK] package %s->%s failed but still present in repo, removing", fullRepo, dbPkg.Pkgbase)
pkg := &ProtoPackage{ pkg := &ProtoPackage{
@@ -264,7 +266,7 @@ func housekeeping(repo, march string, wg *sync.WaitGroup) error {
return nil return nil
} }
func logHK() error { func logHK(ctx context.Context) error {
// check if package for log exists and if error can be fixed by rebuild // check if package for log exists and if error can be fixed by rebuild
logFiles, err := Glob(filepath.Join(conf.Basedir.Repo, logDir, "/**/*.log")) logFiles, err := Glob(filepath.Join(conf.Basedir.Repo, logDir, "/**/*.log"))
if err != nil { if err != nil {
@@ -293,7 +295,7 @@ func logHK() error {
dbpackage.Pkgbase(pkg.Pkgbase), dbpackage.Pkgbase(pkg.Pkgbase),
dbpackage.March(pkg.March), dbpackage.March(pkg.March),
dbpackage.StatusEQ(dbpackage.StatusSkipped), dbpackage.StatusEQ(dbpackage.StatusSkipped),
).Exist(context.Background()) ).Exist(ctx)
if err != nil { if err != nil {
return err return err
} }
@@ -312,7 +314,7 @@ func logHK() error {
if rePortError.MatchString(sLogContent) || reSigError.MatchString(sLogContent) || reDownloadError.MatchString(sLogContent) || if rePortError.MatchString(sLogContent) || reSigError.MatchString(sLogContent) || reDownloadError.MatchString(sLogContent) ||
reDownloadError2.MatchString(sLogContent) { reDownloadError2.MatchString(sLogContent) {
rows, err := db.DBPackage.Update().Where(dbpackage.Pkgbase(pkg.Pkgbase), dbpackage.March(pkg.March), rows, err := db.DBPackage.Update().Where(dbpackage.Pkgbase(pkg.Pkgbase), dbpackage.March(pkg.March),
dbpackage.StatusEQ(dbpackage.StatusFailed)).ClearTagRev().SetStatus(dbpackage.StatusQueued).Save(context.Background()) dbpackage.StatusEQ(dbpackage.StatusFailed)).ClearTagRev().SetStatus(dbpackage.StatusQueued).Save(ctx)
if err != nil { if err != nil {
return err return err
} }
@@ -326,7 +328,7 @@ func logHK() error {
dbpackage.March(pkg.March), dbpackage.March(pkg.March),
dbpackage.StatusEQ(dbpackage.StatusFailed), dbpackage.StatusEQ(dbpackage.StatusFailed),
dbpackage.LtoNotIn(dbpackage.LtoAutoDisabled, dbpackage.LtoDisabled), dbpackage.LtoNotIn(dbpackage.LtoAutoDisabled, dbpackage.LtoDisabled),
).ClearTagRev().SetStatus(dbpackage.StatusQueued).SetLto(dbpackage.LtoAutoDisabled).Save(context.Background()) ).ClearTagRev().SetStatus(dbpackage.StatusQueued).SetLto(dbpackage.LtoAutoDisabled).Save(ctx)
if err != nil { if err != nil {
return err return err
} }
@@ -343,7 +345,7 @@ func debugHK() {
for _, march := range conf.March { for _, march := range conf.March {
if _, err := os.Stat(filepath.Join(conf.Basedir.Debug, march)); err == nil { if _, err := os.Stat(filepath.Join(conf.Basedir.Debug, march)); err == nil {
log.Debugf("[DHK/%s] start cleanup debug packages", march) log.Debugf("[DHK/%s] start cleanup debug packages", march)
cleanCmd := exec.Command("paccache", "-rc", filepath.Join(conf.Basedir.Debug, march), "-k", "1") //nolint:gosec cleanCmd := exec.Command("paccache", "-rc", filepath.Join(conf.Basedir.Debug, march), "-k", "1")
res, err := cleanCmd.CombinedOutput() res, err := cleanCmd.CombinedOutput()
if err != nil { if err != nil {
log.Warningf("[DHK/%s] cleanup debug packages failed: %v (%s)", march, err, string(res)) log.Warningf("[DHK/%s] cleanup debug packages failed: %v (%s)", march, err, string(res))

11
main.go
View File

@@ -86,7 +86,8 @@ func main() {
}(db) }(db)
} }
if err := db.Schema.Create(context.Background(), migrate.WithDropIndex(true), migrate.WithDropColumn(true)); err != nil { ctx, cancel := context.WithCancel(context.Background())
if err := db.Schema.Create(ctx, migrate.WithDropIndex(true), migrate.WithDropColumn(true)); err != nil {
log.Panicf("automigrate failed: %v", err) log.Panicf("automigrate failed: %v", err)
} }
@@ -100,11 +101,13 @@ func main() {
repoWG: new(sync.WaitGroup), repoWG: new(sync.WaitGroup),
} }
err = setupChroot() buildManager.setupMetrics(conf.Metrics.Port)
err = setupChroot(ctx)
if err != nil { if err != nil {
log.Panicf("unable to setup chroot: %v", err) log.Panicf("unable to setup chroot: %v", err)
} }
err = syncMarchs() err = syncMarchs(ctx)
if err != nil { if err != nil {
log.Panicf("error syncing marchs: %v", err) log.Panicf("error syncing marchs: %v", err)
} }
@@ -115,8 +118,6 @@ func main() {
log.Panicf("error while ALPM-init: %v", err) log.Panicf("error while ALPM-init: %v", err)
} }
ctx, cancel := context.WithCancel(context.Background())
go func() { go func() {
_ = buildManager.syncWorker(ctx) _ = buildManager.syncWorker(ctx)
}() }()

26
metrics.go Normal file
View File

@@ -0,0 +1,26 @@
package main
import (
"fmt"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
"net/http"
)
func (b *BuildManager) setupMetrics(port uint32) {
b.metrics.queueSize = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "build_queue_size",
Help: "Build queue size",
}, []string{"repository", "status"})
mux := http.NewServeMux()
mux.Handle("/", promhttp.Handler())
go func() {
err := http.ListenAndServe(fmt.Sprintf(":%d", port), mux) //nolint:gosec
if err != nil {
log.Errorf("failed to start metrics server: %v", err)
}
}()
}

View File

@@ -70,12 +70,12 @@ func (pkg Package) HasValidSignature() (bool, error) {
} }
// DBPackage returns ent.DBPackage for package // DBPackage returns ent.DBPackage for package
func (pkg Package) DBPackage(db *ent.Client) (*ent.DBPackage, error) { func (pkg Package) DBPackage(ctx context.Context, db *ent.Client) (*ent.DBPackage, error) {
return pkg.DBPackageIsolated(*pkg.MArch(), pkg.Repo(), db) return pkg.DBPackageIsolated(ctx, *pkg.MArch(), pkg.Repo(), db)
} }
// DBPackageIsolated returns ent.DBPackage like DBPackage, but not relying on the path for march and repo // DBPackageIsolated returns ent.DBPackage like DBPackage, but not relying on the path for march and repo
func (pkg Package) DBPackageIsolated(march string, repo dbpackage.Repository, db *ent.Client) (*ent.DBPackage, error) { func (pkg Package) DBPackageIsolated(ctx context.Context, march string, repo dbpackage.Repository, db *ent.Client) (*ent.DBPackage, error) {
dbPkg, err := db.DBPackage.Query().Where(func(s *sql.Selector) { dbPkg, err := db.DBPackage.Query().Where(func(s *sql.Selector) {
s.Where( s.Where(
sql.And( sql.And(
@@ -83,7 +83,7 @@ func (pkg Package) DBPackageIsolated(march string, repo dbpackage.Repository, db
sql.EQ(dbpackage.FieldMarch, march), sql.EQ(dbpackage.FieldMarch, march),
sql.EQ(dbpackage.FieldRepository, repo)), sql.EQ(dbpackage.FieldRepository, repo)),
) )
}).Only(context.Background()) }).Only(ctx)
if ent.IsNotFound(err) { if ent.IsNotFound(err) {
log.Debugf("not found in database: %s", pkg.Name()) log.Debugf("not found in database: %s", pkg.Name())
return nil, err return nil, err

View File

@@ -43,11 +43,6 @@ var (
) )
func (p *ProtoPackage) isEligible(ctx context.Context) bool { func (p *ProtoPackage) isEligible(ctx context.Context) bool {
globMatch, err := MatchGlobList(p.Pkgbase, conf.Blacklist.Packages)
if err != nil {
log.Errorf("error parsing glob from no-build list: %v", err)
}
skipping := false skipping := false
switch { switch {
case p.Arch == "any": case p.Arch == "any":
@@ -55,13 +50,13 @@ func (p *ProtoPackage) isEligible(ctx context.Context) bool {
p.DBPackage.SkipReason = "arch = any" p.DBPackage.SkipReason = "arch = any"
p.DBPackage.Status = dbpackage.StatusSkipped p.DBPackage.Status = dbpackage.StatusSkipped
skipping = true skipping = true
case globMatch: case MatchGlobList(p.Pkgbase, conf.Blacklist.Packages):
log.Debugf("skipped %s: package on no-build list", p.Pkgbase) log.Debugf("skipped %s: package on no-build list", p.Pkgbase)
p.DBPackage.SkipReason = "blacklisted" p.DBPackage.SkipReason = "blacklisted"
p.DBPackage.Status = dbpackage.StatusSkipped p.DBPackage.Status = dbpackage.StatusSkipped
skipping = true skipping = true
case p.DBPackage.MaxRss != nil && datasize.ByteSize(*p.DBPackage.MaxRss)*datasize.KB > conf.Build.MemoryLimit: case p.DBPackage.MaxRss != nil && datasize.ByteSize(*p.DBPackage.MaxRss)*datasize.KB > conf.Build.MemoryLimit: //nolint:gosec
log.Debugf("skipped %s: memory limit exceeded (%s)", p.Pkgbase, datasize.ByteSize(*p.DBPackage.MaxRss)*datasize.KB) log.Debugf("skipped %s: memory limit exceeded (%s)", p.Pkgbase, datasize.ByteSize(*p.DBPackage.MaxRss)*datasize.KB) //nolint:gosec
p.DBPackage.SkipReason = "memory limit exceeded" p.DBPackage.SkipReason = "memory limit exceeded"
p.DBPackage.Status = dbpackage.StatusSkipped p.DBPackage.Status = dbpackage.StatusSkipped
skipping = true skipping = true
@@ -210,17 +205,29 @@ func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {
cmd := exec.CommandContext(ctx, "makechrootpkg", "-c", "-D", filepath.Join(conf.Basedir.Work, makepkgDir), //nolint:gosec cmd := exec.CommandContext(ctx, "makechrootpkg", "-c", "-D", filepath.Join(conf.Basedir.Work, makepkgDir), //nolint:gosec
"-l", chroot, "-r", filepath.Join(conf.Basedir.Work, chrootDir), "--", "-m", "--noprogressbar", "--config", "-l", chroot, "-r", filepath.Join(conf.Basedir.Work, chrootDir), "--", "-m", "--noprogressbar", "--config",
filepath.Join(conf.Basedir.Work, makepkgDir, fmt.Sprintf(makepkgFile, p.March))) filepath.Join(conf.Basedir.Work, makepkgDir, fmt.Sprintf(makepkgFile, p.March)))
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
cmd.Dir = filepath.Dir(p.Pkgbuild) cmd.Dir = filepath.Dir(p.Pkgbuild)
var out bytes.Buffer var out bytes.Buffer
cmd.Stdout = &out cmd.Stdout = &out
cmd.Stderr = &out cmd.Stderr = &out
err = cmd.Start() if err = cmd.Start(); err != nil {
if err != nil {
return time.Since(start), fmt.Errorf("error starting build: %w", err) return time.Since(start), fmt.Errorf("error starting build: %w", err)
} }
pgid, err := syscall.Getpgid(cmd.Process.Pid)
if err != nil {
log.Errorf("error getting PGID: %v", err)
}
done := make(chan bool)
result := make(chan int64)
go pollMemoryUsage(pgid, 1*time.Second, done, result)
err = cmd.Wait() err = cmd.Wait()
close(done)
peakMem := <-result
close(result)
Rusage, ok := cmd.ProcessState.SysUsage().(*syscall.Rusage) Rusage, ok := cmd.ProcessState.SysUsage().(*syscall.Rusage)
if !ok { if !ok {
@@ -320,7 +327,7 @@ func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {
SetBuildTimeStart(start). SetBuildTimeStart(start).
SetLastVersionBuild(p.Version). SetLastVersionBuild(p.Version).
SetTagRev(p.State.TagRev). SetTagRev(p.State.TagRev).
SetMaxRss(Rusage.Maxrss). SetMaxRss(peakMem).
SetIoOut(Rusage.Oublock). SetIoOut(Rusage.Oublock).
SetIoIn(Rusage.Inblock). SetIoIn(Rusage.Inblock).
SetUTime(Rusage.Utime.Sec). SetUTime(Rusage.Utime.Sec).
@@ -470,7 +477,7 @@ func (p *ProtoPackage) importKeys() error {
return nil return nil
} }
func (p *ProtoPackage) isAvailable(h *alpm.Handle) bool { func (p *ProtoPackage) isAvailable(ctx context.Context, h *alpm.Handle) bool {
dbs, err := h.SyncDBs() dbs, err := h.SyncDBs()
if err != nil { if err != nil {
return false return false
@@ -486,7 +493,7 @@ func (p *ProtoPackage) isAvailable(h *alpm.Handle) bool {
case p.DBPackage != nil && len(p.DBPackage.Packages) > 0: case p.DBPackage != nil && len(p.DBPackage.Packages) > 0:
pkg, err = dbs.FindSatisfier(p.DBPackage.Packages[0]) pkg, err = dbs.FindSatisfier(p.DBPackage.Packages[0])
default: default:
cmd := exec.Command("unbuffer", "pacsift", "--exact", "--base="+p.Pkgbase, "--repo="+p.Repo.String(), //nolint:gosec cmd := exec.CommandContext(ctx, "unbuffer", "pacsift", "--exact", "--base="+p.Pkgbase, "--repo="+p.Repo.String(), //nolint:gosec
"--sysroot="+filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot)) "--sysroot="+filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot))
var res []byte var res []byte
res, err = cmd.Output() res, err = cmd.Output()
@@ -494,18 +501,12 @@ func (p *ProtoPackage) isAvailable(h *alpm.Handle) bool {
log.Warningf("error getting packages from pacsift for %s: %v", p.Pkgbase, err) log.Warningf("error getting packages from pacsift for %s: %v", p.Pkgbase, err)
return false return false
} else if len(res) == 0 { } else if len(res) == 0 {
log.Warningf("error getting packages from pacsift for %s", p.Pkgbase)
return false return false
} }
// workaround for https://github.com/andrewgregory/pacutils/issues/66 if len(strings.Split(strings.TrimSpace(string(res)), "\n")) > 0 {
// TODO: remove once fixed pacsiftLines := strings.Split(strings.TrimSpace(string(res)), "\n")
rRes := reReplacePacsiftWarning.ReplaceAllString(string(res), "")
if strings.TrimSpace(rRes) == "" {
return false
}
if len(strings.Split(strings.TrimSpace(rRes), "\n")) > 0 {
pacsiftLines := strings.Split(strings.TrimSpace(rRes), "\n")
var splitPkgs []string var splitPkgs []string
for _, line := range pacsiftLines { for _, line := range pacsiftLines {
@@ -513,7 +514,10 @@ func (p *ProtoPackage) isAvailable(h *alpm.Handle) bool {
} }
if p.DBPackage != nil { if p.DBPackage != nil {
p.DBPackage = p.DBPackage.Update().SetPackages(splitPkgs).SaveX(context.Background()) p.DBPackage, err = p.DBPackage.Update().SetPackages(splitPkgs).Save(ctx)
if err != nil {
return false
}
} }
pkg, err = dbs.FindSatisfier(splitPkgs[0]) pkg, err = dbs.FindSatisfier(splitPkgs[0])
} else { } else {
@@ -656,8 +660,8 @@ func (p *ProtoPackage) findPkgFiles() error {
if p.DBPackage != nil { if p.DBPackage != nil {
realPkgs = append(realPkgs, p.DBPackage.Packages...) realPkgs = append(realPkgs, p.DBPackage.Packages...)
} else { } else {
for _, realPkg := range p.Srcinfo.Packages { for i := range p.Srcinfo.Packages {
realPkgs = append(realPkgs, realPkg.Pkgname) realPkgs = append(realPkgs, p.Srcinfo.Packages[i].Pkgname)
} }
} }
@@ -676,7 +680,7 @@ func (p *ProtoPackage) findPkgFiles() error {
return nil return nil
} }
func (p *ProtoPackage) toDBPackage(create bool) error { func (p *ProtoPackage) toDBPackage(ctx context.Context, create bool) error {
if p.DBPackage != nil { if p.DBPackage != nil {
return nil return nil
} }
@@ -685,13 +689,16 @@ func (p *ProtoPackage) toDBPackage(create bool) error {
dbpackage.Pkgbase(p.Pkgbase), dbpackage.Pkgbase(p.Pkgbase),
dbpackage.March(p.March), dbpackage.March(p.March),
dbpackage.RepositoryEQ(p.Repo), dbpackage.RepositoryEQ(p.Repo),
).Only(context.Background()) ).Only(ctx)
if err != nil && ent.IsNotFound(err) && create { if err != nil && ent.IsNotFound(err) && create {
dbPkg = db.DBPackage.Create(). dbPkg, err = db.DBPackage.Create().
SetPkgbase(p.Pkgbase). SetPkgbase(p.Pkgbase).
SetMarch(p.March). SetMarch(p.March).
SetRepository(p.Repo). SetRepository(p.Repo).
SaveX(context.Background()) Save(ctx)
if err != nil {
return err
}
} else if err != nil && !ent.IsNotFound(err) { } else if err != nil && !ent.IsNotFound(err) {
return err return err
} }

View File

@@ -97,7 +97,7 @@ package() {
` `
func TestIncreasePkgRel(t *testing.T) { //nolint:paralleltest func TestIncreasePkgRel(t *testing.T) { //nolint:paralleltest
pkgbuild, err := os.CreateTemp("", "") pkgbuild, err := os.CreateTemp(t.TempDir(), "")
if err != nil { if err != nil {
t.Fatal("unable to setup temp. PKGBUILD") t.Fatal("unable to setup temp. PKGBUILD")
} }
@@ -142,7 +142,7 @@ func TestIncreasePkgRel(t *testing.T) { //nolint:paralleltest
} }
func TestIncreasePkgRelWithPkgSub(t *testing.T) { //nolint:paralleltest func TestIncreasePkgRelWithPkgSub(t *testing.T) { //nolint:paralleltest
pkgbuild, err := os.CreateTemp("", "") pkgbuild, err := os.CreateTemp(t.TempDir(), "")
if err != nil { if err != nil {
t.Fatal("unable to setup temp. PKGBUILD") t.Fatal("unable to setup temp. PKGBUILD")
} }

219
utils.go
View File

@@ -1,6 +1,7 @@
package main package main
import ( import (
"context"
"errors" "errors"
"fmt" "fmt"
"github.com/Jguer/go-alpm/v2" "github.com/Jguer/go-alpm/v2"
@@ -26,6 +27,7 @@ import (
const ( const (
pacmanConf = "/usr/share/devtools/pacman.conf.d/multilib.conf" pacmanConf = "/usr/share/devtools/pacman.conf.d/multilib.conf"
makepkgConf = "/usr/share/devtools/makepkg.conf.d/x86_64.conf" makepkgConf = "/usr/share/devtools/makepkg.conf.d/x86_64.conf"
makepkgConfExt = "/etc/makepkg.conf.d"
logDir = "logs" logDir = "logs"
pristineChroot = "root" pristineChroot = "root"
buildDir = "build" buildDir = "build"
@@ -85,6 +87,9 @@ type Conf struct {
Interval string Interval string
} }
MaxCloneRetries uint64 `yaml:"max_clone_retries"` MaxCloneRetries uint64 `yaml:"max_clone_retries"`
Metrics struct {
Port uint32
}
} }
type Globs []string type Globs []string
@@ -140,7 +145,7 @@ func pkgList2MaxMem(pkgList []*ProtoPackage) datasize.ByteSize {
var sum uint64 var sum uint64
for _, pkg := range pkgList { for _, pkg := range pkgList {
if pkg.DBPackage.MaxRss != nil { if pkg.DBPackage.MaxRss != nil {
sum += uint64(*pkg.DBPackage.MaxRss) sum += uint64(*pkg.DBPackage.MaxRss) //nolint:gosec
} }
} }
@@ -166,7 +171,7 @@ func stateFileMeta(stateFile string) (repo string, subRepo *string, arch string,
return return
} }
func movePackagesLive(fullRepo string) error { func movePackagesLive(ctx context.Context, fullRepo string) error {
if _, err := os.Stat(filepath.Join(conf.Basedir.Work, waitingDir, fullRepo)); os.IsNotExist(err) { if _, err := os.Stat(filepath.Join(conf.Basedir.Work, waitingDir, fullRepo)); os.IsNotExist(err) {
return nil return nil
} else if err != nil { } else if err != nil {
@@ -186,7 +191,7 @@ func movePackagesLive(fullRepo string) error {
for _, file := range pkgFiles { for _, file := range pkgFiles {
pkg := Package(file) pkg := Package(file)
dbPkg, err := pkg.DBPackageIsolated(march, dbpackage.Repository(repo), db) dbPkg, err := pkg.DBPackageIsolated(ctx, march, dbpackage.Repository(repo), db)
if err != nil { if err != nil {
if strings.HasSuffix(pkg.Name(), "-debug") { if strings.HasSuffix(pkg.Name(), "-debug") {
mkErr := os.MkdirAll(filepath.Join(conf.Basedir.Debug, march), 0o755) mkErr := os.MkdirAll(filepath.Join(conf.Basedir.Debug, march), 0o755)
@@ -264,8 +269,8 @@ func packages2slice(pkgs any) []string {
switch v := pkgs.(type) { switch v := pkgs.(type) {
case []srcinfo.Package: case []srcinfo.Package:
var sPkgs []string var sPkgs []string
for _, p := range v { for i := range v {
sPkgs = append(sPkgs, p.Pkgname) sPkgs = append(sPkgs, v[i].Pkgname)
} }
return sPkgs return sPkgs
@@ -328,36 +333,46 @@ func initALPM(root, dbpath string) (*alpm.Handle, error) {
return h, nil return h, nil
} }
func setupChroot() error { func setupChroot(ctx context.Context) error {
_, err := os.Stat(filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot)) _, err := os.Stat(filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot))
switch { switch {
case err == nil: case err == nil:
cmd := exec.Command("arch-nspawn", "-C", pacmanConf, filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot), //nolint:gosec cmd := exec.CommandContext(ctx, "arch-nspawn", "-C", pacmanConf, //nolint:gosec
"pacman", "-Syuu", "--noconfirm") filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot), "pacman", "-Syuu", "--noconfirm")
res, err := cmd.CombinedOutput() res, err := cmd.CombinedOutput()
log.Debug(string(res)) log.Debug(string(res))
if err != nil { if err != nil {
return fmt.Errorf("error updating chroot: %w\n%s", err, string(res)) return fmt.Errorf("error updating chroot: %w: %s", err, string(res))
} }
case os.IsNotExist(err): case os.IsNotExist(err):
err = os.MkdirAll(filepath.Join(conf.Basedir.Work, chrootDir), 0o755) err = os.MkdirAll(filepath.Join(conf.Basedir.Work, chrootDir), 0o755)
if err != nil { if err != nil {
return err return err
} }
cmd := exec.Command("mkarchroot", "-C", pacmanConf, "-M", makepkgConf, //nolint:gosec cmd := exec.CommandContext(ctx, "mkarchroot", "-C", pacmanConf, "-M", makepkgConf, //nolint:gosec
filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot), "base-devel", "multilib-devel") filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot), "base-devel", "multilib-devel")
res, err := cmd.CombinedOutput() res, err := cmd.CombinedOutput()
log.Debug(string(res)) log.Debug(string(res))
if err != nil { if err != nil {
return fmt.Errorf("error creating chroot: %w\n%s", err, string(res)) return fmt.Errorf("error creating chroot: %w: %s", err, string(res))
} }
cmd = exec.Command("sudo", "cp", pacmanConf, //nolint:gosec // copy pacman.conf into pristine chroot to enable multilib
cmd = exec.CommandContext(ctx, "sudo", "cp", pacmanConf, //nolint:gosec
filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot, "etc/pacman.conf")) filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot, "etc/pacman.conf"))
res, err = cmd.CombinedOutput() res, err = cmd.CombinedOutput()
log.Debug(string(res)) log.Debug(string(res))
if err != nil { if err != nil {
return fmt.Errorf("error copying pacman.conf to chroot: %w\n%s", err, string(res)) return fmt.Errorf("error copying pacman.conf to chroot: %w: %s", err, string(res))
}
// remove makepkg conf extension, they are covered by our custom makepkg
cmd = exec.CommandContext(ctx, "sudo", "rm_chroot.py", //nolint:gosec
filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot, "etc/makepkg.conf.d"))
res, err = cmd.CombinedOutput()
log.Debug(string(res))
if err != nil {
return fmt.Errorf("error removing makepkg.conf.d from chroot: %w: %s", err, string(res))
} }
default: default:
return err return err
@@ -365,7 +380,7 @@ func setupChroot() error {
return nil return nil
} }
func syncMarchs() error { func syncMarchs(ctx context.Context) error {
files, err := os.ReadDir(conf.Basedir.Repo) files, err := os.ReadDir(conf.Basedir.Repo)
if err != nil { if err != nil {
return err return err
@@ -399,7 +414,7 @@ func syncMarchs() error {
repos = append(repos, fRepo) repos = append(repos, fRepo)
buildManager.repoAdd[fRepo] = make(chan []*ProtoPackage, 1000) buildManager.repoAdd[fRepo] = make(chan []*ProtoPackage, 1000)
buildManager.repoPurge[fRepo] = make(chan []*ProtoPackage, 1000) buildManager.repoPurge[fRepo] = make(chan []*ProtoPackage, 1000)
go buildManager.repoWorker(fRepo) go buildManager.repoWorker(ctx, fRepo)
if _, err := os.Stat(filepath.Join(conf.Basedir.Repo, fRepo, "os", conf.Arch)); os.IsNotExist(err) { if _, err := os.Stat(filepath.Join(conf.Basedir.Repo, fRepo, "os", conf.Arch)); os.IsNotExist(err) {
log.Debugf("creating path %s", filepath.Join(conf.Basedir.Repo, fRepo, "os", conf.Arch)) log.Debugf("creating path %s", filepath.Join(conf.Basedir.Repo, fRepo, "os", conf.Arch))
@@ -482,6 +497,8 @@ func parseFlagSection(section any, makepkgConf, march string) (string, error) {
} }
if _, ok := subMap.(string); ok && len(orgMatch) > 0 { if _, ok := subMap.(string); ok && len(orgMatch) > 0 {
log.Debugf("replace %s with %s", orgMatch[0], fmt.Sprintf("\n%s=%s%s%s",
strings.ToUpper(subSec.(string)), orgMatch[2], replaceStringsFromMap(subMap.(string), replaceMap), orgMatch[4]))
makepkgConf = strings.ReplaceAll(makepkgConf, orgMatch[0], fmt.Sprintf("\n%s=%s%s%s", makepkgConf = strings.ReplaceAll(makepkgConf, orgMatch[0], fmt.Sprintf("\n%s=%s%s%s",
strings.ToUpper(subSec.(string)), orgMatch[2], replaceStringsFromMap(subMap.(string), replaceMap), orgMatch[4])) strings.ToUpper(subSec.(string)), orgMatch[2], replaceStringsFromMap(subMap.(string), replaceMap), orgMatch[4]))
continue continue
@@ -490,12 +507,18 @@ func parseFlagSection(section any, makepkgConf, march string) (string, error) {
if len(orgMatch) == 0 { if len(orgMatch) == 0 {
// no match found, assume env var and append it // no match found, assume env var and append it
log.Debugf("no match found for %s:%v, appending", subSec, subMap) log.Debugf("no match found for %s:%v, appending", subSec, subMap)
if strings.Contains(subMap.(string), " ") { switch sm := subMap.(type) {
makepkgConf += fmt.Sprintf("\nexport %s=%q", strings.ToUpper(subSec.(string)), replaceStringsFromMap(subMap.(string), replaceMap)) case string:
if strings.Contains(sm, " ") {
makepkgConf += fmt.Sprintf("\nexport %s=%q", strings.ToUpper(subSec.(string)), replaceStringsFromMap(sm, replaceMap))
continue
}
makepkgConf += fmt.Sprintf("\nexport %s=%s", strings.ToUpper(subSec.(string)), replaceStringsFromMap(sm, replaceMap))
continue
case []string:
makepkgConf += fmt.Sprintf("\nexport %s=%q", strings.ToUpper(subSec.(string)), replaceStringsFromMap(strings.Join(sm, " "), replaceMap)) //nolint:lll
continue continue
} }
makepkgConf += fmt.Sprintf("\nexport %s=%s", strings.ToUpper(subSec.(string)), replaceStringsFromMap(subMap.(string), replaceMap))
continue
} }
log.Debugf("original %s: %v (%d)", subSec, flags, len(flags)) log.Debugf("original %s: %v (%d)", subSec, flags, len(flags))
@@ -528,7 +551,24 @@ func setupMakepkg(march string, flags map[string]any) error {
if err != nil { if err != nil {
return err return err
} }
makepkgStr := string(t) makepkgStrBuilder := new(strings.Builder)
makepkgStrBuilder.Write(t)
// read makepkg conf.d
makepkgConfExt, err := Glob(filepath.Join(makepkgConfExt, "*.conf"))
if err != nil {
return err
}
for _, makepkgExt := range makepkgConfExt {
ext, err := os.ReadFile(makepkgExt)
if err != nil {
return err
}
makepkgStrBuilder.Write(ext)
}
makepkgStr := makepkgStrBuilder.String()
makepkgStr, err = parseFlagSection(flags["common"], makepkgStr, march) makepkgStr, err = parseFlagSection(flags["common"], makepkgStr, march)
if err != nil { if err != nil {
@@ -664,17 +704,18 @@ func (globs Globs) Expand() ([]string, error) {
return matches, nil return matches, nil
} }
func MatchGlobList(target string, globs []string) (bool, error) { func MatchGlobList(target string, globs []string) bool {
for _, lGlob := range globs { for _, lGlob := range globs {
tGlob, err := glob.Compile(lGlob) tGlob, err := glob.Compile(lGlob)
if err != nil { if err != nil {
return false, fmt.Errorf("failed to compile glob %s: %w", lGlob, err) log.Warningf("failed to compile glob %s: %v", lGlob, err)
return false
} }
if tGlob.Match(target) { if tGlob.Match(target) {
return true, nil return true
} }
} }
return false, nil return false
} }
func Copy(srcPath, dstPath string) (err error) { func Copy(srcPath, dstPath string) (err error) {
@@ -701,8 +742,10 @@ func Copy(srcPath, dstPath string) (err error) {
return err return err
} }
func downloadSRCINFO(pkg string, tag string) (*srcinfo.Srcinfo, error) { func downloadSRCINFO(pkg, tag string) (*srcinfo.Srcinfo, error) {
resp, err := http.Get(fmt.Sprintf("https://gitlab.archlinux.org/archlinux/packaging/packages/%s/-/raw/%s/.SRCINFO", pkg, tag)) resp, err := http.Get(fmt.Sprintf(
"https://gitlab.archlinux.org/archlinux/packaging/packages/%s/-/raw/%s/.SRCINFO", pkg, tag),
)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -723,3 +766,127 @@ func downloadSRCINFO(pkg string, tag string) (*srcinfo.Srcinfo, error) {
} }
return nSrcInfo, nil return nSrcInfo, nil
} }
// getDescendantPIDs returns the PIDs of every live process that is a
// descendant (child, grandchild, ...) of rootPID, discovered by scanning
// the PPid field of each /proc/<pid>/status file.
//
// The walk is a best-effort snapshot: processes may appear or exit while
// /proc is being read, so the result can be slightly stale.
func getDescendantPIDs(rootPID int) ([]int, error) {
	procEntries, err := os.ReadDir("/proc")
	if err != nil {
		return nil, err
	}

	// children maps a parent PID to the PIDs of its direct children.
	children := make(map[int][]int, len(procEntries))
	for _, entry := range procEntries {
		// Only purely numeric directory names under /proc are processes.
		if !entry.IsDir() || entry.Name()[0] < '0' || entry.Name()[0] > '9' {
			continue
		}
		pid, err := strconv.Atoi(entry.Name())
		if err != nil {
			continue
		}
		data, err := os.ReadFile(filepath.Join("/proc", entry.Name(), "status"))
		if err != nil {
			// Process likely exited between ReadDir and ReadFile; skip it.
			continue
		}
		for _, line := range strings.Split(string(data), "\n") {
			if !strings.HasPrefix(line, "PPid:") {
				continue
			}
			fields := strings.Fields(line)
			if len(fields) == 2 {
				if ppid, err := strconv.Atoi(fields[1]); err == nil {
					children[ppid] = append(children[ppid], pid)
				}
			}
			break // a status file contains exactly one PPid line
		}
	}

	// Iterative DFS from rootPID; the visited set guards against any
	// pathological cycle in the snapshot (e.g. PID reuse mid-scan).
	var descendants []int
	visited := map[int]bool{rootPID: true}
	stack := []int{rootPID}
	for len(stack) > 0 {
		current := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		for _, child := range children[current] {
			if visited[child] {
				continue
			}
			visited[child] = true
			descendants = append(descendants, child)
			stack = append(stack, child)
		}
	}
	return descendants, nil
}
// MemStats holds a point-in-time memory snapshot for a single process,
// parsed from /proc/<pid>/status. Values are in kilobytes, the unit the
// kernel uses for the VmRSS and VmSwap fields.
type MemStats struct {
	RSS  int64 // resident set size in kB (VmRSS)
	Swap int64 // swapped-out memory in kB (VmSwap)
}

// getMemoryStats reads /proc/<pid>/status and returns the process's
// current resident set size and swap usage, both in kilobytes.
//
// Parsing is best-effort: a malformed or absent VmRSS/VmSwap line leaves
// the corresponding field at zero. Scanning stops as soon as both fields
// have been seen.
func getMemoryStats(pid int) (MemStats, error) {
	data, err := os.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "status"))
	if err != nil {
		return MemStats{}, err
	}

	var stats MemStats
	remaining := 2 // VmRSS and VmSwap still to find
	for _, line := range strings.Split(string(data), "\n") {
		var target *int64
		switch {
		case strings.HasPrefix(line, "VmRSS:"):
			target = &stats.RSS
		case strings.HasPrefix(line, "VmSwap:"):
			target = &stats.Swap
		default:
			continue
		}
		// Line format is e.g. "VmRSS:\t  1234 kB"; parse errors leave zero.
		fields := strings.Fields(line)
		if len(fields) >= 2 {
			kb, _ := strconv.ParseInt(fields[1], 10, 64)
			*target = kb
		}
		remaining--
		if remaining == 0 {
			break
		}
	}
	return stats, nil
}
// pollMemoryUsage periodically samples the combined RSS+swap usage (in kB,
// as reported by getMemoryStats) of pid and all of its descendant
// processes, keeping track of the peak value observed. When done is
// closed, the peak is sent on result and the function returns.
//
// The previous implementation slept in the select's default branch, so a
// close of done could go unnoticed for up to one full interval; the
// ticker-based select below reacts to done immediately.
func pollMemoryUsage(pid int, interval time.Duration, done chan bool, result chan int64) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	var peak int64
	sample := func() {
		var totalRSS, totalSwap int64
		rootStats, err := getMemoryStats(pid)
		if err != nil {
			log.Errorf("failed to get memory stats for root process: %v", err)
		} else {
			totalRSS += rootStats.RSS
			totalSwap += rootStats.Swap
		}
		descendants, err := getDescendantPIDs(pid)
		if err != nil {
			log.Errorf("failed to get descendants: %v", err)
		}
		// Children that exit between the /proc walk and the status read
		// are silently skipped — sampling is best-effort.
		for _, dpid := range descendants {
			if stats, err := getMemoryStats(dpid); err == nil {
				totalRSS += stats.RSS
				totalSwap += stats.Swap
			}
		}
		if newMemory := totalRSS + totalSwap; newMemory > peak {
			peak = newMemory
		}
	}

	sample() // immediate first sample, matching the old loop's behavior
	for {
		select {
		case <-done:
			result <- peak
			return
		case <-ticker.C:
			sample()
		}
	}
}