
Git-based package source layout (#193)

Co-authored-by: Giovanni Harting <539@idlegandalf.com>
Reviewed-on: ALHP/ALHP.GO#193
2023-05-21 20:28:23 +02:00
parent 786b70b842
commit fa902fa68c
25 changed files with 1541 additions and 2386 deletions


@@ -2,6 +2,7 @@ package main
 import (
 	"context"
+	"errors"
 	"fmt"
 	"github.com/c2h5oh/datasize"
 	"github.com/sethvargo/go-retry"
@@ -10,7 +11,6 @@ import (
"os" "os"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"runtime"
"somegit.dev/ALHP/ALHP.GO/ent" "somegit.dev/ALHP/ALHP.GO/ent"
"somegit.dev/ALHP/ALHP.GO/ent/dbpackage" "somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
"strings" "strings"
@@ -30,91 +30,6 @@ type BuildManager struct {
 	queueSignal   chan struct{}
 }
-func (b *BuildManager) refreshSRCINFOs(ctx context.Context, path string) error {
-	pkgBuilds, err := Glob(path)
-	if err != nil {
-		return fmt.Errorf("error scanning for PKGBUILDs: %w", err)
-	}
-	wg := new(sync.WaitGroup)
-	workerChan := make(chan string, runtime.NumCPU())
-	for o := 0; o < runtime.NumCPU(); o++ {
-		wg.Add(1)
-		go b.SRCINFOWorker(ctx, workerChan, wg)
-	}
-	go func() {
-		for _, pkgbuild := range pkgBuilds {
-			workerChan <- pkgbuild
-		}
-		close(workerChan)
-	}()
-	wg.Wait()
-	return nil
-}
-func (b *BuildManager) SRCINFOWorker(ctx context.Context, workIn chan string, wg *sync.WaitGroup) {
-	defer wg.Done()
-	for pkgbuild := range workIn {
-		mPkgbuild := PKGBUILD(pkgbuild)
-		if mPkgbuild.FullRepo() == "trunk" || !Contains(conf.Repos, mPkgbuild.Repo()) ||
-			containsSubStr(mPkgbuild.FullRepo(), conf.Blacklist.Repo) {
-			continue
-		}
-		for _, march := range conf.March {
-			dbPkg, dbErr := db.DbPackage.Query().Where(
-				dbpackage.And(
-					dbpackage.Pkgbase(mPkgbuild.PkgBase()),
-					dbpackage.RepositoryEQ(dbpackage.Repository(mPkgbuild.Repo())),
-					dbpackage.March(march),
-				),
-			).Only(context.Background())
-			if ent.IsNotFound(dbErr) {
-				log.Debugf("[%s/%s] Package not found in database", mPkgbuild.Repo(), mPkgbuild.PkgBase())
-			} else if dbErr != nil {
-				log.Errorf("[%s/%s] Problem querying db for package: %v", mPkgbuild.Repo(), mPkgbuild.PkgBase(), dbErr)
-			}
-			// compare b3sum of PKGBUILD file to hash in database, only proceed if hash differs
-			// reduces the amount of PKGBUILDs that need to be parsed with makepkg, which is _really_ slow, significantly
-			b3s, err := b3sum(pkgbuild)
-			if err != nil {
-				log.Errorf("Error hashing PKGBUILD: %v", err)
-			}
-			if dbPkg != nil && b3s == dbPkg.Hash {
-				log.Debugf("[%s/%s] Skipped: PKGBUILD hash matches db (%s)", mPkgbuild.Repo(), mPkgbuild.PkgBase(), b3s)
-				continue
-			} else if dbPkg != nil && b3s != dbPkg.Hash && dbPkg.SrcinfoHash != b3s {
-				log.Debugf("[%s/%s] srcinfo cleared", mPkgbuild.Repo(), mPkgbuild.PkgBase())
-				dbPkg = dbPkg.Update().ClearSrcinfo().SaveX(context.Background())
-			}
-			proto := &ProtoPackage{
-				Pkgbuild:  pkgbuild,
-				Pkgbase:   mPkgbuild.PkgBase(),
-				Repo:      dbpackage.Repository(mPkgbuild.Repo()),
-				March:     march,
-				FullRepo:  mPkgbuild.Repo() + "-" + march,
-				Hash:      b3s,
-				DBPackage: dbPkg,
-			}
-			_, err = proto.isEligible(ctx)
-			if err != nil {
-				log.Infof("Unable to determine status for package %s: %v", proto.Pkgbase, err)
-				b.repoPurge[proto.FullRepo] <- []*ProtoPackage{proto}
-			} else if proto.DBPackage != nil {
-				proto.DBPackage.Update().SetPkgbuild(proto.Pkgbuild).ExecX(ctx)
-			}
-		}
-	}
-}
 func (b *BuildManager) buildQueue(queue []*ProtoPackage, ctx context.Context) error {
 	var (
 		doneQ []*ProtoPackage
@@ -194,10 +109,10 @@ func (b *BuildManager) buildQueue(queue []*ProtoPackage, ctx context.Context) er
 		go func(pkg *ProtoPackage) {
 			dur, err := pkg.build(ctx)
-			if err != nil {
+			if err != nil && !errors.Is(err, ErrorNotEligible) {
 				log.Warningf("[Q] error building package %s->%s in %s: %s", pkg.FullRepo, pkg.Pkgbase, dur, err)
 				b.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
-			} else {
+			} else if err == nil {
 				log.Infof("[Q] build successful: %s->%s (%s)", pkg.FullRepo, pkg.Pkgbase, dur)
 			}
 			doneQLock.Lock()
@@ -289,7 +204,7 @@ func (b *BuildManager) htmlWorker(ctx context.Context) {
 				Name: repo,
 			}
-			pkgs := db.DbPackage.Query().Order(ent.Asc(dbpackage.FieldPkgbase)).
+			pkgs := db.DBPackage.Query().Order(ent.Asc(dbpackage.FieldPkgbase)).
 				Where(dbpackage.MarchEQ(march), dbpackage.RepositoryEQ(dbpackage.Repository(repo))).AllX(ctx)
 			for _, pkg := range pkgs {
@@ -361,7 +276,7 @@ func (b *BuildManager) htmlWorker(ctx context.Context) {
 			Count  int `json:"count"`
 		}
-		db.DbPackage.Query().GroupBy(dbpackage.FieldStatus).Aggregate(ent.Count()).ScanX(ctx, &v)
+		db.DBPackage.Query().GroupBy(dbpackage.FieldStatus).Aggregate(ent.Count()).ScanX(ctx, &v)
 		for _, c := range v {
 			switch c.Status {
@@ -381,7 +296,7 @@ func (b *BuildManager) htmlWorker(ctx context.Context) {
 			Count int `json:"count"`
 		}
-		db.DbPackage.Query().Where(dbpackage.StatusNEQ(dbpackage.StatusSkipped)).
+		db.DBPackage.Query().Where(dbpackage.StatusNEQ(dbpackage.StatusSkipped)).
 			GroupBy(dbpackage.FieldLto).Aggregate(ent.Count()).ScanX(ctx, &v2)
 		for _, c := range v2 {
@@ -436,36 +351,37 @@ func (b *BuildManager) repoWorker(repo string) {
 		}
 		for _, pkg := range pkgL {
-			pkg.toDBPackage(true)
+			err = pkg.toDBPackage(true)
+			if err != nil {
+				log.Warningf("error getting db entry for %s: %v", pkg.Pkgbase, err)
+				continue
+			}
+			pkgUpd := pkg.DBPackage.Update().
+				SetStatus(dbpackage.StatusLatest).
+				ClearSkipReason().
+				SetRepoVersion(pkg.Version).
+				SetTagRev(pkg.State.TagRev)
 			if _, err := os.Stat(filepath.Join(conf.Basedir.Debug, pkg.March,
 				pkg.DBPackage.Packages[0]+"-debug-"+pkg.Version+"-"+conf.Arch+".pkg.tar.zst")); err == nil {
-				pkg.DBPackage = pkg.DBPackage.Update().
-					SetStatus(dbpackage.StatusLatest).
-					ClearSkipReason().
-					SetDebugSymbols(dbpackage.DebugSymbolsAvailable).
-					SetRepoVersion(pkg.Version).
-					SetHash(pkg.Hash).
-					SaveX(context.Background())
+				pkgUpd = pkgUpd.SetDebugSymbols(dbpackage.DebugSymbolsAvailable)
 			} else {
-				pkg.DBPackage = pkg.DBPackage.Update().
-					SetStatus(dbpackage.StatusLatest).
-					ClearSkipReason().
-					SetDebugSymbols(dbpackage.DebugSymbolsNotAvailable).
-					SetRepoVersion(pkg.Version).
-					SetHash(pkg.Hash).
-					SaveX(context.Background())
+				pkgUpd = pkgUpd.SetDebugSymbols(dbpackage.DebugSymbolsNotAvailable)
 			}
+			pkg.DBPackage = pkgUpd.SaveX(context.Background())
 		}
-		cmd = exec.Command("paccache", "-rc", filepath.Join(conf.Basedir.Repo, repo, "os", conf.Arch), "-k", "1") //nolint:gosec
+		cmd = exec.Command("paccache", "-rc", filepath.Join(conf.Basedir.Repo, repo, "os", conf.Arch), "-k", "1")
 		res, err = cmd.CombinedOutput()
 		log.Debug(string(res))
 		if err != nil {
-			log.Warningf("Error running paccache: %v", err)
+			log.Warningf("error running paccache: %v", err)
 		}
 		err = updateLastUpdated()
 		if err != nil {
-			log.Warningf("Error updating lastupdate: %v", err)
+			log.Warningf("error updating lastupdate: %v", err)
 		}
 		b.repoWG.Done()
 	case pkgL := <-b.repoPurge[repo]:
@@ -475,7 +391,7 @@ func (b *BuildManager) repoWorker(repo string) {
 			}
 			if len(pkg.PkgFiles) == 0 {
 				if err := pkg.findPkgFiles(); err != nil {
-					log.Warningf("[%s/%s] Unable to find files: %v", pkg.FullRepo, pkg.Pkgbase, err)
+					log.Warningf("[%s/%s] unable to find files: %v", pkg.FullRepo, pkg.Pkgbase, err)
 					continue
 				} else if len(pkg.PkgFiles) == 0 {
 					continue
@@ -500,11 +416,11 @@ func (b *BuildManager) repoWorker(repo string) {
 			res, err := cmd.CombinedOutput()
 			log.Debug(string(res))
 			if err != nil && cmd.ProcessState.ExitCode() == 1 {
-				log.Warningf("Error while deleting package %s: %s", pkg.Pkgbase, string(res))
+				log.Warningf("error while deleting package %s: %s", pkg.Pkgbase, string(res))
 			}
 			if pkg.DBPackage != nil {
-				_ = pkg.DBPackage.Update().ClearRepoVersion().ClearHash().Exec(context.Background())
+				_ = pkg.DBPackage.Update().ClearRepoVersion().ClearTagRev().Exec(context.Background())
 			}
 			for _, file := range pkg.PkgFiles {
@@ -513,7 +429,7 @@ func (b *BuildManager) repoWorker(repo string) {
 		}
 		err = updateLastUpdated()
 		if err != nil {
-			log.Warningf("Error updating lastupdate: %v", err)
+			log.Warningf("error updating lastupdate: %v", err)
 		}
 		b.repoWG.Done()
 	}
@@ -522,38 +438,36 @@ func (b *BuildManager) repoWorker(repo string) {
 }
 func (b *BuildManager) syncWorker(ctx context.Context) error {
-	err := os.MkdirAll(filepath.Join(conf.Basedir.Work, upstreamDir), 0o755)
+	err := os.MkdirAll(filepath.Join(conf.Basedir.Work), 0o755)
 	if err != nil {
-		log.Fatalf("Error creating upstream dir: %v", err)
+		log.Fatalf("error creating work dir %s: %v", conf.Basedir.Work, err)
 	}
 	for {
-		for gitDir, gitURL := range conf.Svn2git {
-			gitPath := filepath.Join(conf.Basedir.Work, upstreamDir, gitDir)
+		gitPath := filepath.Join(conf.Basedir.Work, stateDir)
 		if _, err := os.Stat(gitPath); os.IsNotExist(err) {
-			cmd := exec.Command("git", "clone", "--depth=1", gitURL, gitPath)
+			cmd := exec.Command("git", "clone", "--depth=1", conf.StateRepo, gitPath)
 			res, err := cmd.CombinedOutput()
 			log.Debug(string(res))
 			if err != nil {
-				log.Fatalf("Error running git clone: %v", err)
+				log.Fatalf("error cloning state repo: %v", err)
 			}
 		} else if err == nil {
 			cmd := exec.Command("git", "reset", "--hard")
 			cmd.Dir = gitPath
 			res, err := cmd.CombinedOutput()
 			log.Debug(string(res))
 			if err != nil {
-				log.Fatalf("Error running git reset: %v", err)
+				log.Fatalf("error reseting state repo: %v", err)
 			}
 			cmd = exec.Command("git", "pull")
 			cmd.Dir = gitPath
 			res, err = cmd.CombinedOutput()
 			log.Debug(string(res))
 			if err != nil {
-				log.Warningf("Failed to update git repo %s: %v", gitDir, err)
+				log.Warningf("failed to update state repo: %v", err)
 			}
 		}
-		}
@@ -581,12 +495,12 @@ func (b *BuildManager) syncWorker(ctx context.Context) error {
 		b.alpmMutex.Lock()
 		err = alpmHandle.Release()
 		if err != nil {
-			log.Fatalf("Error releasing ALPM handle: %v", err)
+			log.Fatalf("error releasing ALPM handle: %v", err)
 		}
 		if err := retry.Fibonacci(ctx, 1*time.Second, func(ctx context.Context) error {
 			if err := setupChroot(); err != nil {
-				log.Warningf("Unable to upgrade chroot, trying again later.")
+				log.Warningf("unable to upgrade chroot, trying again later")
 				return retry.RetryableError(err)
 			}
 			return nil
@@ -597,30 +511,15 @@ func (b *BuildManager) syncWorker(ctx context.Context) error {
 		alpmHandle, err = initALPM(filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot),
 			filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot, "/var/lib/pacman"))
 		if err != nil {
-			log.Warningf("Error while ALPM-init: %v", err)
+			log.Warningf("error while alpm-init: %v", err)
 		}
 		b.alpmMutex.Unlock()
-		// do refreshSRCINFOs twice here
-		// since MirrorLatest depends on the DB being correct, there can be packages queued which should not be queued,
-		// so we check them twice to eliminate those.
-		log.Debugf("generating build-queue for PKGBUILDs found in %s", filepath.Join(conf.Basedir.Work, upstreamDir, "/**/PKGBUILD"))
-		err = b.refreshSRCINFOs(ctx, filepath.Join(conf.Basedir.Work, upstreamDir, "/**/PKGBUILD"))
-		if err != nil {
-			log.Fatalf("error refreshing PKGBUILDs: %v", err)
-		}
-		log.Debugf("regenerating build-queue for PKGBUILDs found in %s", filepath.Join(conf.Basedir.Work, upstreamDir, "/**/PKGBUILD"))
-		err = b.refreshSRCINFOs(ctx, filepath.Join(conf.Basedir.Work, upstreamDir, "/**/PKGBUILD"))
-		if err != nil {
-			log.Fatalf("error refreshing PKGBUILDs: %v", err)
-		}
-		queue, err := genQueue()
+		queue, err := b.genQueue()
 		if err != nil {
-			log.Warningf("Error building buildQueue: %v", err)
+			log.Errorf("error building queue: %v", err)
 		} else {
-			log.Debugf("buildQueue with %d items", len(queue))
+			log.Debugf("build-queue with %d items", len(queue))
 			err = b.buildQueue(queue, ctx)
 			if err != nil {
 				return err
@@ -631,7 +530,7 @@ func (b *BuildManager) syncWorker(ctx context.Context) error {
 			for _, repo := range repos {
 				err = movePackagesLive(repo)
 				if err != nil {
-					log.Errorf("[%s] Error moving packages live: %v", repo, err)
+					log.Errorf("[%s] error moving packages live: %v", repo, err)
 				}
 			}
 		} else {
@@ -642,3 +541,62 @@ func (b *BuildManager) syncWorker(ctx context.Context) error {
 		time.Sleep(time.Duration(*checkInterval) * time.Minute)
 	}
 }
+func (b *BuildManager) genQueue() ([]*ProtoPackage, error) {
+	stateFiles, err := Glob(filepath.Join(conf.Basedir.Work, stateDir, "**/*"))
+	if err != nil {
+		return nil, fmt.Errorf("error scanning for state-files: %w", err)
+	}
+	var pkgbuilds []*ProtoPackage
+	for _, stateFile := range stateFiles {
+		stat, err := os.Stat(stateFile)
+		if err != nil || stat.IsDir() || strings.Contains(stateFile, ".git") {
+			continue
+		}
+		repo, subRepo, arch, err := stateFileMeta(stateFile)
+		if err != nil {
+			log.Warningf("[QG] error generating statefile metadata %s: %v", stateFile, err)
+			continue
+		}
+		if !Contains(conf.Repos, repo) || (subRepo != nil && Contains(conf.Blacklist.Repo, *subRepo) || arch == "any") {
+			continue
+		}
+		rawState, err := os.ReadFile(stateFile)
+		if err != nil {
+			log.Warningf("[QG] cannot read statefile %s: %v", stateFile, err)
+			continue
+		}
+		state, err := parseState(string(rawState))
+		if err != nil {
+			log.Warningf("[QG] cannot parse statefile %s: %v", stateFile, err)
+			continue
+		}
+		for _, march := range conf.March {
+			pkg := &ProtoPackage{
+				Pkgbase:  state.Pkgbase,
+				Repo:     dbpackage.Repository(repo),
+				March:    march,
+				FullRepo: repo + "-" + march,
+				State:    state,
+				Version:  state.PkgVer,
+				Arch:     arch,
+			}
+			err = pkg.toDBPackage(true)
+			if err != nil {
+				log.Warningf("[QG] error getting/creating dbpackage %s: %v", state.Pkgbase, err)
+				continue
+			}
+			pkgbuilds = append(pkgbuilds, pkg)
+		}
+	}
+	return pkgbuilds, nil
+}
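genQueue replaces the removed PKGBUILD/SRCINFO scanning above: instead of hashing PKGBUILDs, it walks the checked-out state repository and reads one small state file per pkgbase. The exact file format handled by parseState and stateFileMeta is not part of this diff; the sketch below only illustrates the three fields genQueue actually consumes (Pkgbase, PkgVer, TagRev), and the whitespace-separated layout is an assumption for illustration.

package main

import (
	"fmt"
	"strings"
)

// stateEntry mirrors the fields genQueue uses from parseState; the names
// match the diff above, the on-disk layout is assumed.
type stateEntry struct {
	Pkgbase string
	PkgVer  string
	TagRev  string
}

// parseStateLine is a hypothetical stand-in for parseState: split a
// whitespace-separated state file into the fields the queue needs.
func parseStateLine(raw string) (*stateEntry, error) {
	f := strings.Fields(raw)
	if len(f) < 3 {
		return nil, fmt.Errorf("unexpected state format: %q", raw)
	}
	return &stateEntry{Pkgbase: f[0], PkgVer: f[1], TagRev: f[2]}, nil
}

func main() {
	s, err := parseStateLine("linux 6.3.2.arch1-1 0123abcd")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", *s)
}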


@@ -2,11 +2,8 @@ arch: x86_64
 repos:
   - core
   - extra
-  - community
-svn2git:
-  upstream-core-extra: "https://github.com/archlinux/svntogit-packages.git"
-  upstream-community: "https://github.com/archlinux/svntogit-community.git"
+state_repo: "https://gitlab.archlinux.org/archlinux/packaging/state.git"
 db:
   driver: pgx
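The matching Go config struct is not shown in this commit; the change implied by the keys above is roughly the following sketch. Field names follow conf.Repos, conf.March and conf.StateRepo as used in main.go; the yaml tags and the remaining fields are assumptions.

package config

// Sketch only: the svn2git map of mirror URLs gives way to a single state_repo URL.
type Config struct {
	Arch      string   `yaml:"arch"`
	Repos     []string `yaml:"repos"`
	March     []string `yaml:"march"`
	StateRepo string   `yaml:"state_repo"` // was: Svn2git map[string]string `yaml:"svn2git"`
}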


@@ -21,8 +21,8 @@ type Client struct {
 	config
 	// Schema is the client for creating, migrating and dropping schema.
 	Schema *migrate.Schema
-	// DbPackage is the client for interacting with the DbPackage builders.
-	DbPackage *DbPackageClient
+	// DBPackage is the client for interacting with the DBPackage builders.
+	DBPackage *DBPackageClient
 }
 // NewClient creates a new client configured with the given options.
@@ -36,7 +36,7 @@ func NewClient(opts ...Option) *Client {
 func (c *Client) init() {
 	c.Schema = migrate.NewSchema(c.driver)
-	c.DbPackage = NewDbPackageClient(c.config)
+	c.DBPackage = NewDBPackageClient(c.config)
 }
 type (
@@ -119,7 +119,7 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) {
 	return &Tx{
 		ctx:       ctx,
 		config:    cfg,
-		DbPackage: NewDbPackageClient(cfg),
+		DBPackage: NewDBPackageClient(cfg),
 	}, nil
 }
@@ -139,14 +139,14 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error)
 	return &Tx{
 		ctx:       ctx,
 		config:    cfg,
-		DbPackage: NewDbPackageClient(cfg),
+		DBPackage: NewDBPackageClient(cfg),
 	}, nil
 }
 // Debug returns a new debug-client. It's used to get verbose logging on specific operations.
 //
 //	client.Debug().
-//		DbPackage.
+//		DBPackage.
 //		Query().
 //		Count(ctx)
 func (c *Client) Debug() *Client {
@@ -168,111 +168,111 @@ func (c *Client) Close() error {
 // Use adds the mutation hooks to all the entity clients.
 // In order to add hooks to a specific client, call: `client.Node.Use(...)`.
 func (c *Client) Use(hooks ...Hook) {
-	c.DbPackage.Use(hooks...)
+	c.DBPackage.Use(hooks...)
 }
 // Intercept adds the query interceptors to all the entity clients.
 // In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`.
 func (c *Client) Intercept(interceptors ...Interceptor) {
-	c.DbPackage.Intercept(interceptors...)
+	c.DBPackage.Intercept(interceptors...)
 }
 // Mutate implements the ent.Mutator interface.
 func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) {
 	switch m := m.(type) {
-	case *DbPackageMutation:
-		return c.DbPackage.mutate(ctx, m)
+	case *DBPackageMutation:
+		return c.DBPackage.mutate(ctx, m)
 	default:
 		return nil, fmt.Errorf("ent: unknown mutation type %T", m)
 	}
 }
-// DbPackageClient is a client for the DbPackage schema.
-type DbPackageClient struct {
+// DBPackageClient is a client for the DBPackage schema.
+type DBPackageClient struct {
 	config
 }
-// NewDbPackageClient returns a client for the DbPackage from the given config.
-func NewDbPackageClient(c config) *DbPackageClient {
-	return &DbPackageClient{config: c}
+// NewDBPackageClient returns a client for the DBPackage from the given config.
+func NewDBPackageClient(c config) *DBPackageClient {
+	return &DBPackageClient{config: c}
 }
 // Use adds a list of mutation hooks to the hooks stack.
 // A call to `Use(f, g, h)` equals to `dbpackage.Hooks(f(g(h())))`.
-func (c *DbPackageClient) Use(hooks ...Hook) {
-	c.hooks.DbPackage = append(c.hooks.DbPackage, hooks...)
+func (c *DBPackageClient) Use(hooks ...Hook) {
+	c.hooks.DBPackage = append(c.hooks.DBPackage, hooks...)
 }
 // Intercept adds a list of query interceptors to the interceptors stack.
 // A call to `Intercept(f, g, h)` equals to `dbpackage.Intercept(f(g(h())))`.
-func (c *DbPackageClient) Intercept(interceptors ...Interceptor) {
-	c.inters.DbPackage = append(c.inters.DbPackage, interceptors...)
+func (c *DBPackageClient) Intercept(interceptors ...Interceptor) {
+	c.inters.DBPackage = append(c.inters.DBPackage, interceptors...)
 }
-// Create returns a builder for creating a DbPackage entity.
-func (c *DbPackageClient) Create() *DbPackageCreate {
-	mutation := newDbPackageMutation(c.config, OpCreate)
-	return &DbPackageCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+// Create returns a builder for creating a DBPackage entity.
+func (c *DBPackageClient) Create() *DBPackageCreate {
+	mutation := newDBPackageMutation(c.config, OpCreate)
+	return &DBPackageCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
 }
-// CreateBulk returns a builder for creating a bulk of DbPackage entities.
-func (c *DbPackageClient) CreateBulk(builders ...*DbPackageCreate) *DbPackageCreateBulk {
-	return &DbPackageCreateBulk{config: c.config, builders: builders}
+// CreateBulk returns a builder for creating a bulk of DBPackage entities.
+func (c *DBPackageClient) CreateBulk(builders ...*DBPackageCreate) *DBPackageCreateBulk {
+	return &DBPackageCreateBulk{config: c.config, builders: builders}
 }
-// Update returns an update builder for DbPackage.
-func (c *DbPackageClient) Update() *DbPackageUpdate {
-	mutation := newDbPackageMutation(c.config, OpUpdate)
-	return &DbPackageUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+// Update returns an update builder for DBPackage.
+func (c *DBPackageClient) Update() *DBPackageUpdate {
+	mutation := newDBPackageMutation(c.config, OpUpdate)
+	return &DBPackageUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
 }
 // UpdateOne returns an update builder for the given entity.
-func (c *DbPackageClient) UpdateOne(dp *DbPackage) *DbPackageUpdateOne {
-	mutation := newDbPackageMutation(c.config, OpUpdateOne, withDbPackage(dp))
-	return &DbPackageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+func (c *DBPackageClient) UpdateOne(dp *DBPackage) *DBPackageUpdateOne {
+	mutation := newDBPackageMutation(c.config, OpUpdateOne, withDBPackage(dp))
+	return &DBPackageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
 }
 // UpdateOneID returns an update builder for the given id.
-func (c *DbPackageClient) UpdateOneID(id int) *DbPackageUpdateOne {
-	mutation := newDbPackageMutation(c.config, OpUpdateOne, withDbPackageID(id))
-	return &DbPackageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+func (c *DBPackageClient) UpdateOneID(id int) *DBPackageUpdateOne {
+	mutation := newDBPackageMutation(c.config, OpUpdateOne, withDBPackageID(id))
+	return &DBPackageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
 }
-// Delete returns a delete builder for DbPackage.
-func (c *DbPackageClient) Delete() *DbPackageDelete {
-	mutation := newDbPackageMutation(c.config, OpDelete)
-	return &DbPackageDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
+// Delete returns a delete builder for DBPackage.
+func (c *DBPackageClient) Delete() *DBPackageDelete {
+	mutation := newDBPackageMutation(c.config, OpDelete)
+	return &DBPackageDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
 }
 // DeleteOne returns a builder for deleting the given entity.
-func (c *DbPackageClient) DeleteOne(dp *DbPackage) *DbPackageDeleteOne {
+func (c *DBPackageClient) DeleteOne(dp *DBPackage) *DBPackageDeleteOne {
 	return c.DeleteOneID(dp.ID)
 }
 // DeleteOneID returns a builder for deleting the given entity by its id.
-func (c *DbPackageClient) DeleteOneID(id int) *DbPackageDeleteOne {
+func (c *DBPackageClient) DeleteOneID(id int) *DBPackageDeleteOne {
 	builder := c.Delete().Where(dbpackage.ID(id))
 	builder.mutation.id = &id
 	builder.mutation.op = OpDeleteOne
-	return &DbPackageDeleteOne{builder}
+	return &DBPackageDeleteOne{builder}
 }
-// Query returns a query builder for DbPackage.
-func (c *DbPackageClient) Query() *DbPackageQuery {
-	return &DbPackageQuery{
+// Query returns a query builder for DBPackage.
+func (c *DBPackageClient) Query() *DBPackageQuery {
+	return &DBPackageQuery{
 		config: c.config,
-		ctx:    &QueryContext{Type: TypeDbPackage},
+		ctx:    &QueryContext{Type: TypeDBPackage},
 		inters: c.Interceptors(),
 	}
 }
-// Get returns a DbPackage entity by its id.
-func (c *DbPackageClient) Get(ctx context.Context, id int) (*DbPackage, error) {
+// Get returns a DBPackage entity by its id.
+func (c *DBPackageClient) Get(ctx context.Context, id int) (*DBPackage, error) {
 	return c.Query().Where(dbpackage.ID(id)).Only(ctx)
 }
 // GetX is like Get, but panics if an error occurs.
-func (c *DbPackageClient) GetX(ctx context.Context, id int) *DbPackage {
+func (c *DBPackageClient) GetX(ctx context.Context, id int) *DBPackage {
 	obj, err := c.Get(ctx, id)
 	if err != nil {
 		panic(err)
@@ -281,36 +281,36 @@ func (c *DbPackageClient) GetX(ctx context.Context, id int) *DbPackage {
 }
 // Hooks returns the client hooks.
-func (c *DbPackageClient) Hooks() []Hook {
-	return c.hooks.DbPackage
+func (c *DBPackageClient) Hooks() []Hook {
+	return c.hooks.DBPackage
 }
 // Interceptors returns the client interceptors.
-func (c *DbPackageClient) Interceptors() []Interceptor {
-	return c.inters.DbPackage
+func (c *DBPackageClient) Interceptors() []Interceptor {
+	return c.inters.DBPackage
 }
-func (c *DbPackageClient) mutate(ctx context.Context, m *DbPackageMutation) (Value, error) {
+func (c *DBPackageClient) mutate(ctx context.Context, m *DBPackageMutation) (Value, error) {
 	switch m.Op() {
 	case OpCreate:
-		return (&DbPackageCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+		return (&DBPackageCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
 	case OpUpdate:
-		return (&DbPackageUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+		return (&DBPackageUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
 	case OpUpdateOne:
-		return (&DbPackageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+		return (&DBPackageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
 	case OpDelete, OpDeleteOne:
-		return (&DbPackageDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
+		return (&DBPackageDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
 	default:
-		return nil, fmt.Errorf("ent: unknown DbPackage mutation op: %q", m.Op())
+		return nil, fmt.Errorf("ent: unknown DBPackage mutation op: %q", m.Op())
 	}
 }
 // hooks and interceptors per client, for fast access.
 type (
 	hooks struct {
-		DbPackage []ent.Hook
+		DBPackage []ent.Hook
 	}
 	inters struct {
-		DbPackage []ent.Interceptor
+		DBPackage []ent.Interceptor
 	}
 )
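The rename in the generated client is mechanical: every DbPackage identifier becomes DBPackage. A minimal call-site sketch follows, assuming an initialized *ent.Client named db (as in main.go) and the usual ent-generated predicates; the march value is only an example.

package main

import (
	"context"

	"somegit.dev/ALHP/ALHP.GO/ent"
	"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
)

// latestPackages shows a call site after the rename; before this commit the
// same query read db.DbPackage.Query().
func latestPackages(ctx context.Context, db *ent.Client) ([]*ent.DBPackage, error) {
	return db.DBPackage.Query().
		Where(dbpackage.StatusEQ(dbpackage.StatusLatest), dbpackage.MarchEQ("x86-64-v3")).
		All(ctx)
}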


@@ -13,8 +13,8 @@ import (
"somegit.dev/ALHP/ALHP.GO/ent/dbpackage" "somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
) )
// DbPackage is the model entity for the DbPackage schema. // DBPackage is the model entity for the DBPackage schema.
type DbPackage struct { type DBPackage struct {
config `json:"-"` config `json:"-"`
// ID of the ent. // ID of the ent.
ID int `json:"id,omitempty"` ID int `json:"id,omitempty"`
@@ -38,8 +38,6 @@ type DbPackage struct {
 	BuildTimeStart time.Time `json:"build_time_start,omitempty"`
 	// Updated holds the value of the "updated" field.
 	Updated time.Time `json:"updated,omitempty"`
-	// Hash holds the value of the "hash" field.
-	Hash string `json:"hash,omitempty"`
 	// Lto holds the value of the "lto" field.
 	Lto dbpackage.Lto `json:"lto,omitempty"`
 	// LastVersionBuild holds the value of the "last_version_build" field.
@@ -58,17 +56,13 @@ type DbPackage struct {
 	IoIn *int64 `json:"io_in,omitempty"`
 	// IoOut holds the value of the "io_out" field.
 	IoOut *int64 `json:"io_out,omitempty"`
-	// Srcinfo holds the value of the "srcinfo" field.
-	Srcinfo *string `json:"srcinfo,omitempty"`
-	// SrcinfoHash holds the value of the "srcinfo_hash" field.
-	SrcinfoHash string `json:"srcinfo_hash,omitempty"`
-	// Pkgbuild holds the value of the "pkgbuild" field.
-	Pkgbuild string `json:"pkgbuild,omitempty"`
+	// TagRev holds the value of the "tag_rev" field.
+	TagRev *string `json:"tag_rev,omitempty"`
 	selectValues sql.SelectValues
 }
 // scanValues returns the types for scanning values from sql.Rows.
-func (*DbPackage) scanValues(columns []string) ([]any, error) {
+func (*DBPackage) scanValues(columns []string) ([]any, error) {
 	values := make([]any, len(columns))
 	for i := range columns {
 		switch columns[i] {
@@ -76,7 +70,7 @@ func (*DbPackage) scanValues(columns []string) ([]any, error) {
 			values[i] = new([]byte)
 		case dbpackage.FieldID, dbpackage.FieldMaxRss, dbpackage.FieldUTime, dbpackage.FieldSTime, dbpackage.FieldIoIn, dbpackage.FieldIoOut:
 			values[i] = new(sql.NullInt64)
-		case dbpackage.FieldPkgbase, dbpackage.FieldStatus, dbpackage.FieldSkipReason, dbpackage.FieldRepository, dbpackage.FieldMarch, dbpackage.FieldVersion, dbpackage.FieldRepoVersion, dbpackage.FieldHash, dbpackage.FieldLto, dbpackage.FieldLastVersionBuild, dbpackage.FieldDebugSymbols, dbpackage.FieldSrcinfo, dbpackage.FieldSrcinfoHash, dbpackage.FieldPkgbuild:
+		case dbpackage.FieldPkgbase, dbpackage.FieldStatus, dbpackage.FieldSkipReason, dbpackage.FieldRepository, dbpackage.FieldMarch, dbpackage.FieldVersion, dbpackage.FieldRepoVersion, dbpackage.FieldLto, dbpackage.FieldLastVersionBuild, dbpackage.FieldDebugSymbols, dbpackage.FieldTagRev:
 			values[i] = new(sql.NullString)
 		case dbpackage.FieldBuildTimeStart, dbpackage.FieldUpdated, dbpackage.FieldLastVerified:
 			values[i] = new(sql.NullTime)
@@ -88,8 +82,8 @@ func (*DbPackage) scanValues(columns []string) ([]any, error) {
 }
 // assignValues assigns the values that were returned from sql.Rows (after scanning)
-// to the DbPackage fields.
-func (dp *DbPackage) assignValues(columns []string, values []any) error {
+// to the DBPackage fields.
+func (dp *DBPackage) assignValues(columns []string, values []any) error {
 	if m, n := len(values), len(columns); m < n {
 		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
 	}
@@ -163,12 +157,6 @@ func (dp *DbPackage) assignValues(columns []string, values []any) error {
 			} else if value.Valid {
 				dp.Updated = value.Time
 			}
-		case dbpackage.FieldHash:
-			if value, ok := values[i].(*sql.NullString); !ok {
-				return fmt.Errorf("unexpected type %T for field hash", values[i])
-			} else if value.Valid {
-				dp.Hash = value.String
-			}
 		case dbpackage.FieldLto:
 			if value, ok := values[i].(*sql.NullString); !ok {
 				return fmt.Errorf("unexpected type %T for field lto", values[i])
@@ -228,24 +216,12 @@ func (dp *DbPackage) assignValues(columns []string, values []any) error {
 				dp.IoOut = new(int64)
 				*dp.IoOut = value.Int64
 			}
-		case dbpackage.FieldSrcinfo:
+		case dbpackage.FieldTagRev:
 			if value, ok := values[i].(*sql.NullString); !ok {
-				return fmt.Errorf("unexpected type %T for field srcinfo", values[i])
+				return fmt.Errorf("unexpected type %T for field tag_rev", values[i])
 			} else if value.Valid {
-				dp.Srcinfo = new(string)
-				*dp.Srcinfo = value.String
-			}
-		case dbpackage.FieldSrcinfoHash:
-			if value, ok := values[i].(*sql.NullString); !ok {
-				return fmt.Errorf("unexpected type %T for field srcinfo_hash", values[i])
-			} else if value.Valid {
-				dp.SrcinfoHash = value.String
-			}
-		case dbpackage.FieldPkgbuild:
-			if value, ok := values[i].(*sql.NullString); !ok {
-				return fmt.Errorf("unexpected type %T for field pkgbuild", values[i])
-			} else if value.Valid {
-				dp.Pkgbuild = value.String
+				dp.TagRev = new(string)
+				*dp.TagRev = value.String
 			}
 		default:
 			dp.selectValues.Set(columns[i], values[i])
@@ -254,34 +230,34 @@ func (dp *DbPackage) assignValues(columns []string, values []any) error {
 	return nil
 }
-// Value returns the ent.Value that was dynamically selected and assigned to the DbPackage.
+// Value returns the ent.Value that was dynamically selected and assigned to the DBPackage.
 // This includes values selected through modifiers, order, etc.
-func (dp *DbPackage) Value(name string) (ent.Value, error) {
+func (dp *DBPackage) Value(name string) (ent.Value, error) {
 	return dp.selectValues.Get(name)
 }
-// Update returns a builder for updating this DbPackage.
-// Note that you need to call DbPackage.Unwrap() before calling this method if this DbPackage
+// Update returns a builder for updating this DBPackage.
+// Note that you need to call DBPackage.Unwrap() before calling this method if this DBPackage
 // was returned from a transaction, and the transaction was committed or rolled back.
-func (dp *DbPackage) Update() *DbPackageUpdateOne {
-	return NewDbPackageClient(dp.config).UpdateOne(dp)
+func (dp *DBPackage) Update() *DBPackageUpdateOne {
+	return NewDBPackageClient(dp.config).UpdateOne(dp)
 }
-// Unwrap unwraps the DbPackage entity that was returned from a transaction after it was closed,
+// Unwrap unwraps the DBPackage entity that was returned from a transaction after it was closed,
 // so that all future queries will be executed through the driver which created the transaction.
-func (dp *DbPackage) Unwrap() *DbPackage {
+func (dp *DBPackage) Unwrap() *DBPackage {
 	_tx, ok := dp.config.driver.(*txDriver)
 	if !ok {
-		panic("ent: DbPackage is not a transactional entity")
+		panic("ent: DBPackage is not a transactional entity")
 	}
 	dp.config.driver = _tx.drv
 	return dp
 }
 // String implements the fmt.Stringer.
-func (dp *DbPackage) String() string {
+func (dp *DBPackage) String() string {
 	var builder strings.Builder
-	builder.WriteString("DbPackage(")
+	builder.WriteString("DBPackage(")
 	builder.WriteString(fmt.Sprintf("id=%v, ", dp.ID))
 	builder.WriteString("pkgbase=")
 	builder.WriteString(dp.Pkgbase)
@@ -313,9 +289,6 @@ func (dp *DbPackage) String() string {
builder.WriteString("updated=") builder.WriteString("updated=")
builder.WriteString(dp.Updated.Format(time.ANSIC)) builder.WriteString(dp.Updated.Format(time.ANSIC))
builder.WriteString(", ") builder.WriteString(", ")
builder.WriteString("hash=")
builder.WriteString(dp.Hash)
builder.WriteString(", ")
builder.WriteString("lto=") builder.WriteString("lto=")
builder.WriteString(fmt.Sprintf("%v", dp.Lto)) builder.WriteString(fmt.Sprintf("%v", dp.Lto))
builder.WriteString(", ") builder.WriteString(", ")
@@ -353,19 +326,13 @@ func (dp *DbPackage) String() string {
 		builder.WriteString(fmt.Sprintf("%v", *v))
 	}
 	builder.WriteString(", ")
-	if v := dp.Srcinfo; v != nil {
-		builder.WriteString("srcinfo=")
+	if v := dp.TagRev; v != nil {
+		builder.WriteString("tag_rev=")
 		builder.WriteString(*v)
 	}
-	builder.WriteString(", ")
-	builder.WriteString("srcinfo_hash=")
-	builder.WriteString(dp.SrcinfoHash)
-	builder.WriteString(", ")
-	builder.WriteString("pkgbuild=")
-	builder.WriteString(dp.Pkgbuild)
 	builder.WriteByte(')')
 	return builder.String()
 }
-// DbPackages is a parsable slice of DbPackage.
-type DbPackages []*DbPackage
+// DBPackages is a parsable slice of DBPackage.
+type DBPackages []*DBPackage


@@ -33,8 +33,6 @@ const (
 	FieldBuildTimeStart = "build_time_start"
 	// FieldUpdated holds the string denoting the updated field in the database.
 	FieldUpdated = "updated"
-	// FieldHash holds the string denoting the hash field in the database.
-	FieldHash = "hash"
 	// FieldLto holds the string denoting the lto field in the database.
 	FieldLto = "lto"
 	// FieldLastVersionBuild holds the string denoting the last_version_build field in the database.
@@ -53,12 +51,8 @@ const (
 	FieldIoIn = "io_in"
 	// FieldIoOut holds the string denoting the io_out field in the database.
 	FieldIoOut = "io_out"
-	// FieldSrcinfo holds the string denoting the srcinfo field in the database.
-	FieldSrcinfo = "srcinfo"
-	// FieldSrcinfoHash holds the string denoting the srcinfo_hash field in the database.
-	FieldSrcinfoHash = "srcinfo_hash"
-	// FieldPkgbuild holds the string denoting the pkgbuild field in the database.
-	FieldPkgbuild = "pkgbuild"
+	// FieldTagRev holds the string denoting the tag_rev field in the database.
+	FieldTagRev = "tag_rev"
 	// Table holds the table name of the dbpackage in the database.
 	Table = "db_packages"
 )
@@ -76,7 +70,6 @@ var Columns = []string{
 	FieldRepoVersion,
 	FieldBuildTimeStart,
 	FieldUpdated,
-	FieldHash,
 	FieldLto,
 	FieldLastVersionBuild,
 	FieldLastVerified,
@@ -86,9 +79,7 @@ var Columns = []string{
 	FieldSTime,
 	FieldIoIn,
 	FieldIoOut,
-	FieldSrcinfo,
-	FieldSrcinfoHash,
-	FieldPkgbuild,
+	FieldTagRev,
 }
 // ValidColumn reports if the column name is valid (part of the table columns).
@@ -146,9 +137,8 @@ type Repository string
 // Repository values.
 const (
 	RepositoryExtra Repository = "extra"
 	RepositoryCore  Repository = "core"
-	RepositoryCommunity Repository = "community"
 )
 func (r Repository) String() string {
@@ -158,7 +148,7 @@ func (r Repository) String() string {
 // RepositoryValidator is a validator for the "repository" field enum values. It is called by the builders before save.
 func RepositoryValidator(r Repository) error {
 	switch r {
-	case RepositoryExtra, RepositoryCore, RepositoryCommunity:
+	case RepositoryExtra, RepositoryCore:
 		return nil
 	default:
 		return fmt.Errorf("dbpackage: invalid enum value for repository field: %q", r)
@@ -220,7 +210,7 @@ func DebugSymbolsValidator(ds DebugSymbols) error {
 	}
 }
-// OrderOption defines the ordering options for the DbPackage queries.
+// OrderOption defines the ordering options for the DBPackage queries.
 type OrderOption func(*sql.Selector)
 // ByID orders the results by the id field.
@@ -273,11 +263,6 @@ func ByUpdated(opts ...sql.OrderTermOption) OrderOption {
 	return sql.OrderByField(FieldUpdated, opts...).ToFunc()
 }
-// ByHash orders the results by the hash field.
-func ByHash(opts ...sql.OrderTermOption) OrderOption {
-	return sql.OrderByField(FieldHash, opts...).ToFunc()
-}
 // ByLto orders the results by the lto field.
 func ByLto(opts ...sql.OrderTermOption) OrderOption {
 	return sql.OrderByField(FieldLto, opts...).ToFunc()
@@ -323,17 +308,7 @@ func ByIoOut(opts ...sql.OrderTermOption) OrderOption {
 	return sql.OrderByField(FieldIoOut, opts...).ToFunc()
 }
-// BySrcinfo orders the results by the srcinfo field.
-func BySrcinfo(opts ...sql.OrderTermOption) OrderOption {
-	return sql.OrderByField(FieldSrcinfo, opts...).ToFunc()
-}
-// BySrcinfoHash orders the results by the srcinfo_hash field.
-func BySrcinfoHash(opts ...sql.OrderTermOption) OrderOption {
-	return sql.OrderByField(FieldSrcinfoHash, opts...).ToFunc()
-}
-// ByPkgbuild orders the results by the pkgbuild field.
-func ByPkgbuild(opts ...sql.OrderTermOption) OrderOption {
-	return sql.OrderByField(FieldPkgbuild, opts...).ToFunc()
+// ByTagRev orders the results by the tag_rev field.
+func ByTagRev(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldTagRev, opts...).ToFunc()
 }
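With hash, srcinfo, srcinfo_hash and pkgbuild gone, tag_rev is the only field left that ties a database row to a state-repo revision. A small query sketch using the new order option follows; TagRevNotNil is the predicate ent normally generates for an optional string field and is an assumption here, as is the db client variable.

package main

import (
	"context"

	"entgo.io/ent/dialect/sql"

	"somegit.dev/ALHP/ALHP.GO/ent"
	"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
)

// recentlyTagged lists packages that already have a tag_rev recorded,
// ordered by tag_rev (lexically descending). Sketch only; not part of this commit.
func recentlyTagged(ctx context.Context, db *ent.Client) ([]*ent.DBPackage, error) {
	return db.DBPackage.Query().
		Where(dbpackage.TagRevNotNil()).
		Order(dbpackage.ByTagRev(sql.OrderDesc())).
		All(ctx)
}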

File diff suppressed because it is too large


@@ -13,33 +13,33 @@ import (
"somegit.dev/ALHP/ALHP.GO/ent/dbpackage" "somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
) )
// DbPackageCreate is the builder for creating a DbPackage entity. // DBPackageCreate is the builder for creating a DBPackage entity.
type DbPackageCreate struct { type DBPackageCreate struct {
config config
mutation *DbPackageMutation mutation *DBPackageMutation
hooks []Hook hooks []Hook
} }
// SetPkgbase sets the "pkgbase" field. // SetPkgbase sets the "pkgbase" field.
func (dpc *DbPackageCreate) SetPkgbase(s string) *DbPackageCreate { func (dpc *DBPackageCreate) SetPkgbase(s string) *DBPackageCreate {
dpc.mutation.SetPkgbase(s) dpc.mutation.SetPkgbase(s)
return dpc return dpc
} }
// SetPackages sets the "packages" field. // SetPackages sets the "packages" field.
func (dpc *DbPackageCreate) SetPackages(s []string) *DbPackageCreate { func (dpc *DBPackageCreate) SetPackages(s []string) *DBPackageCreate {
dpc.mutation.SetPackages(s) dpc.mutation.SetPackages(s)
return dpc return dpc
} }
// SetStatus sets the "status" field. // SetStatus sets the "status" field.
func (dpc *DbPackageCreate) SetStatus(d dbpackage.Status) *DbPackageCreate { func (dpc *DBPackageCreate) SetStatus(d dbpackage.Status) *DBPackageCreate {
dpc.mutation.SetStatus(d) dpc.mutation.SetStatus(d)
return dpc return dpc
} }
// SetNillableStatus sets the "status" field if the given value is not nil. // SetNillableStatus sets the "status" field if the given value is not nil.
func (dpc *DbPackageCreate) SetNillableStatus(d *dbpackage.Status) *DbPackageCreate { func (dpc *DBPackageCreate) SetNillableStatus(d *dbpackage.Status) *DBPackageCreate {
if d != nil { if d != nil {
dpc.SetStatus(*d) dpc.SetStatus(*d)
} }
@@ -47,13 +47,13 @@ func (dpc *DbPackageCreate) SetNillableStatus(d *dbpackage.Status) *DbPackageCre
} }
// SetSkipReason sets the "skip_reason" field. // SetSkipReason sets the "skip_reason" field.
func (dpc *DbPackageCreate) SetSkipReason(s string) *DbPackageCreate { func (dpc *DBPackageCreate) SetSkipReason(s string) *DBPackageCreate {
dpc.mutation.SetSkipReason(s) dpc.mutation.SetSkipReason(s)
return dpc return dpc
} }
// SetNillableSkipReason sets the "skip_reason" field if the given value is not nil. // SetNillableSkipReason sets the "skip_reason" field if the given value is not nil.
func (dpc *DbPackageCreate) SetNillableSkipReason(s *string) *DbPackageCreate { func (dpc *DBPackageCreate) SetNillableSkipReason(s *string) *DBPackageCreate {
if s != nil { if s != nil {
dpc.SetSkipReason(*s) dpc.SetSkipReason(*s)
} }
@@ -61,25 +61,25 @@ func (dpc *DbPackageCreate) SetNillableSkipReason(s *string) *DbPackageCreate {
} }
// SetRepository sets the "repository" field. // SetRepository sets the "repository" field.
func (dpc *DbPackageCreate) SetRepository(d dbpackage.Repository) *DbPackageCreate { func (dpc *DBPackageCreate) SetRepository(d dbpackage.Repository) *DBPackageCreate {
dpc.mutation.SetRepository(d) dpc.mutation.SetRepository(d)
return dpc return dpc
} }
// SetMarch sets the "march" field. // SetMarch sets the "march" field.
func (dpc *DbPackageCreate) SetMarch(s string) *DbPackageCreate { func (dpc *DBPackageCreate) SetMarch(s string) *DBPackageCreate {
dpc.mutation.SetMarch(s) dpc.mutation.SetMarch(s)
return dpc return dpc
} }
// SetVersion sets the "version" field. // SetVersion sets the "version" field.
func (dpc *DbPackageCreate) SetVersion(s string) *DbPackageCreate { func (dpc *DBPackageCreate) SetVersion(s string) *DBPackageCreate {
dpc.mutation.SetVersion(s) dpc.mutation.SetVersion(s)
return dpc return dpc
} }
// SetNillableVersion sets the "version" field if the given value is not nil. // SetNillableVersion sets the "version" field if the given value is not nil.
func (dpc *DbPackageCreate) SetNillableVersion(s *string) *DbPackageCreate { func (dpc *DBPackageCreate) SetNillableVersion(s *string) *DBPackageCreate {
if s != nil { if s != nil {
dpc.SetVersion(*s) dpc.SetVersion(*s)
} }
@@ -87,13 +87,13 @@ func (dpc *DbPackageCreate) SetNillableVersion(s *string) *DbPackageCreate {
} }
// SetRepoVersion sets the "repo_version" field. // SetRepoVersion sets the "repo_version" field.
func (dpc *DbPackageCreate) SetRepoVersion(s string) *DbPackageCreate { func (dpc *DBPackageCreate) SetRepoVersion(s string) *DBPackageCreate {
dpc.mutation.SetRepoVersion(s) dpc.mutation.SetRepoVersion(s)
return dpc return dpc
} }
// SetNillableRepoVersion sets the "repo_version" field if the given value is not nil. // SetNillableRepoVersion sets the "repo_version" field if the given value is not nil.
func (dpc *DbPackageCreate) SetNillableRepoVersion(s *string) *DbPackageCreate { func (dpc *DBPackageCreate) SetNillableRepoVersion(s *string) *DBPackageCreate {
if s != nil { if s != nil {
dpc.SetRepoVersion(*s) dpc.SetRepoVersion(*s)
} }
@@ -101,13 +101,13 @@ func (dpc *DbPackageCreate) SetNillableRepoVersion(s *string) *DbPackageCreate {
} }
// SetBuildTimeStart sets the "build_time_start" field. // SetBuildTimeStart sets the "build_time_start" field.
func (dpc *DbPackageCreate) SetBuildTimeStart(t time.Time) *DbPackageCreate { func (dpc *DBPackageCreate) SetBuildTimeStart(t time.Time) *DBPackageCreate {
dpc.mutation.SetBuildTimeStart(t) dpc.mutation.SetBuildTimeStart(t)
return dpc return dpc
} }
// SetNillableBuildTimeStart sets the "build_time_start" field if the given value is not nil. // SetNillableBuildTimeStart sets the "build_time_start" field if the given value is not nil.
func (dpc *DbPackageCreate) SetNillableBuildTimeStart(t *time.Time) *DbPackageCreate { func (dpc *DBPackageCreate) SetNillableBuildTimeStart(t *time.Time) *DBPackageCreate {
if t != nil { if t != nil {
dpc.SetBuildTimeStart(*t) dpc.SetBuildTimeStart(*t)
} }
@@ -115,41 +115,27 @@ func (dpc *DbPackageCreate) SetNillableBuildTimeStart(t *time.Time) *DbPackageCr
} }
// SetUpdated sets the "updated" field. // SetUpdated sets the "updated" field.
func (dpc *DbPackageCreate) SetUpdated(t time.Time) *DbPackageCreate { func (dpc *DBPackageCreate) SetUpdated(t time.Time) *DBPackageCreate {
dpc.mutation.SetUpdated(t) dpc.mutation.SetUpdated(t)
return dpc return dpc
} }
// SetNillableUpdated sets the "updated" field if the given value is not nil. // SetNillableUpdated sets the "updated" field if the given value is not nil.
func (dpc *DbPackageCreate) SetNillableUpdated(t *time.Time) *DbPackageCreate { func (dpc *DBPackageCreate) SetNillableUpdated(t *time.Time) *DBPackageCreate {
if t != nil { if t != nil {
dpc.SetUpdated(*t) dpc.SetUpdated(*t)
} }
return dpc return dpc
} }
// SetHash sets the "hash" field.
func (dpc *DbPackageCreate) SetHash(s string) *DbPackageCreate {
dpc.mutation.SetHash(s)
return dpc
}
// SetNillableHash sets the "hash" field if the given value is not nil.
func (dpc *DbPackageCreate) SetNillableHash(s *string) *DbPackageCreate {
if s != nil {
dpc.SetHash(*s)
}
return dpc
}
// SetLto sets the "lto" field. // SetLto sets the "lto" field.
func (dpc *DbPackageCreate) SetLto(d dbpackage.Lto) *DbPackageCreate { func (dpc *DBPackageCreate) SetLto(d dbpackage.Lto) *DBPackageCreate {
dpc.mutation.SetLto(d) dpc.mutation.SetLto(d)
return dpc return dpc
} }
// SetNillableLto sets the "lto" field if the given value is not nil. // SetNillableLto sets the "lto" field if the given value is not nil.
func (dpc *DbPackageCreate) SetNillableLto(d *dbpackage.Lto) *DbPackageCreate { func (dpc *DBPackageCreate) SetNillableLto(d *dbpackage.Lto) *DBPackageCreate {
if d != nil { if d != nil {
dpc.SetLto(*d) dpc.SetLto(*d)
} }
@@ -157,13 +143,13 @@ func (dpc *DbPackageCreate) SetNillableLto(d *dbpackage.Lto) *DbPackageCreate {
} }
// SetLastVersionBuild sets the "last_version_build" field. // SetLastVersionBuild sets the "last_version_build" field.
func (dpc *DbPackageCreate) SetLastVersionBuild(s string) *DbPackageCreate { func (dpc *DBPackageCreate) SetLastVersionBuild(s string) *DBPackageCreate {
dpc.mutation.SetLastVersionBuild(s) dpc.mutation.SetLastVersionBuild(s)
return dpc return dpc
} }
// SetNillableLastVersionBuild sets the "last_version_build" field if the given value is not nil. // SetNillableLastVersionBuild sets the "last_version_build" field if the given value is not nil.
func (dpc *DbPackageCreate) SetNillableLastVersionBuild(s *string) *DbPackageCreate { func (dpc *DBPackageCreate) SetNillableLastVersionBuild(s *string) *DBPackageCreate {
if s != nil { if s != nil {
dpc.SetLastVersionBuild(*s) dpc.SetLastVersionBuild(*s)
} }
@@ -171,13 +157,13 @@ func (dpc *DbPackageCreate) SetNillableLastVersionBuild(s *string) *DbPackageCre
} }
// SetLastVerified sets the "last_verified" field. // SetLastVerified sets the "last_verified" field.
func (dpc *DbPackageCreate) SetLastVerified(t time.Time) *DbPackageCreate { func (dpc *DBPackageCreate) SetLastVerified(t time.Time) *DBPackageCreate {
dpc.mutation.SetLastVerified(t) dpc.mutation.SetLastVerified(t)
return dpc return dpc
} }
// SetNillableLastVerified sets the "last_verified" field if the given value is not nil. // SetNillableLastVerified sets the "last_verified" field if the given value is not nil.
func (dpc *DbPackageCreate) SetNillableLastVerified(t *time.Time) *DbPackageCreate { func (dpc *DBPackageCreate) SetNillableLastVerified(t *time.Time) *DBPackageCreate {
if t != nil { if t != nil {
dpc.SetLastVerified(*t) dpc.SetLastVerified(*t)
} }
@@ -185,13 +171,13 @@ func (dpc *DbPackageCreate) SetNillableLastVerified(t *time.Time) *DbPackageCrea
} }
// SetDebugSymbols sets the "debug_symbols" field. // SetDebugSymbols sets the "debug_symbols" field.
func (dpc *DbPackageCreate) SetDebugSymbols(ds dbpackage.DebugSymbols) *DbPackageCreate { func (dpc *DBPackageCreate) SetDebugSymbols(ds dbpackage.DebugSymbols) *DBPackageCreate {
dpc.mutation.SetDebugSymbols(ds) dpc.mutation.SetDebugSymbols(ds)
return dpc return dpc
} }
// SetNillableDebugSymbols sets the "debug_symbols" field if the given value is not nil. // SetNillableDebugSymbols sets the "debug_symbols" field if the given value is not nil.
func (dpc *DbPackageCreate) SetNillableDebugSymbols(ds *dbpackage.DebugSymbols) *DbPackageCreate { func (dpc *DBPackageCreate) SetNillableDebugSymbols(ds *dbpackage.DebugSymbols) *DBPackageCreate {
if ds != nil { if ds != nil {
dpc.SetDebugSymbols(*ds) dpc.SetDebugSymbols(*ds)
} }
@@ -199,13 +185,13 @@ func (dpc *DbPackageCreate) SetNillableDebugSymbols(ds *dbpackage.DebugSymbols)
} }
// SetMaxRss sets the "max_rss" field. // SetMaxRss sets the "max_rss" field.
func (dpc *DbPackageCreate) SetMaxRss(i int64) *DbPackageCreate { func (dpc *DBPackageCreate) SetMaxRss(i int64) *DBPackageCreate {
dpc.mutation.SetMaxRss(i) dpc.mutation.SetMaxRss(i)
return dpc return dpc
} }
// SetNillableMaxRss sets the "max_rss" field if the given value is not nil. // SetNillableMaxRss sets the "max_rss" field if the given value is not nil.
func (dpc *DbPackageCreate) SetNillableMaxRss(i *int64) *DbPackageCreate { func (dpc *DBPackageCreate) SetNillableMaxRss(i *int64) *DBPackageCreate {
if i != nil { if i != nil {
dpc.SetMaxRss(*i) dpc.SetMaxRss(*i)
} }
@@ -213,13 +199,13 @@ func (dpc *DbPackageCreate) SetNillableMaxRss(i *int64) *DbPackageCreate {
} }
// SetUTime sets the "u_time" field. // SetUTime sets the "u_time" field.
func (dpc *DbPackageCreate) SetUTime(i int64) *DbPackageCreate { func (dpc *DBPackageCreate) SetUTime(i int64) *DBPackageCreate {
dpc.mutation.SetUTime(i) dpc.mutation.SetUTime(i)
return dpc return dpc
} }
// SetNillableUTime sets the "u_time" field if the given value is not nil. // SetNillableUTime sets the "u_time" field if the given value is not nil.
func (dpc *DbPackageCreate) SetNillableUTime(i *int64) *DbPackageCreate { func (dpc *DBPackageCreate) SetNillableUTime(i *int64) *DBPackageCreate {
if i != nil { if i != nil {
dpc.SetUTime(*i) dpc.SetUTime(*i)
} }
@@ -227,13 +213,13 @@ func (dpc *DbPackageCreate) SetNillableUTime(i *int64) *DbPackageCreate {
} }
// SetSTime sets the "s_time" field. // SetSTime sets the "s_time" field.
func (dpc *DbPackageCreate) SetSTime(i int64) *DbPackageCreate { func (dpc *DBPackageCreate) SetSTime(i int64) *DBPackageCreate {
dpc.mutation.SetSTime(i) dpc.mutation.SetSTime(i)
return dpc return dpc
} }
// SetNillableSTime sets the "s_time" field if the given value is not nil. // SetNillableSTime sets the "s_time" field if the given value is not nil.
func (dpc *DbPackageCreate) SetNillableSTime(i *int64) *DbPackageCreate { func (dpc *DBPackageCreate) SetNillableSTime(i *int64) *DBPackageCreate {
if i != nil { if i != nil {
dpc.SetSTime(*i) dpc.SetSTime(*i)
} }
@@ -241,13 +227,13 @@ func (dpc *DbPackageCreate) SetNillableSTime(i *int64) *DbPackageCreate {
} }
// SetIoIn sets the "io_in" field. // SetIoIn sets the "io_in" field.
func (dpc *DbPackageCreate) SetIoIn(i int64) *DbPackageCreate { func (dpc *DBPackageCreate) SetIoIn(i int64) *DBPackageCreate {
dpc.mutation.SetIoIn(i) dpc.mutation.SetIoIn(i)
return dpc return dpc
} }
// SetNillableIoIn sets the "io_in" field if the given value is not nil. // SetNillableIoIn sets the "io_in" field if the given value is not nil.
func (dpc *DbPackageCreate) SetNillableIoIn(i *int64) *DbPackageCreate { func (dpc *DBPackageCreate) SetNillableIoIn(i *int64) *DBPackageCreate {
if i != nil { if i != nil {
dpc.SetIoIn(*i) dpc.SetIoIn(*i)
} }
@@ -255,74 +241,46 @@ func (dpc *DbPackageCreate) SetNillableIoIn(i *int64) *DbPackageCreate {
} }
// SetIoOut sets the "io_out" field. // SetIoOut sets the "io_out" field.
func (dpc *DbPackageCreate) SetIoOut(i int64) *DbPackageCreate { func (dpc *DBPackageCreate) SetIoOut(i int64) *DBPackageCreate {
dpc.mutation.SetIoOut(i) dpc.mutation.SetIoOut(i)
return dpc return dpc
} }
// SetNillableIoOut sets the "io_out" field if the given value is not nil. // SetNillableIoOut sets the "io_out" field if the given value is not nil.
func (dpc *DbPackageCreate) SetNillableIoOut(i *int64) *DbPackageCreate { func (dpc *DBPackageCreate) SetNillableIoOut(i *int64) *DBPackageCreate {
if i != nil { if i != nil {
dpc.SetIoOut(*i) dpc.SetIoOut(*i)
} }
return dpc return dpc
} }
-// SetSrcinfo sets the "srcinfo" field.
+// SetTagRev sets the "tag_rev" field.
-func (dpc *DbPackageCreate) SetSrcinfo(s string) *DbPackageCreate {
+func (dpc *DBPackageCreate) SetTagRev(s string) *DBPackageCreate {
-dpc.mutation.SetSrcinfo(s)
+dpc.mutation.SetTagRev(s)
return dpc
}
-// SetNillableSrcinfo sets the "srcinfo" field if the given value is not nil.
+// SetNillableTagRev sets the "tag_rev" field if the given value is not nil.
-func (dpc *DbPackageCreate) SetNillableSrcinfo(s *string) *DbPackageCreate {
+func (dpc *DBPackageCreate) SetNillableTagRev(s *string) *DBPackageCreate {
if s != nil {
-dpc.SetSrcinfo(*s)
+dpc.SetTagRev(*s)
}
return dpc
}
-// SetSrcinfoHash sets the "srcinfo_hash" field.
-func (dpc *DbPackageCreate) SetSrcinfoHash(s string) *DbPackageCreate {
-dpc.mutation.SetSrcinfoHash(s)
-return dpc
-}
-// SetNillableSrcinfoHash sets the "srcinfo_hash" field if the given value is not nil.
-func (dpc *DbPackageCreate) SetNillableSrcinfoHash(s *string) *DbPackageCreate {
-if s != nil {
-dpc.SetSrcinfoHash(*s)
-}
-return dpc
-}
-// SetPkgbuild sets the "pkgbuild" field.
-func (dpc *DbPackageCreate) SetPkgbuild(s string) *DbPackageCreate {
-dpc.mutation.SetPkgbuild(s)
-return dpc
-}
-// SetNillablePkgbuild sets the "pkgbuild" field if the given value is not nil.
-func (dpc *DbPackageCreate) SetNillablePkgbuild(s *string) *DbPackageCreate {
-if s != nil {
-dpc.SetPkgbuild(*s)
-}
-return dpc
-}
-// Mutation returns the DbPackageMutation object of the builder.
-func (dpc *DbPackageCreate) Mutation() *DbPackageMutation {
+// Mutation returns the DBPackageMutation object of the builder.
+func (dpc *DBPackageCreate) Mutation() *DBPackageMutation {
return dpc.mutation
}
// Save creates the DbPackage in the database. // Save creates the DBPackage in the database.
func (dpc *DbPackageCreate) Save(ctx context.Context) (*DbPackage, error) { func (dpc *DBPackageCreate) Save(ctx context.Context) (*DBPackage, error) {
dpc.defaults() dpc.defaults()
return withHooks(ctx, dpc.sqlSave, dpc.mutation, dpc.hooks) return withHooks(ctx, dpc.sqlSave, dpc.mutation, dpc.hooks)
} }
// SaveX calls Save and panics if Save returns an error. // SaveX calls Save and panics if Save returns an error.
func (dpc *DbPackageCreate) SaveX(ctx context.Context) *DbPackage { func (dpc *DBPackageCreate) SaveX(ctx context.Context) *DBPackage {
v, err := dpc.Save(ctx) v, err := dpc.Save(ctx)
if err != nil { if err != nil {
panic(err) panic(err)
@@ -331,20 +289,20 @@ func (dpc *DbPackageCreate) SaveX(ctx context.Context) *DbPackage {
} }
// Exec executes the query. // Exec executes the query.
func (dpc *DbPackageCreate) Exec(ctx context.Context) error { func (dpc *DBPackageCreate) Exec(ctx context.Context) error {
_, err := dpc.Save(ctx) _, err := dpc.Save(ctx)
return err return err
} }
// ExecX is like Exec, but panics if an error occurs. // ExecX is like Exec, but panics if an error occurs.
func (dpc *DbPackageCreate) ExecX(ctx context.Context) { func (dpc *DBPackageCreate) ExecX(ctx context.Context) {
if err := dpc.Exec(ctx); err != nil { if err := dpc.Exec(ctx); err != nil {
panic(err) panic(err)
} }
} }
// defaults sets the default values of the builder before save. // defaults sets the default values of the builder before save.
func (dpc *DbPackageCreate) defaults() { func (dpc *DBPackageCreate) defaults() {
if _, ok := dpc.mutation.Status(); !ok { if _, ok := dpc.mutation.Status(); !ok {
v := dbpackage.DefaultStatus v := dbpackage.DefaultStatus
dpc.mutation.SetStatus(v) dpc.mutation.SetStatus(v)
@@ -360,50 +318,50 @@ func (dpc *DbPackageCreate) defaults() {
} }
// check runs all checks and user-defined validators on the builder. // check runs all checks and user-defined validators on the builder.
func (dpc *DbPackageCreate) check() error { func (dpc *DBPackageCreate) check() error {
if _, ok := dpc.mutation.Pkgbase(); !ok { if _, ok := dpc.mutation.Pkgbase(); !ok {
return &ValidationError{Name: "pkgbase", err: errors.New(`ent: missing required field "DbPackage.pkgbase"`)} return &ValidationError{Name: "pkgbase", err: errors.New(`ent: missing required field "DBPackage.pkgbase"`)}
} }
if v, ok := dpc.mutation.Pkgbase(); ok { if v, ok := dpc.mutation.Pkgbase(); ok {
if err := dbpackage.PkgbaseValidator(v); err != nil { if err := dbpackage.PkgbaseValidator(v); err != nil {
return &ValidationError{Name: "pkgbase", err: fmt.Errorf(`ent: validator failed for field "DbPackage.pkgbase": %w`, err)} return &ValidationError{Name: "pkgbase", err: fmt.Errorf(`ent: validator failed for field "DBPackage.pkgbase": %w`, err)}
} }
} }
if v, ok := dpc.mutation.Status(); ok { if v, ok := dpc.mutation.Status(); ok {
if err := dbpackage.StatusValidator(v); err != nil { if err := dbpackage.StatusValidator(v); err != nil {
return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "DbPackage.status": %w`, err)} return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "DBPackage.status": %w`, err)}
} }
} }
if _, ok := dpc.mutation.Repository(); !ok { if _, ok := dpc.mutation.Repository(); !ok {
return &ValidationError{Name: "repository", err: errors.New(`ent: missing required field "DbPackage.repository"`)} return &ValidationError{Name: "repository", err: errors.New(`ent: missing required field "DBPackage.repository"`)}
} }
if v, ok := dpc.mutation.Repository(); ok { if v, ok := dpc.mutation.Repository(); ok {
if err := dbpackage.RepositoryValidator(v); err != nil { if err := dbpackage.RepositoryValidator(v); err != nil {
return &ValidationError{Name: "repository", err: fmt.Errorf(`ent: validator failed for field "DbPackage.repository": %w`, err)} return &ValidationError{Name: "repository", err: fmt.Errorf(`ent: validator failed for field "DBPackage.repository": %w`, err)}
} }
} }
if _, ok := dpc.mutation.March(); !ok { if _, ok := dpc.mutation.March(); !ok {
return &ValidationError{Name: "march", err: errors.New(`ent: missing required field "DbPackage.march"`)} return &ValidationError{Name: "march", err: errors.New(`ent: missing required field "DBPackage.march"`)}
} }
if v, ok := dpc.mutation.March(); ok { if v, ok := dpc.mutation.March(); ok {
if err := dbpackage.MarchValidator(v); err != nil { if err := dbpackage.MarchValidator(v); err != nil {
return &ValidationError{Name: "march", err: fmt.Errorf(`ent: validator failed for field "DbPackage.march": %w`, err)} return &ValidationError{Name: "march", err: fmt.Errorf(`ent: validator failed for field "DBPackage.march": %w`, err)}
} }
} }
if v, ok := dpc.mutation.Lto(); ok { if v, ok := dpc.mutation.Lto(); ok {
if err := dbpackage.LtoValidator(v); err != nil { if err := dbpackage.LtoValidator(v); err != nil {
return &ValidationError{Name: "lto", err: fmt.Errorf(`ent: validator failed for field "DbPackage.lto": %w`, err)} return &ValidationError{Name: "lto", err: fmt.Errorf(`ent: validator failed for field "DBPackage.lto": %w`, err)}
} }
} }
if v, ok := dpc.mutation.DebugSymbols(); ok { if v, ok := dpc.mutation.DebugSymbols(); ok {
if err := dbpackage.DebugSymbolsValidator(v); err != nil { if err := dbpackage.DebugSymbolsValidator(v); err != nil {
return &ValidationError{Name: "debug_symbols", err: fmt.Errorf(`ent: validator failed for field "DbPackage.debug_symbols": %w`, err)} return &ValidationError{Name: "debug_symbols", err: fmt.Errorf(`ent: validator failed for field "DBPackage.debug_symbols": %w`, err)}
} }
} }
return nil return nil
} }
func (dpc *DbPackageCreate) sqlSave(ctx context.Context) (*DbPackage, error) { func (dpc *DBPackageCreate) sqlSave(ctx context.Context) (*DBPackage, error) {
if err := dpc.check(); err != nil { if err := dpc.check(); err != nil {
return nil, err return nil, err
} }
@@ -421,9 +379,9 @@ func (dpc *DbPackageCreate) sqlSave(ctx context.Context) (*DbPackage, error) {
return _node, nil return _node, nil
} }
func (dpc *DbPackageCreate) createSpec() (*DbPackage, *sqlgraph.CreateSpec) { func (dpc *DBPackageCreate) createSpec() (*DBPackage, *sqlgraph.CreateSpec) {
var ( var (
_node = &DbPackage{config: dpc.config} _node = &DBPackage{config: dpc.config}
_spec = sqlgraph.NewCreateSpec(dbpackage.Table, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt)) _spec = sqlgraph.NewCreateSpec(dbpackage.Table, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt))
) )
if value, ok := dpc.mutation.Pkgbase(); ok { if value, ok := dpc.mutation.Pkgbase(); ok {
@@ -466,10 +424,6 @@ func (dpc *DbPackageCreate) createSpec() (*DbPackage, *sqlgraph.CreateSpec) {
_spec.SetField(dbpackage.FieldUpdated, field.TypeTime, value)
_node.Updated = value
}
-if value, ok := dpc.mutation.Hash(); ok {
-_spec.SetField(dbpackage.FieldHash, field.TypeString, value)
-_node.Hash = value
-}
if value, ok := dpc.mutation.Lto(); ok {
_spec.SetField(dbpackage.FieldLto, field.TypeEnum, value)
_node.Lto = value
@@ -506,38 +460,30 @@ func (dpc *DbPackageCreate) createSpec() (*DbPackage, *sqlgraph.CreateSpec) {
_spec.SetField(dbpackage.FieldIoOut, field.TypeInt64, value)
_node.IoOut = &value
}
-if value, ok := dpc.mutation.Srcinfo(); ok {
-_spec.SetField(dbpackage.FieldSrcinfo, field.TypeString, value)
-_node.Srcinfo = &value
-}
-if value, ok := dpc.mutation.SrcinfoHash(); ok {
-_spec.SetField(dbpackage.FieldSrcinfoHash, field.TypeString, value)
-_node.SrcinfoHash = value
-}
-if value, ok := dpc.mutation.Pkgbuild(); ok {
-_spec.SetField(dbpackage.FieldPkgbuild, field.TypeString, value)
-_node.Pkgbuild = value
-}
+if value, ok := dpc.mutation.TagRev(); ok {
+_spec.SetField(dbpackage.FieldTagRev, field.TypeString, value)
+_node.TagRev = &value
+}
return _node, _spec
}
// DbPackageCreateBulk is the builder for creating many DbPackage entities in bulk. // DBPackageCreateBulk is the builder for creating many DBPackage entities in bulk.
type DbPackageCreateBulk struct { type DBPackageCreateBulk struct {
config config
builders []*DbPackageCreate builders []*DBPackageCreate
} }
// Save creates the DbPackage entities in the database. // Save creates the DBPackage entities in the database.
func (dpcb *DbPackageCreateBulk) Save(ctx context.Context) ([]*DbPackage, error) { func (dpcb *DBPackageCreateBulk) Save(ctx context.Context) ([]*DBPackage, error) {
specs := make([]*sqlgraph.CreateSpec, len(dpcb.builders)) specs := make([]*sqlgraph.CreateSpec, len(dpcb.builders))
nodes := make([]*DbPackage, len(dpcb.builders)) nodes := make([]*DBPackage, len(dpcb.builders))
mutators := make([]Mutator, len(dpcb.builders)) mutators := make([]Mutator, len(dpcb.builders))
for i := range dpcb.builders { for i := range dpcb.builders {
func(i int, root context.Context) { func(i int, root context.Context) {
builder := dpcb.builders[i] builder := dpcb.builders[i]
builder.defaults() builder.defaults()
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*DbPackageMutation) mutation, ok := m.(*DBPackageMutation)
if !ok { if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m) return nil, fmt.Errorf("unexpected mutation type %T", m)
} }
@@ -584,7 +530,7 @@ func (dpcb *DbPackageCreateBulk) Save(ctx context.Context) ([]*DbPackage, error)
} }
// SaveX is like Save, but panics if an error occurs. // SaveX is like Save, but panics if an error occurs.
func (dpcb *DbPackageCreateBulk) SaveX(ctx context.Context) []*DbPackage { func (dpcb *DBPackageCreateBulk) SaveX(ctx context.Context) []*DBPackage {
v, err := dpcb.Save(ctx) v, err := dpcb.Save(ctx)
if err != nil { if err != nil {
panic(err) panic(err)
@@ -593,13 +539,13 @@ func (dpcb *DbPackageCreateBulk) SaveX(ctx context.Context) []*DbPackage {
} }
// Exec executes the query. // Exec executes the query.
func (dpcb *DbPackageCreateBulk) Exec(ctx context.Context) error { func (dpcb *DBPackageCreateBulk) Exec(ctx context.Context) error {
_, err := dpcb.Save(ctx) _, err := dpcb.Save(ctx)
return err return err
} }
// ExecX is like Exec, but panics if an error occurs. // ExecX is like Exec, but panics if an error occurs.
func (dpcb *DbPackageCreateBulk) ExecX(ctx context.Context) { func (dpcb *DBPackageCreateBulk) ExecX(ctx context.Context) {
if err := dpcb.Exec(ctx); err != nil { if err := dpcb.Exec(ctx); err != nil {
panic(err) panic(err)
} }
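To make the regenerated create builder concrete, here is a minimal usage sketch; it is not part of this commit, and the client value and field values are placeholders. It records a package through the new tag_rev field that replaces the removed srcinfo, srcinfo_hash and pkgbuild columns.

package examples

import (
	"context"

	"somegit.dev/ALHP/ALHP.GO/ent"
	"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
)

// createDBPackage persists one DBPackage row via the generated create builder.
func createDBPackage(ctx context.Context, client *ent.Client) (*ent.DBPackage, error) {
	return client.DBPackage.Create().
		SetPkgbase("example-pkg").                    // required, checked by PkgbaseValidator
		SetMarch("x86-64-v3").                        // required, checked by MarchValidator
		SetRepository(dbpackage.Repository("extra")). // enum is now limited to "extra" and "core"
		SetTagRev("0123abcd").                        // replaces the removed srcinfo/srcinfo_hash/pkgbuild fields
		Save(ctx)
}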



@@ -12,26 +12,26 @@ import (
"somegit.dev/ALHP/ALHP.GO/ent/predicate" "somegit.dev/ALHP/ALHP.GO/ent/predicate"
) )
// DbPackageDelete is the builder for deleting a DbPackage entity. // DBPackageDelete is the builder for deleting a DBPackage entity.
type DbPackageDelete struct { type DBPackageDelete struct {
config config
hooks []Hook hooks []Hook
mutation *DbPackageMutation mutation *DBPackageMutation
} }
// Where appends a list predicates to the DbPackageDelete builder. // Where appends a list predicates to the DBPackageDelete builder.
func (dpd *DbPackageDelete) Where(ps ...predicate.DbPackage) *DbPackageDelete { func (dpd *DBPackageDelete) Where(ps ...predicate.DBPackage) *DBPackageDelete {
dpd.mutation.Where(ps...) dpd.mutation.Where(ps...)
return dpd return dpd
} }
// Exec executes the deletion query and returns how many vertices were deleted. // Exec executes the deletion query and returns how many vertices were deleted.
func (dpd *DbPackageDelete) Exec(ctx context.Context) (int, error) { func (dpd *DBPackageDelete) Exec(ctx context.Context) (int, error) {
return withHooks(ctx, dpd.sqlExec, dpd.mutation, dpd.hooks) return withHooks(ctx, dpd.sqlExec, dpd.mutation, dpd.hooks)
} }
// ExecX is like Exec, but panics if an error occurs. // ExecX is like Exec, but panics if an error occurs.
func (dpd *DbPackageDelete) ExecX(ctx context.Context) int { func (dpd *DBPackageDelete) ExecX(ctx context.Context) int {
n, err := dpd.Exec(ctx) n, err := dpd.Exec(ctx)
if err != nil { if err != nil {
panic(err) panic(err)
@@ -39,7 +39,7 @@ func (dpd *DbPackageDelete) ExecX(ctx context.Context) int {
return n return n
} }
func (dpd *DbPackageDelete) sqlExec(ctx context.Context) (int, error) { func (dpd *DBPackageDelete) sqlExec(ctx context.Context) (int, error) {
_spec := sqlgraph.NewDeleteSpec(dbpackage.Table, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt)) _spec := sqlgraph.NewDeleteSpec(dbpackage.Table, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt))
if ps := dpd.mutation.predicates; len(ps) > 0 { if ps := dpd.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) { _spec.Predicate = func(selector *sql.Selector) {
@@ -56,19 +56,19 @@ func (dpd *DbPackageDelete) sqlExec(ctx context.Context) (int, error) {
return affected, err return affected, err
} }
// DbPackageDeleteOne is the builder for deleting a single DbPackage entity. // DBPackageDeleteOne is the builder for deleting a single DBPackage entity.
type DbPackageDeleteOne struct { type DBPackageDeleteOne struct {
dpd *DbPackageDelete dpd *DBPackageDelete
} }
// Where appends a list predicates to the DbPackageDelete builder. // Where appends a list predicates to the DBPackageDelete builder.
func (dpdo *DbPackageDeleteOne) Where(ps ...predicate.DbPackage) *DbPackageDeleteOne { func (dpdo *DBPackageDeleteOne) Where(ps ...predicate.DBPackage) *DBPackageDeleteOne {
dpdo.dpd.mutation.Where(ps...) dpdo.dpd.mutation.Where(ps...)
return dpdo return dpdo
} }
// Exec executes the deletion query. // Exec executes the deletion query.
func (dpdo *DbPackageDeleteOne) Exec(ctx context.Context) error { func (dpdo *DBPackageDeleteOne) Exec(ctx context.Context) error {
n, err := dpdo.dpd.Exec(ctx) n, err := dpdo.dpd.Exec(ctx)
switch { switch {
case err != nil: case err != nil:
@@ -81,7 +81,7 @@ func (dpdo *DbPackageDeleteOne) Exec(ctx context.Context) error {
} }
// ExecX is like Exec, but panics if an error occurs. // ExecX is like Exec, but panics if an error occurs.
func (dpdo *DbPackageDeleteOne) ExecX(ctx context.Context) { func (dpdo *DBPackageDeleteOne) ExecX(ctx context.Context) {
if err := dpdo.Exec(ctx); err != nil { if err := dpdo.Exec(ctx); err != nil {
panic(err) panic(err)
} }


@@ -14,53 +14,53 @@ import (
"somegit.dev/ALHP/ALHP.GO/ent/predicate" "somegit.dev/ALHP/ALHP.GO/ent/predicate"
) )
// DbPackageQuery is the builder for querying DbPackage entities. // DBPackageQuery is the builder for querying DBPackage entities.
type DbPackageQuery struct { type DBPackageQuery struct {
config config
ctx *QueryContext ctx *QueryContext
order []dbpackage.OrderOption order []dbpackage.OrderOption
inters []Interceptor inters []Interceptor
predicates []predicate.DbPackage predicates []predicate.DBPackage
modifiers []func(*sql.Selector) modifiers []func(*sql.Selector)
// intermediate query (i.e. traversal path). // intermediate query (i.e. traversal path).
sql *sql.Selector sql *sql.Selector
path func(context.Context) (*sql.Selector, error) path func(context.Context) (*sql.Selector, error)
} }
// Where adds a new predicate for the DbPackageQuery builder. // Where adds a new predicate for the DBPackageQuery builder.
func (dpq *DbPackageQuery) Where(ps ...predicate.DbPackage) *DbPackageQuery { func (dpq *DBPackageQuery) Where(ps ...predicate.DBPackage) *DBPackageQuery {
dpq.predicates = append(dpq.predicates, ps...) dpq.predicates = append(dpq.predicates, ps...)
return dpq return dpq
} }
// Limit the number of records to be returned by this query. // Limit the number of records to be returned by this query.
func (dpq *DbPackageQuery) Limit(limit int) *DbPackageQuery { func (dpq *DBPackageQuery) Limit(limit int) *DBPackageQuery {
dpq.ctx.Limit = &limit dpq.ctx.Limit = &limit
return dpq return dpq
} }
// Offset to start from. // Offset to start from.
func (dpq *DbPackageQuery) Offset(offset int) *DbPackageQuery { func (dpq *DBPackageQuery) Offset(offset int) *DBPackageQuery {
dpq.ctx.Offset = &offset dpq.ctx.Offset = &offset
return dpq return dpq
} }
// Unique configures the query builder to filter duplicate records on query. // Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method. // By default, unique is set to true, and can be disabled using this method.
func (dpq *DbPackageQuery) Unique(unique bool) *DbPackageQuery { func (dpq *DBPackageQuery) Unique(unique bool) *DBPackageQuery {
dpq.ctx.Unique = &unique dpq.ctx.Unique = &unique
return dpq return dpq
} }
// Order specifies how the records should be ordered. // Order specifies how the records should be ordered.
func (dpq *DbPackageQuery) Order(o ...dbpackage.OrderOption) *DbPackageQuery { func (dpq *DBPackageQuery) Order(o ...dbpackage.OrderOption) *DBPackageQuery {
dpq.order = append(dpq.order, o...) dpq.order = append(dpq.order, o...)
return dpq return dpq
} }
// First returns the first DbPackage entity from the query. // First returns the first DBPackage entity from the query.
// Returns a *NotFoundError when no DbPackage was found. // Returns a *NotFoundError when no DBPackage was found.
func (dpq *DbPackageQuery) First(ctx context.Context) (*DbPackage, error) { func (dpq *DBPackageQuery) First(ctx context.Context) (*DBPackage, error) {
nodes, err := dpq.Limit(1).All(setContextOp(ctx, dpq.ctx, "First")) nodes, err := dpq.Limit(1).All(setContextOp(ctx, dpq.ctx, "First"))
if err != nil { if err != nil {
return nil, err return nil, err
@@ -72,7 +72,7 @@ func (dpq *DbPackageQuery) First(ctx context.Context) (*DbPackage, error) {
} }
// FirstX is like First, but panics if an error occurs. // FirstX is like First, but panics if an error occurs.
func (dpq *DbPackageQuery) FirstX(ctx context.Context) *DbPackage { func (dpq *DBPackageQuery) FirstX(ctx context.Context) *DBPackage {
node, err := dpq.First(ctx) node, err := dpq.First(ctx)
if err != nil && !IsNotFound(err) { if err != nil && !IsNotFound(err) {
panic(err) panic(err)
@@ -80,9 +80,9 @@ func (dpq *DbPackageQuery) FirstX(ctx context.Context) *DbPackage {
return node return node
} }
// FirstID returns the first DbPackage ID from the query. // FirstID returns the first DBPackage ID from the query.
// Returns a *NotFoundError when no DbPackage ID was found. // Returns a *NotFoundError when no DBPackage ID was found.
func (dpq *DbPackageQuery) FirstID(ctx context.Context) (id int, err error) { func (dpq *DBPackageQuery) FirstID(ctx context.Context) (id int, err error) {
var ids []int var ids []int
if ids, err = dpq.Limit(1).IDs(setContextOp(ctx, dpq.ctx, "FirstID")); err != nil { if ids, err = dpq.Limit(1).IDs(setContextOp(ctx, dpq.ctx, "FirstID")); err != nil {
return return
@@ -95,7 +95,7 @@ func (dpq *DbPackageQuery) FirstID(ctx context.Context) (id int, err error) {
} }
// FirstIDX is like FirstID, but panics if an error occurs. // FirstIDX is like FirstID, but panics if an error occurs.
func (dpq *DbPackageQuery) FirstIDX(ctx context.Context) int { func (dpq *DBPackageQuery) FirstIDX(ctx context.Context) int {
id, err := dpq.FirstID(ctx) id, err := dpq.FirstID(ctx)
if err != nil && !IsNotFound(err) { if err != nil && !IsNotFound(err) {
panic(err) panic(err)
@@ -103,10 +103,10 @@ func (dpq *DbPackageQuery) FirstIDX(ctx context.Context) int {
return id return id
} }
// Only returns a single DbPackage entity found by the query, ensuring it only returns one. // Only returns a single DBPackage entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one DbPackage entity is found. // Returns a *NotSingularError when more than one DBPackage entity is found.
// Returns a *NotFoundError when no DbPackage entities are found. // Returns a *NotFoundError when no DBPackage entities are found.
func (dpq *DbPackageQuery) Only(ctx context.Context) (*DbPackage, error) { func (dpq *DBPackageQuery) Only(ctx context.Context) (*DBPackage, error) {
nodes, err := dpq.Limit(2).All(setContextOp(ctx, dpq.ctx, "Only")) nodes, err := dpq.Limit(2).All(setContextOp(ctx, dpq.ctx, "Only"))
if err != nil { if err != nil {
return nil, err return nil, err
@@ -122,7 +122,7 @@ func (dpq *DbPackageQuery) Only(ctx context.Context) (*DbPackage, error) {
} }
// OnlyX is like Only, but panics if an error occurs. // OnlyX is like Only, but panics if an error occurs.
func (dpq *DbPackageQuery) OnlyX(ctx context.Context) *DbPackage { func (dpq *DBPackageQuery) OnlyX(ctx context.Context) *DBPackage {
node, err := dpq.Only(ctx) node, err := dpq.Only(ctx)
if err != nil { if err != nil {
panic(err) panic(err)
@@ -130,10 +130,10 @@ func (dpq *DbPackageQuery) OnlyX(ctx context.Context) *DbPackage {
return node return node
} }
// OnlyID is like Only, but returns the only DbPackage ID in the query. // OnlyID is like Only, but returns the only DBPackage ID in the query.
// Returns a *NotSingularError when more than one DbPackage ID is found. // Returns a *NotSingularError when more than one DBPackage ID is found.
// Returns a *NotFoundError when no entities are found. // Returns a *NotFoundError when no entities are found.
func (dpq *DbPackageQuery) OnlyID(ctx context.Context) (id int, err error) { func (dpq *DBPackageQuery) OnlyID(ctx context.Context) (id int, err error) {
var ids []int var ids []int
if ids, err = dpq.Limit(2).IDs(setContextOp(ctx, dpq.ctx, "OnlyID")); err != nil { if ids, err = dpq.Limit(2).IDs(setContextOp(ctx, dpq.ctx, "OnlyID")); err != nil {
return return
@@ -150,7 +150,7 @@ func (dpq *DbPackageQuery) OnlyID(ctx context.Context) (id int, err error) {
} }
// OnlyIDX is like OnlyID, but panics if an error occurs. // OnlyIDX is like OnlyID, but panics if an error occurs.
func (dpq *DbPackageQuery) OnlyIDX(ctx context.Context) int { func (dpq *DBPackageQuery) OnlyIDX(ctx context.Context) int {
id, err := dpq.OnlyID(ctx) id, err := dpq.OnlyID(ctx)
if err != nil { if err != nil {
panic(err) panic(err)
@@ -158,18 +158,18 @@ func (dpq *DbPackageQuery) OnlyIDX(ctx context.Context) int {
return id return id
} }
// All executes the query and returns a list of DbPackages. // All executes the query and returns a list of DBPackages.
func (dpq *DbPackageQuery) All(ctx context.Context) ([]*DbPackage, error) { func (dpq *DBPackageQuery) All(ctx context.Context) ([]*DBPackage, error) {
ctx = setContextOp(ctx, dpq.ctx, "All") ctx = setContextOp(ctx, dpq.ctx, "All")
if err := dpq.prepareQuery(ctx); err != nil { if err := dpq.prepareQuery(ctx); err != nil {
return nil, err return nil, err
} }
qr := querierAll[[]*DbPackage, *DbPackageQuery]() qr := querierAll[[]*DBPackage, *DBPackageQuery]()
return withInterceptors[[]*DbPackage](ctx, dpq, qr, dpq.inters) return withInterceptors[[]*DBPackage](ctx, dpq, qr, dpq.inters)
} }
// AllX is like All, but panics if an error occurs. // AllX is like All, but panics if an error occurs.
func (dpq *DbPackageQuery) AllX(ctx context.Context) []*DbPackage { func (dpq *DBPackageQuery) AllX(ctx context.Context) []*DBPackage {
nodes, err := dpq.All(ctx) nodes, err := dpq.All(ctx)
if err != nil { if err != nil {
panic(err) panic(err)
@@ -177,8 +177,8 @@ func (dpq *DbPackageQuery) AllX(ctx context.Context) []*DbPackage {
return nodes return nodes
} }
// IDs executes the query and returns a list of DbPackage IDs. // IDs executes the query and returns a list of DBPackage IDs.
func (dpq *DbPackageQuery) IDs(ctx context.Context) (ids []int, err error) { func (dpq *DBPackageQuery) IDs(ctx context.Context) (ids []int, err error) {
if dpq.ctx.Unique == nil && dpq.path != nil { if dpq.ctx.Unique == nil && dpq.path != nil {
dpq.Unique(true) dpq.Unique(true)
} }
@@ -190,7 +190,7 @@ func (dpq *DbPackageQuery) IDs(ctx context.Context) (ids []int, err error) {
} }
// IDsX is like IDs, but panics if an error occurs. // IDsX is like IDs, but panics if an error occurs.
func (dpq *DbPackageQuery) IDsX(ctx context.Context) []int { func (dpq *DBPackageQuery) IDsX(ctx context.Context) []int {
ids, err := dpq.IDs(ctx) ids, err := dpq.IDs(ctx)
if err != nil { if err != nil {
panic(err) panic(err)
@@ -199,16 +199,16 @@ func (dpq *DbPackageQuery) IDsX(ctx context.Context) []int {
} }
// Count returns the count of the given query. // Count returns the count of the given query.
func (dpq *DbPackageQuery) Count(ctx context.Context) (int, error) { func (dpq *DBPackageQuery) Count(ctx context.Context) (int, error) {
ctx = setContextOp(ctx, dpq.ctx, "Count") ctx = setContextOp(ctx, dpq.ctx, "Count")
if err := dpq.prepareQuery(ctx); err != nil { if err := dpq.prepareQuery(ctx); err != nil {
return 0, err return 0, err
} }
return withInterceptors[int](ctx, dpq, querierCount[*DbPackageQuery](), dpq.inters) return withInterceptors[int](ctx, dpq, querierCount[*DBPackageQuery](), dpq.inters)
} }
// CountX is like Count, but panics if an error occurs. // CountX is like Count, but panics if an error occurs.
func (dpq *DbPackageQuery) CountX(ctx context.Context) int { func (dpq *DBPackageQuery) CountX(ctx context.Context) int {
count, err := dpq.Count(ctx) count, err := dpq.Count(ctx)
if err != nil { if err != nil {
panic(err) panic(err)
@@ -217,7 +217,7 @@ func (dpq *DbPackageQuery) CountX(ctx context.Context) int {
} }
// Exist returns true if the query has elements in the graph. // Exist returns true if the query has elements in the graph.
func (dpq *DbPackageQuery) Exist(ctx context.Context) (bool, error) { func (dpq *DBPackageQuery) Exist(ctx context.Context) (bool, error) {
ctx = setContextOp(ctx, dpq.ctx, "Exist") ctx = setContextOp(ctx, dpq.ctx, "Exist")
switch _, err := dpq.FirstID(ctx); { switch _, err := dpq.FirstID(ctx); {
case IsNotFound(err): case IsNotFound(err):
@@ -230,7 +230,7 @@ func (dpq *DbPackageQuery) Exist(ctx context.Context) (bool, error) {
} }
// ExistX is like Exist, but panics if an error occurs. // ExistX is like Exist, but panics if an error occurs.
func (dpq *DbPackageQuery) ExistX(ctx context.Context) bool { func (dpq *DBPackageQuery) ExistX(ctx context.Context) bool {
exist, err := dpq.Exist(ctx) exist, err := dpq.Exist(ctx)
if err != nil { if err != nil {
panic(err) panic(err)
@@ -238,18 +238,18 @@ func (dpq *DbPackageQuery) ExistX(ctx context.Context) bool {
return exist return exist
} }
// Clone returns a duplicate of the DbPackageQuery builder, including all associated steps. It can be // Clone returns a duplicate of the DBPackageQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made. // used to prepare common query builders and use them differently after the clone is made.
func (dpq *DbPackageQuery) Clone() *DbPackageQuery { func (dpq *DBPackageQuery) Clone() *DBPackageQuery {
if dpq == nil { if dpq == nil {
return nil return nil
} }
return &DbPackageQuery{ return &DBPackageQuery{
config: dpq.config, config: dpq.config,
ctx: dpq.ctx.Clone(), ctx: dpq.ctx.Clone(),
order: append([]dbpackage.OrderOption{}, dpq.order...), order: append([]dbpackage.OrderOption{}, dpq.order...),
inters: append([]Interceptor{}, dpq.inters...), inters: append([]Interceptor{}, dpq.inters...),
predicates: append([]predicate.DbPackage{}, dpq.predicates...), predicates: append([]predicate.DBPackage{}, dpq.predicates...),
// clone intermediate query. // clone intermediate query.
sql: dpq.sql.Clone(), sql: dpq.sql.Clone(),
path: dpq.path, path: dpq.path,
@@ -266,13 +266,13 @@ func (dpq *DbPackageQuery) Clone() *DbPackageQuery {
// Count int `json:"count,omitempty"` // Count int `json:"count,omitempty"`
// } // }
// //
// client.DbPackage.Query(). // client.DBPackage.Query().
// GroupBy(dbpackage.FieldPkgbase). // GroupBy(dbpackage.FieldPkgbase).
// Aggregate(ent.Count()). // Aggregate(ent.Count()).
// Scan(ctx, &v) // Scan(ctx, &v)
func (dpq *DbPackageQuery) GroupBy(field string, fields ...string) *DbPackageGroupBy { func (dpq *DBPackageQuery) GroupBy(field string, fields ...string) *DBPackageGroupBy {
dpq.ctx.Fields = append([]string{field}, fields...) dpq.ctx.Fields = append([]string{field}, fields...)
grbuild := &DbPackageGroupBy{build: dpq} grbuild := &DBPackageGroupBy{build: dpq}
grbuild.flds = &dpq.ctx.Fields grbuild.flds = &dpq.ctx.Fields
grbuild.label = dbpackage.Label grbuild.label = dbpackage.Label
grbuild.scan = grbuild.Scan grbuild.scan = grbuild.Scan
@@ -288,23 +288,23 @@ func (dpq *DbPackageQuery) GroupBy(field string, fields ...string) *DbPackageGro
// Pkgbase string `json:"pkgbase,omitempty"` // Pkgbase string `json:"pkgbase,omitempty"`
// } // }
// //
// client.DbPackage.Query(). // client.DBPackage.Query().
// Select(dbpackage.FieldPkgbase). // Select(dbpackage.FieldPkgbase).
// Scan(ctx, &v) // Scan(ctx, &v)
func (dpq *DbPackageQuery) Select(fields ...string) *DbPackageSelect { func (dpq *DBPackageQuery) Select(fields ...string) *DBPackageSelect {
dpq.ctx.Fields = append(dpq.ctx.Fields, fields...) dpq.ctx.Fields = append(dpq.ctx.Fields, fields...)
sbuild := &DbPackageSelect{DbPackageQuery: dpq} sbuild := &DBPackageSelect{DBPackageQuery: dpq}
sbuild.label = dbpackage.Label sbuild.label = dbpackage.Label
sbuild.flds, sbuild.scan = &dpq.ctx.Fields, sbuild.Scan sbuild.flds, sbuild.scan = &dpq.ctx.Fields, sbuild.Scan
return sbuild return sbuild
} }
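As a hedged illustration of the Select builder documented above (hypothetical code, the client value is a placeholder): scan only the pkgbase and the nullable tag_rev column into ad-hoc structs.

package examples

import (
	"context"

	"somegit.dev/ALHP/ALHP.GO/ent"
	"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
)

// listTagRevs reads two columns of every DBPackage row; tag_rev is nillable, hence *string.
func listTagRevs(ctx context.Context, client *ent.Client) error {
	var rows []struct {
		Pkgbase string  `json:"pkgbase,omitempty"`
		TagRev  *string `json:"tag_rev,omitempty"`
	}
	return client.DBPackage.Query().
		Select(dbpackage.FieldPkgbase, dbpackage.FieldTagRev).
		Scan(ctx, &rows)
}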
// Aggregate returns a DbPackageSelect configured with the given aggregations. // Aggregate returns a DBPackageSelect configured with the given aggregations.
func (dpq *DbPackageQuery) Aggregate(fns ...AggregateFunc) *DbPackageSelect { func (dpq *DBPackageQuery) Aggregate(fns ...AggregateFunc) *DBPackageSelect {
return dpq.Select().Aggregate(fns...) return dpq.Select().Aggregate(fns...)
} }
func (dpq *DbPackageQuery) prepareQuery(ctx context.Context) error { func (dpq *DBPackageQuery) prepareQuery(ctx context.Context) error {
for _, inter := range dpq.inters { for _, inter := range dpq.inters {
if inter == nil { if inter == nil {
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
@@ -330,16 +330,16 @@ func (dpq *DbPackageQuery) prepareQuery(ctx context.Context) error {
return nil return nil
} }
func (dpq *DbPackageQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*DbPackage, error) { func (dpq *DBPackageQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*DBPackage, error) {
var ( var (
nodes = []*DbPackage{} nodes = []*DBPackage{}
_spec = dpq.querySpec() _spec = dpq.querySpec()
) )
_spec.ScanValues = func(columns []string) ([]any, error) { _spec.ScanValues = func(columns []string) ([]any, error) {
return (*DbPackage).scanValues(nil, columns) return (*DBPackage).scanValues(nil, columns)
} }
_spec.Assign = func(columns []string, values []any) error { _spec.Assign = func(columns []string, values []any) error {
node := &DbPackage{config: dpq.config} node := &DBPackage{config: dpq.config}
nodes = append(nodes, node) nodes = append(nodes, node)
return node.assignValues(columns, values) return node.assignValues(columns, values)
} }
@@ -358,7 +358,7 @@ func (dpq *DbPackageQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*D
return nodes, nil return nodes, nil
} }
func (dpq *DbPackageQuery) sqlCount(ctx context.Context) (int, error) { func (dpq *DBPackageQuery) sqlCount(ctx context.Context) (int, error) {
_spec := dpq.querySpec() _spec := dpq.querySpec()
if len(dpq.modifiers) > 0 { if len(dpq.modifiers) > 0 {
_spec.Modifiers = dpq.modifiers _spec.Modifiers = dpq.modifiers
@@ -370,7 +370,7 @@ func (dpq *DbPackageQuery) sqlCount(ctx context.Context) (int, error) {
return sqlgraph.CountNodes(ctx, dpq.driver, _spec) return sqlgraph.CountNodes(ctx, dpq.driver, _spec)
} }
func (dpq *DbPackageQuery) querySpec() *sqlgraph.QuerySpec { func (dpq *DBPackageQuery) querySpec() *sqlgraph.QuerySpec {
_spec := sqlgraph.NewQuerySpec(dbpackage.Table, dbpackage.Columns, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt)) _spec := sqlgraph.NewQuerySpec(dbpackage.Table, dbpackage.Columns, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt))
_spec.From = dpq.sql _spec.From = dpq.sql
if unique := dpq.ctx.Unique; unique != nil { if unique := dpq.ctx.Unique; unique != nil {
@@ -410,7 +410,7 @@ func (dpq *DbPackageQuery) querySpec() *sqlgraph.QuerySpec {
return _spec return _spec
} }
func (dpq *DbPackageQuery) sqlQuery(ctx context.Context) *sql.Selector { func (dpq *DBPackageQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(dpq.driver.Dialect()) builder := sql.Dialect(dpq.driver.Dialect())
t1 := builder.Table(dbpackage.Table) t1 := builder.Table(dbpackage.Table)
columns := dpq.ctx.Fields columns := dpq.ctx.Fields
@@ -446,33 +446,33 @@ func (dpq *DbPackageQuery) sqlQuery(ctx context.Context) *sql.Selector {
} }
// Modify adds a query modifier for attaching custom logic to queries. // Modify adds a query modifier for attaching custom logic to queries.
func (dpq *DbPackageQuery) Modify(modifiers ...func(s *sql.Selector)) *DbPackageSelect { func (dpq *DBPackageQuery) Modify(modifiers ...func(s *sql.Selector)) *DBPackageSelect {
dpq.modifiers = append(dpq.modifiers, modifiers...) dpq.modifiers = append(dpq.modifiers, modifiers...)
return dpq.Select() return dpq.Select()
} }
// DbPackageGroupBy is the group-by builder for DbPackage entities. // DBPackageGroupBy is the group-by builder for DBPackage entities.
type DbPackageGroupBy struct { type DBPackageGroupBy struct {
selector selector
build *DbPackageQuery build *DBPackageQuery
} }
// Aggregate adds the given aggregation functions to the group-by query. // Aggregate adds the given aggregation functions to the group-by query.
func (dpgb *DbPackageGroupBy) Aggregate(fns ...AggregateFunc) *DbPackageGroupBy { func (dpgb *DBPackageGroupBy) Aggregate(fns ...AggregateFunc) *DBPackageGroupBy {
dpgb.fns = append(dpgb.fns, fns...) dpgb.fns = append(dpgb.fns, fns...)
return dpgb return dpgb
} }
// Scan applies the selector query and scans the result into the given value. // Scan applies the selector query and scans the result into the given value.
func (dpgb *DbPackageGroupBy) Scan(ctx context.Context, v any) error { func (dpgb *DBPackageGroupBy) Scan(ctx context.Context, v any) error {
ctx = setContextOp(ctx, dpgb.build.ctx, "GroupBy") ctx = setContextOp(ctx, dpgb.build.ctx, "GroupBy")
if err := dpgb.build.prepareQuery(ctx); err != nil { if err := dpgb.build.prepareQuery(ctx); err != nil {
return err return err
} }
return scanWithInterceptors[*DbPackageQuery, *DbPackageGroupBy](ctx, dpgb.build, dpgb, dpgb.build.inters, v) return scanWithInterceptors[*DBPackageQuery, *DBPackageGroupBy](ctx, dpgb.build, dpgb, dpgb.build.inters, v)
} }
func (dpgb *DbPackageGroupBy) sqlScan(ctx context.Context, root *DbPackageQuery, v any) error { func (dpgb *DBPackageGroupBy) sqlScan(ctx context.Context, root *DBPackageQuery, v any) error {
selector := root.sqlQuery(ctx).Select() selector := root.sqlQuery(ctx).Select()
aggregation := make([]string, 0, len(dpgb.fns)) aggregation := make([]string, 0, len(dpgb.fns))
for _, fn := range dpgb.fns { for _, fn := range dpgb.fns {
@@ -499,28 +499,28 @@ func (dpgb *DbPackageGroupBy) sqlScan(ctx context.Context, root *DbPackageQuery,
return sql.ScanSlice(rows, v) return sql.ScanSlice(rows, v)
} }
// DbPackageSelect is the builder for selecting fields of DbPackage entities. // DBPackageSelect is the builder for selecting fields of DBPackage entities.
type DbPackageSelect struct { type DBPackageSelect struct {
*DbPackageQuery *DBPackageQuery
selector selector
} }
// Aggregate adds the given aggregation functions to the selector query. // Aggregate adds the given aggregation functions to the selector query.
func (dps *DbPackageSelect) Aggregate(fns ...AggregateFunc) *DbPackageSelect { func (dps *DBPackageSelect) Aggregate(fns ...AggregateFunc) *DBPackageSelect {
dps.fns = append(dps.fns, fns...) dps.fns = append(dps.fns, fns...)
return dps return dps
} }
// Scan applies the selector query and scans the result into the given value. // Scan applies the selector query and scans the result into the given value.
func (dps *DbPackageSelect) Scan(ctx context.Context, v any) error { func (dps *DBPackageSelect) Scan(ctx context.Context, v any) error {
ctx = setContextOp(ctx, dps.ctx, "Select") ctx = setContextOp(ctx, dps.ctx, "Select")
if err := dps.prepareQuery(ctx); err != nil { if err := dps.prepareQuery(ctx); err != nil {
return err return err
} }
return scanWithInterceptors[*DbPackageQuery, *DbPackageSelect](ctx, dps.DbPackageQuery, dps, dps.inters, v) return scanWithInterceptors[*DBPackageQuery, *DBPackageSelect](ctx, dps.DBPackageQuery, dps, dps.inters, v)
} }
func (dps *DbPackageSelect) sqlScan(ctx context.Context, root *DbPackageQuery, v any) error { func (dps *DBPackageSelect) sqlScan(ctx context.Context, root *DBPackageQuery, v any) error {
selector := root.sqlQuery(ctx) selector := root.sqlQuery(ctx)
aggregation := make([]string, 0, len(dps.fns)) aggregation := make([]string, 0, len(dps.fns))
for _, fn := range dps.fns { for _, fn := range dps.fns {
@@ -542,7 +542,7 @@ func (dps *DbPackageSelect) sqlScan(ctx context.Context, root *DbPackageQuery, v
} }
// Modify adds a query modifier for attaching custom logic to queries. // Modify adds a query modifier for attaching custom logic to queries.
func (dps *DbPackageSelect) Modify(modifiers ...func(s *sql.Selector)) *DbPackageSelect { func (dps *DBPackageSelect) Modify(modifiers ...func(s *sql.Selector)) *DBPackageSelect {
dps.modifiers = append(dps.modifiers, modifiers...) dps.modifiers = append(dps.modifiers, modifiers...)
return dps return dps
} }

File diff suppressed because it is too large


@@ -9,16 +9,16 @@ import (
"somegit.dev/ALHP/ALHP.GO/ent" "somegit.dev/ALHP/ALHP.GO/ent"
) )
// The DbPackageFunc type is an adapter to allow the use of ordinary // The DBPackageFunc type is an adapter to allow the use of ordinary
// function as DbPackage mutator. // function as DBPackage mutator.
type DbPackageFunc func(context.Context, *ent.DbPackageMutation) (ent.Value, error) type DBPackageFunc func(context.Context, *ent.DBPackageMutation) (ent.Value, error)
// Mutate calls f(ctx, m). // Mutate calls f(ctx, m).
func (f DbPackageFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { func (f DBPackageFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
if mv, ok := m.(*ent.DbPackageMutation); ok { if mv, ok := m.(*ent.DBPackageMutation); ok {
return f(ctx, mv) return f(ctx, mv)
} }
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DbPackageMutation", m) return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DBPackageMutation", m)
} }
// Condition is a hook condition function. // Condition is a hook condition function.
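A hedged sketch of how this adapter is commonly wired up; the Use registration below is standard ent usage and an assumption here, not code from this commit.

package examples

import (
	"context"

	"somegit.dev/ALHP/ALHP.GO/ent"
	"somegit.dev/ALHP/ALHP.GO/ent/hook"
)

// registerDBPackageHook wires a plain function in as a DBPackage mutation hook.
func registerDBPackageHook(client *ent.Client) {
	client.DBPackage.Use(func(next ent.Mutator) ent.Mutator {
		return hook.DBPackageFunc(func(ctx context.Context, m *ent.DBPackageMutation) (ent.Value, error) {
			// Inspect or adjust the mutation here before it is executed.
			return next.Mutate(ctx, m)
		})
	})
}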


@@ -15,13 +15,12 @@ var (
{Name: "packages", Type: field.TypeJSON, Nullable: true}, {Name: "packages", Type: field.TypeJSON, Nullable: true},
{Name: "status", Type: field.TypeEnum, Nullable: true, Enums: []string{"skipped", "failed", "build", "queued", "delayed", "building", "latest", "signing", "unknown"}, Default: "unknown"}, {Name: "status", Type: field.TypeEnum, Nullable: true, Enums: []string{"skipped", "failed", "build", "queued", "delayed", "building", "latest", "signing", "unknown"}, Default: "unknown"},
{Name: "skip_reason", Type: field.TypeString, Nullable: true}, {Name: "skip_reason", Type: field.TypeString, Nullable: true},
{Name: "repository", Type: field.TypeEnum, Enums: []string{"extra", "core", "community"}}, {Name: "repository", Type: field.TypeEnum, Enums: []string{"extra", "core"}},
{Name: "march", Type: field.TypeString}, {Name: "march", Type: field.TypeString},
{Name: "version", Type: field.TypeString, Nullable: true}, {Name: "version", Type: field.TypeString, Nullable: true},
{Name: "repo_version", Type: field.TypeString, Nullable: true}, {Name: "repo_version", Type: field.TypeString, Nullable: true},
{Name: "build_time_start", Type: field.TypeTime, Nullable: true}, {Name: "build_time_start", Type: field.TypeTime, Nullable: true},
{Name: "updated", Type: field.TypeTime, Nullable: true}, {Name: "updated", Type: field.TypeTime, Nullable: true},
{Name: "hash", Type: field.TypeString, Nullable: true},
{Name: "lto", Type: field.TypeEnum, Nullable: true, Enums: []string{"enabled", "unknown", "disabled", "auto_disabled"}, Default: "unknown"}, {Name: "lto", Type: field.TypeEnum, Nullable: true, Enums: []string{"enabled", "unknown", "disabled", "auto_disabled"}, Default: "unknown"},
{Name: "last_version_build", Type: field.TypeString, Nullable: true}, {Name: "last_version_build", Type: field.TypeString, Nullable: true},
{Name: "last_verified", Type: field.TypeTime, Nullable: true}, {Name: "last_verified", Type: field.TypeTime, Nullable: true},
@@ -31,9 +30,7 @@ var (
{Name: "s_time", Type: field.TypeInt64, Nullable: true}, {Name: "s_time", Type: field.TypeInt64, Nullable: true},
{Name: "io_in", Type: field.TypeInt64, Nullable: true}, {Name: "io_in", Type: field.TypeInt64, Nullable: true},
{Name: "io_out", Type: field.TypeInt64, Nullable: true}, {Name: "io_out", Type: field.TypeInt64, Nullable: true},
{Name: "srcinfo", Type: field.TypeString, Nullable: true, Size: 2147483647}, {Name: "tag_rev", Type: field.TypeString, Nullable: true},
{Name: "srcinfo_hash", Type: field.TypeString, Nullable: true},
{Name: "pkgbuild", Type: field.TypeString, Nullable: true},
} }
// DbPackagesTable holds the schema information for the "db_packages" table. // DbPackagesTable holds the schema information for the "db_packages" table.
DbPackagesTable = &schema.Table{ DbPackagesTable = &schema.Table{

File diff suppressed because it is too large


@@ -6,5 +6,5 @@ import (
"entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql"
) )
// DbPackage is the predicate function for dbpackage builders. // DBPackage is the predicate function for dbpackage builders.
type DbPackage func(*sql.Selector) type DBPackage func(*sql.Selector)


@@ -11,7 +11,7 @@ import (
// (default values, validators, hooks and policies) and stitches it
// to their package variables.
func init() {
-dbpackageFields := schema.DbPackage{}.Fields()
+dbpackageFields := schema.DBPackage{}.Fields()
_ = dbpackageFields
// dbpackageDescPkgbase is the schema descriptor for pkgbase field.
dbpackageDescPkgbase := dbpackageFields[0].Descriptor()


@@ -5,26 +5,25 @@ import (
"entgo.io/ent/schema/field" "entgo.io/ent/schema/field"
) )
// DbPackage holds the schema definition for the DbPackage entity. // DBPackage holds the schema definition for the DbPackage entity.
type DbPackage struct { type DBPackage struct {
ent.Schema ent.Schema
} }
// Fields of the DbPackage. // Fields of the DBPackage.
func (DbPackage) Fields() []ent.Field { func (DBPackage) Fields() []ent.Field {
return []ent.Field{ return []ent.Field{
field.String("pkgbase").NotEmpty().Immutable(), field.String("pkgbase").NotEmpty().Immutable(),
field.Strings("packages").Optional(), field.Strings("packages").Optional(),
field.Enum("status").Values("skipped", "failed", "build", "queued", "delayed", "building", field.Enum("status").Values("skipped", "failed", "build", "queued", "delayed", "building",
"latest", "signing", "unknown").Default("unknown").Optional(), "latest", "signing", "unknown").Default("unknown").Optional(),
field.String("skip_reason").Optional(), field.String("skip_reason").Optional(),
field.Enum("repository").Values("extra", "core", "community"), field.Enum("repository").Values("extra", "core"),
field.String("march").NotEmpty().Immutable(), field.String("march").NotEmpty().Immutable(),
field.String("version").Optional(), field.String("version").Optional(),
field.String("repo_version").Optional(), field.String("repo_version").Optional(),
field.Time("build_time_start").Optional(), field.Time("build_time_start").Optional(),
field.Time("updated").Optional(), field.Time("updated").Optional(),
field.String("hash").Optional(),
field.Enum("lto").Values("enabled", "unknown", "disabled", "auto_disabled").Default("unknown").Optional(), field.Enum("lto").Values("enabled", "unknown", "disabled", "auto_disabled").Default("unknown").Optional(),
field.String("last_version_build").Optional(), field.String("last_version_build").Optional(),
field.Time("last_verified").Optional(), field.Time("last_verified").Optional(),
@@ -34,13 +33,11 @@ func (DbPackage) Fields() []ent.Field {
field.Int64("s_time").Optional().Nillable(), field.Int64("s_time").Optional().Nillable(),
field.Int64("io_in").Optional().Nillable(), field.Int64("io_in").Optional().Nillable(),
field.Int64("io_out").Optional().Nillable(), field.Int64("io_out").Optional().Nillable(),
field.Text("srcinfo").Optional().Nillable(), field.String("tag_rev").Optional().Nillable(),
field.String("srcinfo_hash").Optional(),
field.String("pkgbuild").Optional(),
} }
} }
// Edges of the DbPackage. // Edges of the DBPackage.
func (DbPackage) Edges() []ent.Edge { func (DBPackage) Edges() []ent.Edge {
return nil return nil
} }
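A hypothetical query against the updated schema (not part of the commit; the client value is a placeholder, and RepositoryEQ, March and TagRevIsNil are the predicates ent is expected to generate for these fields): list packages of one repository and march that have no tag revision recorded yet.

package examples

import (
	"context"

	"somegit.dev/ALHP/ALHP.GO/ent"
	"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
)

// missingTagRev returns DBPackage rows without a recorded tag_rev.
func missingTagRev(ctx context.Context, client *ent.Client, march string) ([]*ent.DBPackage, error) {
	return client.DBPackage.Query().
		Where(
			dbpackage.RepositoryEQ(dbpackage.Repository("extra")),
			dbpackage.March(march),
			dbpackage.TagRevIsNil(),
		).
		All(ctx)
}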


@@ -12,8 +12,8 @@ import (
// Tx is a transactional client that is created by calling Client.Tx().
type Tx struct {
config
-// DbPackage is the client for interacting with the DbPackage builders.
+// DBPackage is the client for interacting with the DBPackage builders.
-DbPackage *DbPackageClient
+DBPackage *DBPackageClient
// lazily loaded.
client *Client
@@ -145,7 +145,7 @@ func (tx *Tx) Client() *Client {
}
func (tx *Tx) init() {
-tx.DbPackage = NewDbPackageClient(tx.config)
+tx.DBPackage = NewDBPackageClient(tx.config)
}
// txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation.
@@ -155,7 +155,7 @@ func (tx *Tx) init() {
// of them in order to commit or rollback the transaction.
//
// If a closed transaction is embedded in one of the generated entities, and the entity
-// applies a query, for example: DbPackage.QueryXXX(), the query will be executed
+// applies a query, for example: DBPackage.QueryXXX(), the query will be executed
// through the driver which created this transaction.
//
// Note that txDriver is not goroutine safe.
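A minimal transactional sketch with the renamed tx.DBPackage client; hypothetical code, the UpdateOneID/SetTagRev call and the error handling are assumptions rather than part of this diff.

package examples

import (
	"context"

	"somegit.dev/ALHP/ALHP.GO/ent"
)

// setTagRevTx updates one DBPackage row inside a transaction and rolls back on failure.
func setTagRevTx(ctx context.Context, client *ent.Client, id int, rev string) error {
	tx, err := client.Tx(ctx)
	if err != nil {
		return err
	}
	if err := tx.DBPackage.UpdateOneID(id).SetTagRev(rev).Exec(ctx); err != nil {
		_ = tx.Rollback() // best effort; the update error is what gets reported
		return err
	}
	return tx.Commit()
}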

go.mod

@@ -12,14 +12,13 @@ require (
github.com/jackc/pgx/v4 v4.18.1
github.com/otiai10/copy v1.11.0
github.com/sethvargo/go-retry v0.2.4
-github.com/sirupsen/logrus v1.9.0
+github.com/sirupsen/logrus v1.9.2
github.com/wercker/journalhook v0.0.0-20180428041537-5d0a5ae867b3
gopkg.in/yaml.v2 v2.4.0
-lukechampine.com/blake3 v1.1.7
)
require (
-ariga.io/atlas v0.10.2-0.20230427182402-87a07dfb83bf // indirect
+ariga.io/atlas v0.11.0 // indirect
github.com/agext/levenshtein v1.2.3 // indirect
github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect
@@ -33,10 +32,9 @@ require (
github.com/jackc/pgproto3/v2 v2.3.2 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgtype v1.14.0 // indirect
-github.com/klauspost/cpuid/v2 v2.2.4 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/zclconf/go-cty v1.13.1 // indirect
-golang.org/x/crypto v0.8.0 // indirect
+golang.org/x/crypto v0.9.0 // indirect
golang.org/x/mod v0.10.0 // indirect
golang.org/x/sys v0.8.0 // indirect
golang.org/x/text v0.9.0 // indirect

go.sum

@@ -1,5 +1,5 @@
ariga.io/atlas v0.10.2-0.20230427182402-87a07dfb83bf h1:Tq2DRB39ZHScIwWACjPKLv5oEErv7zv6PBb5RTz5CKA= ariga.io/atlas v0.11.0 h1:aGR7MzsUfmdlDYCpRErQeY2NSuRlPE0/q6drNE/5buM=
ariga.io/atlas v0.10.2-0.20230427182402-87a07dfb83bf/go.mod h1:+TR129FJZ5Lvzms6dvCeGWh1yR6hMvmXBhug4hrNIGk= ariga.io/atlas v0.11.0/go.mod h1:+TR129FJZ5Lvzms6dvCeGWh1yR6hMvmXBhug4hrNIGk=
entgo.io/ent v0.12.3 h1:N5lO2EOrHpCH5HYfiMOCHYbo+oh5M8GjT0/cx5x6xkk= entgo.io/ent v0.12.3 h1:N5lO2EOrHpCH5HYfiMOCHYbo+oh5M8GjT0/cx5x6xkk=
entgo.io/ent v0.12.3/go.mod h1:AigGGx+tbrBBYHAzGOg8ND661E5cxx1Uiu5o/otJ6Yg= entgo.io/ent v0.12.3/go.mod h1:AigGGx+tbrBBYHAzGOg8ND661E5cxx1Uiu5o/otJ6Yg=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
@@ -92,9 +92,6 @@ github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0f
github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk=
github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
@@ -137,8 +134,8 @@ github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXY
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.2 h1:oxx1eChJGI6Uks2ZC4W1zpLlVgqB8ner4EuQwV4Ik1Y=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sirupsen/logrus v1.9.2/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
@@ -181,8 +178,8 @@ golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
@@ -211,7 +208,6 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -257,5 +253,3 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0=
lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=


@@ -15,7 +15,7 @@ import (
func housekeeping(repo, march string, wg *sync.WaitGroup) error { func housekeeping(repo, march string, wg *sync.WaitGroup) error {
defer wg.Done() defer wg.Done()
fullRepo := repo + "-" + march fullRepo := repo + "-" + march
log.Debugf("[%s] Start housekeeping", fullRepo) log.Debugf("[%s] start housekeeping", fullRepo)
packages, err := Glob(filepath.Join(conf.Basedir.Repo, fullRepo, "/**/*.pkg.tar.zst")) packages, err := Glob(filepath.Join(conf.Basedir.Repo, fullRepo, "/**/*.pkg.tar.zst"))
if err != nil { if err != nil {
return err return err
@@ -29,9 +29,9 @@ func housekeeping(repo, march string, wg *sync.WaitGroup) error {
if ent.IsNotFound(err) { if ent.IsNotFound(err) {
log.Infof("[HK] removing orphan %s->%s", fullRepo, filepath.Base(path)) log.Infof("[HK] removing orphan %s->%s", fullRepo, filepath.Base(path))
pkg := &ProtoPackage{ pkg := &ProtoPackage{
FullRepo: mPackage.FullRepo(), FullRepo: *mPackage.FullRepo(),
PkgFiles: []string{path}, PkgFiles: []string{path},
March: mPackage.MArch(), March: *mPackage.MArch(),
} }
buildManager.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg} buildManager.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
continue continue
@@ -43,22 +43,12 @@ func housekeeping(repo, march string, wg *sync.WaitGroup) error {
pkg := &ProtoPackage{ pkg := &ProtoPackage{
Pkgbase: dbPkg.Pkgbase, Pkgbase: dbPkg.Pkgbase,
Repo: mPackage.Repo(), Repo: mPackage.Repo(),
FullRepo: mPackage.FullRepo(), FullRepo: *mPackage.FullRepo(),
DBPackage: dbPkg, DBPackage: dbPkg,
March: mPackage.MArch(), March: *mPackage.MArch(),
Arch: mPackage.Arch(), Arch: *mPackage.Arch(),
} }
var upstream string
switch pkg.DBPackage.Repository {
case dbpackage.RepositoryCore, dbpackage.RepositoryExtra:
upstream = "upstream-core-extra"
case dbpackage.RepositoryCommunity:
upstream = "upstream-community"
}
pkg.Pkgbuild = filepath.Join(conf.Basedir.Work, upstreamDir, upstream, dbPkg.Pkgbase, "repos",
pkg.DBPackage.Repository.String()+"-"+conf.Arch, "PKGBUILD")
// check if package is still part of repo // check if package is still part of repo
dbs, err := alpmHandle.SyncDBs() dbs, err := alpmHandle.SyncDBs()
if err != nil { if err != nil {
@@ -72,7 +62,7 @@ func housekeeping(repo, march string, wg *sync.WaitGroup) error {
// package not found on mirror/db -> not part of any repo anymore // package not found on mirror/db -> not part of any repo anymore
log.Infof("[HK] %s->%s not included in repo", pkg.FullRepo, mPackage.Name()) log.Infof("[HK] %s->%s not included in repo", pkg.FullRepo, mPackage.Name())
buildManager.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg} buildManager.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
err = db.DbPackage.DeleteOne(pkg.DBPackage).Exec(context.Background()) err = db.DBPackage.DeleteOne(pkg.DBPackage).Exec(context.Background())
if err != nil { if err != nil {
return err return err
} }
@@ -100,7 +90,7 @@ func housekeeping(repo, march string, wg *sync.WaitGroup) error {
repoVer, err := pkg.repoVersion() repoVer, err := pkg.repoVersion()
if err == nil && repoVer != dbPkg.RepoVersion { if err == nil && repoVer != dbPkg.RepoVersion {
log.Infof("[HK] %s->%s update repoVersion %s->%s", pkg.FullRepo, pkg.Pkgbase, dbPkg.RepoVersion, repoVer) log.Infof("[HK] %s->%s update repoVersion %s->%s", pkg.FullRepo, pkg.Pkgbase, dbPkg.RepoVersion, repoVer)
pkg.DBPackage, err = pkg.DBPackage.Update().SetRepoVersion(repoVer).ClearHash().Save(context.Background()) pkg.DBPackage, err = pkg.DBPackage.Update().SetRepoVersion(repoVer).ClearTagRev().Save(context.Background())
if err != nil { if err != nil {
return err return err
} }
@@ -108,7 +98,7 @@ func housekeeping(repo, march string, wg *sync.WaitGroup) error {
} }
// check all packages from db for existence // check all packages from db for existence
dbPackages, err := db.DbPackage.Query().Where( dbPackages, err := db.DBPackage.Query().Where(
dbpackage.And( dbpackage.And(
dbpackage.RepositoryEQ(dbpackage.Repository(repo)), dbpackage.RepositoryEQ(dbpackage.Repository(repo)),
dbpackage.March(march), dbpackage.March(march),
@@ -130,9 +120,9 @@ func housekeeping(repo, march string, wg *sync.WaitGroup) error {
if !pkg.isAvailable(alpmHandle) { if !pkg.isAvailable(alpmHandle) {
log.Infof("[HK] %s->%s not found on mirror, removing", pkg.FullRepo, pkg.Pkgbase) log.Infof("[HK] %s->%s not found on mirror, removing", pkg.FullRepo, pkg.Pkgbase)
err = db.DbPackage.DeleteOne(dbPkg).Exec(context.Background()) err = db.DBPackage.DeleteOne(dbPkg).Exec(context.Background())
if err != nil { if err != nil {
log.Errorf("[HK] Error deleting package %s: %v", dbPkg.Pkgbase, err) log.Errorf("[HK] error deleting package %s->%s: %v", pkg.FullRepo, dbPkg.Pkgbase, err)
} }
continue continue
} }
@@ -165,7 +155,7 @@ func housekeeping(repo, march string, wg *sync.WaitGroup) error {
} }
if len(missingSplits) > 0 { if len(missingSplits) > 0 {
log.Infof("[HK] %s->%s missing split-package(s): %s", fullRepo, dbPkg.Pkgbase, missingSplits) log.Infof("[HK] %s->%s missing split-package(s): %s", fullRepo, dbPkg.Pkgbase, missingSplits)
pkg.DBPackage, err = pkg.DBPackage.Update().ClearRepoVersion().ClearHash().SetStatus(dbpackage.StatusQueued).Save(context.Background()) pkg.DBPackage, err = pkg.DBPackage.Update().ClearRepoVersion().ClearTagRev().SetStatus(dbpackage.StatusQueued).Save(context.Background())
if err != nil { if err != nil {
return err return err
} }
@@ -180,7 +170,7 @@ func housekeeping(repo, march string, wg *sync.WaitGroup) error {
} }
case dbPkg.Status == dbpackage.StatusLatest && dbPkg.RepoVersion == "": case dbPkg.Status == dbpackage.StatusLatest && dbPkg.RepoVersion == "":
log.Infof("[HK] reseting missing package %s->%s with no repo version", fullRepo, dbPkg.Pkgbase) log.Infof("[HK] reseting missing package %s->%s with no repo version", fullRepo, dbPkg.Pkgbase)
err = dbPkg.Update().SetStatus(dbpackage.StatusQueued).ClearHash().ClearRepoVersion().Exec(context.Background()) err = dbPkg.Update().SetStatus(dbpackage.StatusQueued).ClearTagRev().ClearRepoVersion().Exec(context.Background())
if err != nil { if err != nil {
return err return err
} }
@@ -224,7 +214,7 @@ func logHK() error {
continue continue
} }
pkgSkipped, err := db.DbPackage.Query().Where( pkgSkipped, err := db.DBPackage.Query().Where(
dbpackage.Pkgbase(pkg.Pkgbase), dbpackage.Pkgbase(pkg.Pkgbase),
dbpackage.March(pkg.March), dbpackage.March(pkg.March),
dbpackage.StatusEQ(dbpackage.StatusSkipped), dbpackage.StatusEQ(dbpackage.StatusSkipped),
@@ -245,8 +235,8 @@ func logHK() error {
sLogContent := string(logContent) sLogContent := string(logContent)
if rePortError.MatchString(sLogContent) || reSigError.MatchString(sLogContent) || reDownloadError.MatchString(sLogContent) { if rePortError.MatchString(sLogContent) || reSigError.MatchString(sLogContent) || reDownloadError.MatchString(sLogContent) {
rows, err := db.DbPackage.Update().Where(dbpackage.And(dbpackage.Pkgbase(pkg.Pkgbase), dbpackage.March(pkg.March), rows, err := db.DBPackage.Update().Where(dbpackage.And(dbpackage.Pkgbase(pkg.Pkgbase), dbpackage.March(pkg.March),
dbpackage.StatusEQ(dbpackage.StatusFailed))).ClearHash().SetStatus(dbpackage.StatusQueued).Save(context.Background()) dbpackage.StatusEQ(dbpackage.StatusFailed))).ClearTagRev().SetStatus(dbpackage.StatusQueued).Save(context.Background())
if err != nil { if err != nil {
return err return err
} }
@@ -255,12 +245,12 @@ func logHK() error {
log.Infof("[HK/%s/%s] fixable build-error detected, requeueing package (%d)", pkg.March, pkg.Pkgbase, rows) log.Infof("[HK/%s/%s] fixable build-error detected, requeueing package (%d)", pkg.March, pkg.Pkgbase, rows)
} }
} else if reLdError.MatchString(sLogContent) || reRustLTOError.MatchString(sLogContent) { } else if reLdError.MatchString(sLogContent) || reRustLTOError.MatchString(sLogContent) {
rows, err := db.DbPackage.Update().Where( rows, err := db.DBPackage.Update().Where(
dbpackage.Pkgbase(pkg.Pkgbase), dbpackage.Pkgbase(pkg.Pkgbase),
dbpackage.March(pkg.March), dbpackage.March(pkg.March),
dbpackage.StatusEQ(dbpackage.StatusFailed), dbpackage.StatusEQ(dbpackage.StatusFailed),
dbpackage.LtoNotIn(dbpackage.LtoAutoDisabled, dbpackage.LtoDisabled), dbpackage.LtoNotIn(dbpackage.LtoAutoDisabled, dbpackage.LtoDisabled),
).ClearHash().SetStatus(dbpackage.StatusQueued).SetLto(dbpackage.LtoAutoDisabled).Save(context.Background()) ).ClearTagRev().SetStatus(dbpackage.StatusQueued).SetLto(dbpackage.LtoAutoDisabled).Save(context.Background())
if err != nil { if err != nil {
return err return err
} }
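For context on the log scan above: logHK re-reads the build log of each failed package and, when it matches one of the known-transient error patterns, requeues the package and clears the stored tag revision (or additionally disables LTO for linker/Rust-LTO failures). A minimal, self-contained sketch of that check, reusing two of the patterns declared further down in this diff; the sample log line is invented for illustration:

package main

import (
	"fmt"
	"regexp"
)

// Patterns copied from the regex declarations later in this diff; the log content below is made up.
var (
	reDownloadError = regexp.MustCompile(`(?m)^error: could not rename .+$`)
	reLdError       = regexp.MustCompile(`(?mi).*collect2: error: ld returned (\d+) exit status.*`)
)

func main() {
	logContent := "error: could not rename /tmp/foo.pkg.tar.zst.part\n"
	switch {
	case reDownloadError.MatchString(logContent):
		// transient download error: requeue the package and clear its tag revision
		fmt.Println("requeue (fixable build error)")
	case reLdError.MatchString(logContent):
		// linker failure: requeue with LTO auto-disabled
		fmt.Println("requeue with LTO disabled")
	default:
		fmt.Println("leave package as failed")
	}
}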

main.go

@@ -40,17 +40,17 @@ func main() {
confStr, err := os.ReadFile("config.yaml") confStr, err := os.ReadFile("config.yaml")
if err != nil { if err != nil {
log.Fatalf("Error reading config file: %v", err) log.Fatalf("error reading config file: %v", err)
} }
err = yaml.Unmarshal(confStr, &conf) err = yaml.Unmarshal(confStr, &conf)
if err != nil { if err != nil {
log.Fatalf("Error parsing config file: %v", err) log.Fatalf("error parsing config file: %v", err)
} }
lvl, err := log.ParseLevel(conf.Logging.Level) lvl, err := log.ParseLevel(conf.Logging.Level)
if err != nil { if err != nil {
log.Fatalf("Error parsing log level from config: %v", err) log.Fatalf("error parsing log level from config: %v", err)
} }
log.SetLevel(lvl) log.SetLevel(lvl)
if *journalLog { if *journalLog {
@@ -59,18 +59,18 @@ func main() {
err = syscall.Setpriority(syscall.PRIO_PROCESS, 0, 5) err = syscall.Setpriority(syscall.PRIO_PROCESS, 0, 5)
if err != nil { if err != nil {
log.Infof("Failed to drop priority: %v", err) log.Infof("failed to drop priority: %v", err)
} }
err = os.MkdirAll(conf.Basedir.Repo, 0o755) err = os.MkdirAll(conf.Basedir.Repo, 0o755)
if err != nil { if err != nil {
log.Fatalf("Error creating repo dir: %v", err) log.Fatalf("error creating repo dir: %v", err)
} }
if conf.DB.Driver == "pgx" { if conf.DB.Driver == "pgx" {
pdb, err := sql.Open("pgx", conf.DB.ConnectTo) pdb, err := sql.Open("pgx", conf.DB.ConnectTo)
if err != nil { if err != nil {
log.Fatalf("Failed to open database %s: %v", conf.DB.ConnectTo, err) log.Fatalf("failed to open database %s: %v", conf.DB.ConnectTo, err)
} }
drv := sql.OpenDB(dialect.Postgres, pdb.DB()) drv := sql.OpenDB(dialect.Postgres, pdb.DB())
@@ -78,7 +78,7 @@ func main() {
} else { } else {
db, err = ent.Open(conf.DB.Driver, conf.DB.ConnectTo) db, err = ent.Open(conf.DB.Driver, conf.DB.ConnectTo)
if err != nil { if err != nil {
log.Panicf("Failed to open database %s: %v", conf.DB.ConnectTo, err) log.Panicf("failed to open database %s: %v", conf.DB.ConnectTo, err)
} }
defer func(Client *ent.Client) { defer func(Client *ent.Client) {
_ = Client.Close() _ = Client.Close()
@@ -86,7 +86,7 @@ func main() {
} }
if err := db.Schema.Create(context.Background(), migrate.WithDropIndex(true), migrate.WithDropColumn(true)); err != nil { if err := db.Schema.Create(context.Background(), migrate.WithDropIndex(true), migrate.WithDropColumn(true)); err != nil {
log.Panicf("Automigrate failed: %v", err) log.Panicf("automigrate failed: %v", err)
} }
buildManager = &BuildManager{ buildManager = &BuildManager{
@@ -101,17 +101,17 @@ func main() {
err = setupChroot() err = setupChroot()
if err != nil { if err != nil {
log.Panicf("Unable to setup chroot: %v", err) log.Panicf("unable to setup chroot: %v", err)
} }
err = syncMarchs() err = syncMarchs()
if err != nil { if err != nil {
log.Panicf("Error syncing marchs: %v", err) log.Panicf("error syncing marchs: %v", err)
} }
alpmHandle, err = initALPM(filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot), alpmHandle, err = initALPM(filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot),
filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot, "/var/lib/pacman")) filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot, "/var/lib/pacman"))
if err != nil { if err != nil {
log.Panicf("Error while ALPM-init: %v", err) log.Panicf("error while ALPM-init: %v", err)
} }
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
@@ -129,20 +129,20 @@ killLoop:
case <-reloadSignals: case <-reloadSignals:
confStr, err := os.ReadFile("config.yaml") confStr, err := os.ReadFile("config.yaml")
if err != nil { if err != nil {
log.Panicf("Unable to open config: %v", err) log.Panicf("unable to open config: %v", err)
} }
err = yaml.Unmarshal(confStr, &conf) err = yaml.Unmarshal(confStr, &conf)
if err != nil { if err != nil {
log.Panicf("Unable to parse config: %v", err) log.Panicf("unable to parse config: %v", err)
} }
lvl, err := log.ParseLevel(conf.Logging.Level) lvl, err := log.ParseLevel(conf.Logging.Level)
if err != nil { if err != nil {
log.Panicf("Failure setting logging level: %v", err) log.Panicf("failure setting logging level: %v", err)
} }
log.SetLevel(lvl) log.SetLevel(lvl)
log.Infof("Config reloaded") log.Infof("config reloaded")
} }
} }


@@ -22,9 +22,10 @@ func (pkg Package) Name() string {
} }
// MArch returns package's march // MArch returns package's march
func (pkg Package) MArch() string { func (pkg Package) MArch() *string {
splitPath := strings.Split(string(pkg), string(filepath.Separator)) splitPath := strings.Split(string(pkg), string(filepath.Separator))
return strings.Join(strings.Split(splitPath[len(splitPath)-4], "-")[1:], "-") res := strings.Join(strings.Split(splitPath[len(splitPath)-4], "-")[1:], "-")
return &res
} }
// Repo returns package's dbpackage.Repository // Repo returns package's dbpackage.Repository
@@ -34,9 +35,9 @@ func (pkg Package) Repo() dbpackage.Repository {
} }
// FullRepo returns package's dbpackage.Repository-march // FullRepo returns package's dbpackage.Repository-march
func (pkg Package) FullRepo() string { func (pkg Package) FullRepo() *string {
splitPath := strings.Split(string(pkg), string(filepath.Separator)) splitPath := strings.Split(string(pkg), string(filepath.Separator))
return splitPath[len(splitPath)-4] return &splitPath[len(splitPath)-4]
} }
// Version returns version extracted from package // Version returns version extracted from package
@@ -46,10 +47,10 @@ func (pkg Package) Version() string {
} }
// Arch returns package's Architecture // Arch returns package's Architecture
func (pkg Package) Arch() string { func (pkg Package) Arch() *string {
fNameSplit := strings.Split(filepath.Base(string(pkg)), "-") fNameSplit := strings.Split(filepath.Base(string(pkg)), "-")
fNameSplit = strings.Split(fNameSplit[len(fNameSplit)-1], ".") fNameSplit = strings.Split(fNameSplit[len(fNameSplit)-1], ".")
return fNameSplit[0] return &fNameSplit[0]
} }
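The accessors above derive march, full repo and architecture purely from the package path, which elsewhere in this diff follows the <repo>-<march>/os/<arch>/<file> layout. A small standalone sketch of that path arithmetic on a hypothetical package path (the concrete path and names are examples, not taken from the repo):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	// Hypothetical package path following the <repo>-<march>/os/<arch>/<file> layout.
	pkg := "/srv/alhp/extra-x86_64-v3/os/x86_64/foo-1.2.3-1-x86_64.pkg.tar.zst"

	splitPath := strings.Split(pkg, string(filepath.Separator))
	fullRepo := splitPath[len(splitPath)-4]                      // "extra-x86_64-v3"
	march := strings.Join(strings.Split(fullRepo, "-")[1:], "-") // "x86_64-v3"

	fNameSplit := strings.Split(filepath.Base(pkg), "-")
	arch := strings.Split(fNameSplit[len(fNameSplit)-1], ".")[0] // "x86_64"

	fmt.Println(fullRepo, march, arch)
}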
// HasValidSignature returns if package has valid detached signature file // HasValidSignature returns if package has valid detached signature file
@@ -69,13 +70,13 @@ func (pkg Package) HasValidSignature() (bool, error) {
} }
// DBPackage returns ent.DBPackage for package // DBPackage returns ent.DBPackage for package
func (pkg Package) DBPackage(db *ent.Client) (*ent.DbPackage, error) { func (pkg Package) DBPackage(db *ent.Client) (*ent.DBPackage, error) {
return pkg.DBPackageIsolated(pkg.MArch(), pkg.Repo(), db) return pkg.DBPackageIsolated(*pkg.MArch(), pkg.Repo(), db)
} }
// DBPackageIsolated returns ent.DBPackage like DBPackage, but not relying on the path for march and repo // DBPackageIsolated returns ent.DBPackage like DBPackage, but not relying on the path for march and repo
func (pkg Package) DBPackageIsolated(march string, repo dbpackage.Repository, db *ent.Client) (*ent.DbPackage, error) { func (pkg Package) DBPackageIsolated(march string, repo dbpackage.Repository, db *ent.Client) (*ent.DBPackage, error) {
dbPkg, err := db.DbPackage.Query().Where(func(s *sql.Selector) { dbPkg, err := db.DBPackage.Query().Where(func(s *sql.Selector) {
s.Where( s.Where(
sql.And( sql.And(
sqljson.ValueContains(dbpackage.FieldPackages, pkg.Name()), sqljson.ValueContains(dbpackage.FieldPackages, pkg.Name()),


@@ -1,25 +0,0 @@
package main
import (
"path/filepath"
"strings"
)
type PKGBUILD string
// FullRepo returns full-repo from PKGBUILD'S path
func (p PKGBUILD) FullRepo() string {
sPkgbuild := strings.Split(string(p), string(filepath.Separator))
return sPkgbuild[len(sPkgbuild)-2]
}
// Repo returns repo from PKGBUILD's path
func (p PKGBUILD) Repo() string {
return strings.Split(p.FullRepo(), "-")[0]
}
// PkgBase returns pkgbase from PKGBUILD's path
func (p PKGBUILD) PkgBase() string {
sPkgbuild := strings.Split(string(p), string(filepath.Separator))
return sPkgbuild[len(sPkgbuild)-4]
}


@@ -25,7 +25,6 @@ import (
type ProtoPackage struct { type ProtoPackage struct {
Pkgbase string Pkgbase string
Pkgbuild string
Srcinfo *srcinfo.Srcinfo Srcinfo *srcinfo.Srcinfo
Arch string Arch string
PkgFiles []string PkgFiles []string
@@ -33,54 +32,46 @@ type ProtoPackage struct {
March string March string
FullRepo string FullRepo string
Version string Version string
Hash string DBPackage *ent.DBPackage
DBPackage *ent.DbPackage Pkgbuild string
State *StateInfo
} }
func (p *ProtoPackage) isEligible(ctx context.Context) (bool, error) { var (
if err := p.genSrcinfo(); err != nil { ErrorNotEligible = errors.New("package is not eligible")
return false, fmt.Errorf("error generating SRCINFO: %w", err) )
}
p.Version = constructVersion(p.Srcinfo.Pkgver, p.Srcinfo.Pkgrel, p.Srcinfo.Epoch)
func (p *ProtoPackage) isEligible(ctx context.Context) (bool, error) {
if !p.isAvailable(alpmHandle) { if !p.isAvailable(alpmHandle) {
log.Debugf("[%s/%s] Not available on mirror, skipping build", p.FullRepo, p.Pkgbase) log.Debugf("[%s/%s] not available on mirror, skipping build", p.FullRepo, p.Pkgbase)
return false, nil return false, nil
} }
p.toDBPackage(true)
skipping := false skipping := false
switch { switch {
case Contains(p.Srcinfo.Arch, "any"): case Contains(p.Srcinfo.Arch, "any"):
log.Debugf("Skipped %s: any-Package", p.Srcinfo.Pkgbase) log.Debugf("skipped %s: any-package", p.Srcinfo.Pkgbase)
p.DBPackage.SkipReason = "arch = any" p.DBPackage.SkipReason = "arch = any"
p.DBPackage.Status = dbpackage.StatusSkipped p.DBPackage.Status = dbpackage.StatusSkipped
skipping = true skipping = true
case Contains(conf.Blacklist.Packages, p.Srcinfo.Pkgbase): case Contains(conf.Blacklist.Packages, p.Srcinfo.Pkgbase):
log.Debugf("Skipped %s: blacklisted package", p.Srcinfo.Pkgbase) log.Debugf("skipped %s: blacklisted package", p.Pkgbase)
p.DBPackage.SkipReason = "blacklisted" p.DBPackage.SkipReason = "blacklisted"
p.DBPackage.Status = dbpackage.StatusSkipped p.DBPackage.Status = dbpackage.StatusSkipped
skipping = true skipping = true
case Contains(p.Srcinfo.MakeDepends, "ghc") || Contains(p.Srcinfo.MakeDepends, "haskell-ghc") ||
Contains(p.Srcinfo.Depends, "ghc") || Contains(p.Srcinfo.Depends, "haskell-ghc"):
log.Debugf("Skipped %s: haskell package", p.Srcinfo.Pkgbase)
p.DBPackage.SkipReason = "blacklisted (haskell)"
p.DBPackage.Status = dbpackage.StatusSkipped
skipping = true
case p.DBPackage.MaxRss != nil && datasize.ByteSize(*p.DBPackage.MaxRss)*datasize.KB > conf.Build.MemoryLimit: case p.DBPackage.MaxRss != nil && datasize.ByteSize(*p.DBPackage.MaxRss)*datasize.KB > conf.Build.MemoryLimit:
log.Debugf("Skipped %s: memory limit exceeded (%s)", p.Srcinfo.Pkgbase, datasize.ByteSize(*p.DBPackage.MaxRss)*datasize.KB) log.Debugf("skipped %s: memory limit exceeded (%s)", p.Srcinfo.Pkgbase, datasize.ByteSize(*p.DBPackage.MaxRss)*datasize.KB)
p.DBPackage.SkipReason = "memory limit exceeded" p.DBPackage.SkipReason = "memory limit exceeded"
p.DBPackage.Status = dbpackage.StatusSkipped p.DBPackage.Status = dbpackage.StatusSkipped
skipping = true skipping = true
case p.isPkgFailed(): case p.isPkgFailed():
log.Debugf("Skipped %s: failed build", p.Srcinfo.Pkgbase) log.Debugf("skipped %s: failed build", p.Srcinfo.Pkgbase)
skipping = true skipping = true
} }
if skipping { if skipping {
p.DBPackage = p.DBPackage.Update().SetUpdated(time.Now()).SetVersion(p.Version). p.DBPackage = p.DBPackage.Update().SetUpdated(time.Now()).SetPackages(packages2slice(p.Srcinfo.Packages)).SetVersion(p.Version).SetStatus(p.DBPackage.Status).
SetPackages(packages2slice(p.Srcinfo.Packages)).SetStatus(p.DBPackage.Status). SetSkipReason(p.DBPackage.SkipReason).SetTagRev(p.State.TagRev).SaveX(ctx)
SetSkipReason(p.DBPackage.SkipReason).SetHash(p.Hash).SaveX(ctx)
return false, nil return false, nil
} else { } else {
p.DBPackage = p.DBPackage.Update().SetUpdated(time.Now()).SetPackages(packages2slice(p.Srcinfo.Packages)).SetVersion(p.Version).SaveX(ctx) p.DBPackage = p.DBPackage.Update().SetUpdated(time.Now()).SetPackages(packages2slice(p.Srcinfo.Packages)).SetVersion(p.Version).SaveX(ctx)
@@ -94,8 +85,8 @@ func (p *ProtoPackage) isEligible(ctx context.Context) (bool, error) {
if err != nil { if err != nil {
p.DBPackage = p.DBPackage.Update().ClearRepoVersion().SaveX(ctx) p.DBPackage = p.DBPackage.Update().ClearRepoVersion().SaveX(ctx)
} else if err == nil && alpm.VerCmp(repoVer, p.Version) > 0 { } else if err == nil && alpm.VerCmp(repoVer, p.Version) > 0 {
log.Debugf("Skipped %s: Version in repo higher than in PKGBUILD (%s < %s)", p.Srcinfo.Pkgbase, p.Version, repoVer) log.Debugf("skipped %s: version in repo higher than in PKGBUILD (%s < %s)", p.Srcinfo.Pkgbase, p.Version, repoVer)
p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusLatest).ClearSkipReason().SetHash(p.Hash).SaveX(ctx) p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusLatest).ClearSkipReason().SetTagRev(p.State.TagRev).SaveX(ctx)
return false, nil return false, nil
} }
@@ -104,12 +95,12 @@ func (p *ProtoPackage) isEligible(ctx context.Context) (bool, error) {
switch err.(type) { switch err.(type) {
default: default:
return false, fmt.Errorf("error solving deps: %w", err) return false, fmt.Errorf("error solving deps: %w", err)
case MultiplePKGBUILDError: case MultipleStateFilesError:
log.Infof("Skipped %s: Multiple PKGBUILDs for dependency found: %v", p.Srcinfo.Pkgbase, err) log.Infof("skipped %s: Multiple PKGBUILDs for dependency found: %v", p.Srcinfo.Pkgbase, err)
p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusSkipped).SetSkipReason("multiple PKGBUILD for dep. found").SaveX(ctx) p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusSkipped).SetSkipReason("multiple PKGBUILD for dep. found").SaveX(ctx)
return false, err return false, err
case UnableToSatisfyError: case UnableToSatisfyError:
log.Infof("Skipped %s: unable to resolve dependencies: %v", p.Srcinfo.Pkgbase, err) log.Infof("skipped %s: unable to resolve dependencies: %v", p.Srcinfo.Pkgbase, err)
p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusSkipped).SetSkipReason("unable to resolve dependencies").SaveX(ctx) p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusSkipped).SetSkipReason("unable to resolve dependencies").SaveX(ctx)
return false, err return false, err
} }
@@ -119,7 +110,7 @@ func (p *ProtoPackage) isEligible(ctx context.Context) (bool, error) {
if !isLatest { if !isLatest {
if local != nil { if local != nil {
log.Infof("Delayed %s: not all dependencies are up to date (local: %s==%s, sync: %s==%s)", log.Infof("delayed %s: not all dependencies are up to date (local: %s==%s, sync: %s==%s)",
p.Srcinfo.Pkgbase, local.Name(), local.Version(), local.Name(), syncVersion) p.Srcinfo.Pkgbase, local.Name(), local.Version(), local.Name(), syncVersion)
p.DBPackage.Update().SetStatus(dbpackage.StatusDelayed). p.DBPackage.Update().SetStatus(dbpackage.StatusDelayed).
SetSkipReason(fmt.Sprintf("waiting for %s==%s", local.Name(), syncVersion)).ExecX(ctx) SetSkipReason(fmt.Sprintf("waiting for %s==%s", local.Name(), syncVersion)).ExecX(ctx)
@@ -133,7 +124,7 @@ func (p *ProtoPackage) isEligible(ctx context.Context) (bool, error) {
return false, errors.New("overdue package waiting") return false, errors.New("overdue package waiting")
} }
} else { } else {
log.Infof("Delayed %s: not all dependencies are up to date or resolvable", p.Srcinfo.Pkgbase) log.Infof("delayed %s: not all dependencies are up to date or resolvable", p.Srcinfo.Pkgbase)
p.DBPackage.Update().SetStatus(dbpackage.StatusDelayed).SetSkipReason("waiting for mirror").ExecX(ctx) p.DBPackage.Update().SetStatus(dbpackage.StatusDelayed).SetSkipReason("waiting for mirror").ExecX(ctx)
} }
return false, nil return false, nil
@@ -146,22 +137,6 @@ func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {
start := time.Now().UTC() start := time.Now().UTC()
chroot := "build_" + uuid.New().String() chroot := "build_" + uuid.New().String()
err := p.genSrcinfo()
if err != nil {
return time.Since(start), fmt.Errorf("error generating srcinfo: %w", err)
}
p.Version = constructVersion(p.Srcinfo.Pkgver, p.Srcinfo.Pkgrel, p.Srcinfo.Epoch)
log.Infof("[P] build starting: %s->%s->%s", p.FullRepo, p.Pkgbase, p.Version)
p.toDBPackage(true)
p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusBuilding).ClearSkipReason().SaveX(ctx)
err = p.importKeys()
if err != nil {
log.Warningf("[P] failed to import pgp keys for %s->%s->%s: %v", p.FullRepo, p.Pkgbase, p.Version, err)
}
buildFolder, err := p.setupBuildDir() buildFolder, err := p.setupBuildDir()
if err != nil { if err != nil {
return time.Since(start), fmt.Errorf("error setting up build folder: %w", err) return time.Since(start), fmt.Errorf("error setting up build folder: %w", err)
@@ -175,6 +150,30 @@ func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {
} }
}() }()
err = p.genSrcinfo()
if err != nil {
return time.Since(start), fmt.Errorf("error generating srcinfo: %w", err)
}
p.Version = constructVersion(p.Srcinfo.Pkgver, p.Srcinfo.Pkgrel, p.Srcinfo.Epoch)
elig, err := p.isEligible(context.Background())
if err != nil {
log.Warningf("[QG] %s->%s: %v", p.FullRepo, p.Pkgbase, err)
}
if !elig {
return time.Since(start), ErrorNotEligible
}
log.Infof("[P] build starting: %s->%s->%s", p.FullRepo, p.Pkgbase, p.Version)
p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusBuilding).ClearSkipReason().SaveX(ctx)
err = p.importKeys()
if err != nil {
log.Warningf("[P] failed to import pgp keys for %s->%s->%s: %v", p.FullRepo, p.Pkgbase, p.Version, err)
}
buildNo := 1 buildNo := 1
versionSlice := strings.Split(p.DBPackage.LastVersionBuild, ".") versionSlice := strings.Split(p.DBPackage.LastVersionBuild, ".")
if strings.Join(versionSlice[:len(versionSlice)-1], ".") == p.Version { if strings.Join(versionSlice[:len(versionSlice)-1], ".") == p.Version {
@@ -253,7 +252,7 @@ func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {
ClearIoIn(). ClearIoIn().
ClearUTime(). ClearUTime().
ClearSTime(). ClearSTime().
SetHash(p.Hash). SetTagRev(p.State.TagRev).
ExecX(ctx) ExecX(ctx)
return time.Since(start), fmt.Errorf("build failed: exit code %d", cmd.ProcessState.ExitCode()) return time.Since(start), fmt.Errorf("build failed: exit code %d", cmd.ProcessState.ExitCode())
} }
@@ -308,7 +307,7 @@ func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {
SetLto(dbpackage.LtoEnabled). SetLto(dbpackage.LtoEnabled).
SetBuildTimeStart(start). SetBuildTimeStart(start).
SetLastVersionBuild(p.Version). SetLastVersionBuild(p.Version).
SetHash(p.Hash). SetTagRev(p.State.TagRev).
SetMaxRss(Rusage.Maxrss). SetMaxRss(Rusage.Maxrss).
SetIoOut(Rusage.Oublock). SetIoOut(Rusage.Oublock).
SetIoIn(Rusage.Inblock). SetIoIn(Rusage.Inblock).
@@ -337,19 +336,16 @@ func (p *ProtoPackage) setupBuildDir() (string, error) {
return "", err return "", err
} }
files, err := filepath.Glob(filepath.Join(filepath.Dir(p.Pkgbuild), "*")) cmd := exec.Command("git", "clone", "--depth", "1", "--branch", p.State.TagVer,
fmt.Sprintf("https://gitlab.archlinux.org/archlinux/packaging/packages/%s.git", p.Pkgbase), buildDir)
res, err := cmd.CombinedOutput()
log.Debug(string(res))
if err != nil { if err != nil {
log.Fatalf("error cloning package repo %s: %v", p.Pkgbase, err)
return "", err return "", err
} }
for _, file := range files {
err = copy.Copy(file, filepath.Join(buildDir, filepath.Base(file)))
if err != nil {
return "", err
}
}
p.Pkgbuild = filepath.Join(buildDir, "PKGBUILD") p.Pkgbuild = filepath.Join(buildDir, "PKGBUILD")
return buildDir, nil return buildDir, nil
} }
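setupBuildDir above now clones the package's own packaging repository at the tag recorded in the state file instead of copying files from an SVN checkout. A hedged, standalone sketch of the equivalent clone step; pkgbase, tag and target directory below are illustrative values only:

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Illustrative values; TagVer normally comes from the parsed state file.
	pkgbase := "linux"
	tagVer := "6.3.4.arch1-1"
	buildDir := "/tmp/alhp-build-example"

	// Shallow clone of the package repo at the release tag, mirroring the command used above.
	cmd := exec.Command("git", "clone", "--depth", "1", "--branch", tagVer,
		fmt.Sprintf("https://gitlab.archlinux.org/archlinux/packaging/packages/%s.git", pkgbase), buildDir)
	out, err := cmd.CombinedOutput()
	fmt.Println(string(out))
	if err != nil {
		fmt.Println("clone failed:", err)
	}
}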
@@ -444,8 +440,27 @@ func (p *ProtoPackage) isAvailable(h *alpm.Handle) bool {
var pkg alpm.IPackage var pkg alpm.IPackage
if p.Srcinfo != nil { if p.Srcinfo != nil {
pkg, err = dbs.FindSatisfier(p.Srcinfo.Packages[0].Pkgname) pkg, err = dbs.FindSatisfier(p.Srcinfo.Packages[0].Pkgname)
} else { } else if len(p.DBPackage.Packages) > 0 {
pkg, err = dbs.FindSatisfier(p.DBPackage.Packages[0]) pkg, err = dbs.FindSatisfier(p.DBPackage.Packages[0])
} else {
cmd := exec.Command("unbuffer", "pacsift", "--exact", "--base="+p.Pkgbase, "--repo="+p.Repo.String())
var res []byte
res, err = cmd.CombinedOutput()
log.Debug(string(res))
if err != nil || len(res) == 0 {
log.Warningf("error getting packages from pacsift for %s: %v", p.Pkgbase, err)
buildManager.alpmMutex.Unlock()
return false
}
if len(strings.Split(strings.TrimSpace(string(res)), "\n")) > 0 {
splitOut := strings.Split(strings.Split(strings.TrimSpace(string(res)), "\n")[0], "/")
pkg, err = dbs.FindSatisfier(splitOut[1])
} else {
log.Warningf("error getting packages from pacsift for %s", p.Pkgbase)
buildManager.alpmMutex.Unlock()
return false
}
} }
buildManager.alpmMutex.Unlock() buildManager.alpmMutex.Unlock()
if err != nil { if err != nil {
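The new fallback in isAvailable shells out to pacsift when neither a parsed .SRCINFO nor a cached package list is available, and expects one repo/pkgname entry per output line. A minimal sketch of parsing such output; the sample output string is assumed, not captured from a real pacsift run:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Assumed pacsift output for --exact --base=<pkgbase> --repo=<repo>: one "repo/pkgname" per line.
	res := "extra/foo\nextra/foo-docs\n"

	lines := strings.Split(strings.TrimSpace(res), "\n")
	if len(lines) == 0 || lines[0] == "" {
		fmt.Println("no package found")
		return
	}
	splitOut := strings.Split(lines[0], "/")
	if len(splitOut) != 2 {
		fmt.Println("unexpected pacsift output:", lines[0])
		return
	}
	fmt.Println("first satisfier candidate:", splitOut[1]) // "foo"
}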
@@ -467,27 +482,31 @@ func (p *ProtoPackage) isAvailable(h *alpm.Handle) bool {
return true return true
} }
func (p *ProtoPackage) SVN2GITVersion(h *alpm.Handle) (string, error) { func (p *ProtoPackage) GitVersion(h *alpm.Handle) (string, error) {
if p.Pkgbuild == "" && p.Pkgbase == "" { if p.Pkgbase == "" {
return "", fmt.Errorf("invalid arguments") return "", fmt.Errorf("invalid arguments")
} }
pkgBuilds, _ := Glob(filepath.Join(conf.Basedir.Work, upstreamDir, "**/"+p.Pkgbase+"/repos/*/PKGBUILD")) stateFiles, _ := Glob(filepath.Join(conf.Basedir.Work, stateDir, "**/"+p.Pkgbase))
var fPkgbuilds []string var fStateFiles []string
for _, pkgbuild := range pkgBuilds { for _, stateFile := range stateFiles {
mPkgbuild := PKGBUILD(pkgbuild) _, subRepo, _, err := stateFileMeta(stateFile)
if mPkgbuild.FullRepo() == "trunk" || containsSubStr(mPkgbuild.FullRepo(), conf.Blacklist.Repo) { if err != nil {
continue continue
} }
if !Contains(fPkgbuilds, pkgbuild) { if subRepo != nil {
fPkgbuilds = append(fPkgbuilds, pkgbuild) continue
}
if !Contains(fStateFiles, stateFile) {
fStateFiles = append(fStateFiles, stateFile)
} }
} }
if len(fPkgbuilds) > 1 { if len(fStateFiles) > 1 {
log.Infof("%s: multiple PKGBUILD found, try resolving from mirror", p.Pkgbase) log.Infof("%s: multiple statefiles found, try resolving from mirror", p.Pkgbase)
dbs, err := h.SyncDBs() dbs, err := h.SyncDBs()
if err != nil { if err != nil {
return "", err return "", err
@@ -501,54 +520,37 @@ func (p *ProtoPackage) SVN2GITVersion(h *alpm.Handle) (string, error) {
} }
pkgloop: pkgloop:
for _, pkgbuild := range fPkgbuilds { for _, stateFile := range fStateFiles {
repo := strings.Split(filepath.Base(filepath.Dir(pkgbuild)), "-")[0] repo, _, _, err := stateFileMeta(stateFile)
upstreamA := strings.Split(filepath.Dir(pkgbuild), "/") if err != nil {
upstream := upstreamA[len(upstreamA)-4] continue
}
switch upstream { if iPackage.DB().Name() == repo {
case "upstream-core-extra": fStateFiles = []string{stateFile}
if iPackage.DB().Name() == repo && (repo == "extra" || repo == "core") { break pkgloop
fPkgbuilds = []string{pkgbuild}
break pkgloop
}
case "upstream-community":
if iPackage.DB().Name() == repo && repo == "community" {
fPkgbuilds = []string{pkgbuild}
break pkgloop
}
} }
} }
if len(fPkgbuilds) > 1 { if len(fStateFiles) > 1 {
return "", MultiplePKGBUILDError{fmt.Errorf("%s: multiple PKGBUILD found: %s", p.Pkgbase, fPkgbuilds)} return "", MultipleStateFilesError{fmt.Errorf("%s: multiple statefiles found: %s", p.Pkgbase, fStateFiles)}
} }
log.Infof("%s: resolving successful: MirrorRepo=%s; PKGBUILD chosen: %s", p.Pkgbase, iPackage.DB().Name(), fPkgbuilds[0]) log.Infof("%s: resolving successful: MirrorRepo=%s; statefile chosen: %s", p.Pkgbase, iPackage.DB().Name(), fStateFiles[0])
} else if len(fPkgbuilds) == 0 { } else if len(fStateFiles) == 0 {
return "", fmt.Errorf("%s: no matching PKGBUILD found (searched: %s, canidates: %s)", p.Pkgbase, return "", fmt.Errorf("%s: no matching statefile found (searched: %s, canidates: %s)", p.Pkgbase,
filepath.Join(conf.Basedir.Work, upstreamDir, "**/"+p.Pkgbase+"/repos/*/PKGBUILD"), pkgBuilds) filepath.Join(conf.Basedir.Work, stateDir, "**/"+p.Pkgbase), stateFiles)
} }
pPkg := PKGBUILD(fPkgbuilds[0]) rawState, err := os.ReadFile(fStateFiles[0])
dbPkg, err := db.DbPackage.Query().Where(dbpackage.RepositoryEQ(dbpackage.Repository(pPkg.Repo())),
dbpackage.March(p.March), dbpackage.Pkgbase(p.Pkgbase)).Only(context.Background())
if err == nil {
return dbPkg.Version, nil
}
cmd := exec.Command("makepkg", "--printsrcinfo")
cmd.Dir = filepath.Dir(fPkgbuilds[0])
res, err := cmd.Output()
if err != nil { if err != nil {
return "", err return "", fmt.Errorf("error reading statefile %s: %w", fStateFiles[0], err)
} }
state, err := parseState(string(rawState))
info, err := srcinfo.Parse(string(res))
if err != nil { if err != nil {
return "", err return "", fmt.Errorf("error parsing statefile: %w", err)
} }
return constructVersion(info.Pkgver, info.Pkgrel, info.Epoch), nil return state.PkgVer, nil
} }
func (p *ProtoPackage) isPkgFailed() bool { func (p *ProtoPackage) isPkgFailed() bool {
@@ -556,14 +558,6 @@ func (p *ProtoPackage) isPkgFailed() bool {
return false return false
} }
if err := p.genSrcinfo(); err != nil {
return false
}
if p.Version == "" {
p.Version = constructVersion(p.Srcinfo.Pkgver, p.Srcinfo.Pkgrel, p.Srcinfo.Epoch)
}
if alpm.VerCmp(p.DBPackage.Version, p.Version) < 0 { if alpm.VerCmp(p.DBPackage.Version, p.Version) < 0 {
return false return false
} }
@@ -575,16 +569,7 @@ func (p *ProtoPackage) genSrcinfo() error {
return nil return nil
} }
if p.DBPackage != nil && p.DBPackage.Srcinfo != nil { cmd := exec.Command("makepkg", "--printsrcinfo", "-p", filepath.Base(p.Pkgbuild))
var err error
p.Srcinfo, err = srcinfo.Parse(*p.DBPackage.Srcinfo)
if err != nil {
return err
}
return nil
}
cmd := exec.Command("makepkg", "--printsrcinfo", "-p", filepath.Base(p.Pkgbuild)) //nolint:gosec
cmd.Dir = filepath.Dir(p.Pkgbuild) cmd.Dir = filepath.Dir(p.Pkgbuild)
res, err := cmd.CombinedOutput() res, err := cmd.CombinedOutput()
if err != nil { if err != nil {
@@ -595,11 +580,7 @@ func (p *ProtoPackage) genSrcinfo() error {
if err != nil { if err != nil {
return err return err
} }
p.Srcinfo = info p.Srcinfo = info
if p.DBPackage != nil {
p.DBPackage = p.DBPackage.Update().SetSrcinfoHash(p.Hash).SetSrcinfo(string(res)).SaveX(context.Background())
}
return nil return nil
} }
@@ -638,27 +619,32 @@ func (p *ProtoPackage) findPkgFiles() error {
return nil return nil
} }
func (p *ProtoPackage) toDBPackage(create bool) { func (p *ProtoPackage) toDBPackage(create bool) error {
if p.DBPackage != nil { if p.DBPackage != nil {
return return nil
} }
dbPkg, err := db.DbPackage.Query().Where(dbpackage.And(dbpackage.Pkgbase(p.Pkgbase), dbpackage.March(p.March), dbPkg, err := db.DBPackage.Query().Where(
dbpackage.RepositoryEQ(p.Repo))).Only(context.Background()) dbpackage.Pkgbase(p.Pkgbase),
if err != nil && create { dbpackage.March(p.March),
dbPkg = db.DbPackage.Create(). dbpackage.RepositoryEQ(p.Repo),
).Only(context.Background())
if err != nil && ent.IsNotFound(err) && create {
dbPkg = db.DBPackage.Create().
SetPkgbase(p.Pkgbase). SetPkgbase(p.Pkgbase).
SetMarch(p.March). SetMarch(p.March).
SetPackages(packages2slice(p.Srcinfo.Packages)).
SetRepository(p.Repo). SetRepository(p.Repo).
SaveX(context.Background()) SaveX(context.Background())
} else if err != nil && !ent.IsNotFound(err) {
return err
} }
p.DBPackage = dbPkg p.DBPackage = dbPkg
return nil
} }
func (p *ProtoPackage) exists() (bool, error) { func (p *ProtoPackage) exists() (bool, error) {
dbPkg, err := db.DbPackage.Query().Where(dbpackage.And(dbpackage.Pkgbase(p.Pkgbase), dbpackage.March(p.March))).Exist(context.Background()) dbPkg, err := db.DBPackage.Query().Where(dbpackage.And(dbpackage.Pkgbase(p.Pkgbase), dbpackage.March(p.March))).Exist(context.Background())
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -692,7 +678,7 @@ func (p *ProtoPackage) isMirrorLatest(h *alpm.Handle) (latest bool, foundPkg alp
svn2gitVer, err := (&ProtoPackage{ svn2gitVer, err := (&ProtoPackage{
Pkgbase: pkg.Base(), Pkgbase: pkg.Base(),
March: p.March, March: p.March,
}).SVN2GITVersion(h) }).GitVersion(h)
if err != nil { if err != nil {
return false, nil, "", err return false, nil, "", err
} else if svn2gitVer == "" { } else if svn2gitVer == "" {


@@ -1,8 +1,6 @@
package main package main
import ( import (
"context"
"encoding/hex"
"fmt" "fmt"
"github.com/Jguer/go-alpm/v2" "github.com/Jguer/go-alpm/v2"
paconf "github.com/Morganamilo/go-pacmanconf" paconf "github.com/Morganamilo/go-pacmanconf"
@@ -10,9 +8,7 @@ import (
"github.com/c2h5oh/datasize" "github.com/c2h5oh/datasize"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v2" "gopkg.in/yaml.v2"
"io"
"io/fs" "io/fs"
"lukechampine.com/blake3"
"os" "os"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
@@ -24,13 +20,13 @@ import (
) )
const ( const (
pacmanConf = "/usr/share/devtools/pacman-extra.conf" pacmanConf = "/usr/share/devtools/pacman.conf.d/extra.conf"
makepkgConf = "/usr/share/devtools/makepkg-x86_64.conf" makepkgConf = "/usr/share/devtools/makepkg.conf.d/x86_64.conf"
logDir = "logs" logDir = "logs"
pristineChroot = "root" pristineChroot = "root"
buildDir = "build" buildDir = "build"
lastUpdate = "lastupdate" lastUpdate = "lastupdate"
upstreamDir = "upstream" stateDir = "state"
chrootDir = "chroot" chrootDir = "chroot"
makepkgDir = "makepkg" makepkgDir = "makepkg"
waitingDir = "to_be_moved" waitingDir = "to_be_moved"
@@ -43,8 +39,6 @@ var (
reVar = regexp.MustCompile(`(?mU)^#?[^\S\r\n]*(\w+)[^\S\r\n]*=[^\S\r\n]*([("])([^)"]*)([)"])[^\S\r\n]*$`) reVar = regexp.MustCompile(`(?mU)^#?[^\S\r\n]*(\w+)[^\S\r\n]*=[^\S\r\n]*([("])([^)"]*)([)"])[^\S\r\n]*$`)
reEnvClean = regexp.MustCompile(`(?m) ([\s\\]+) `) reEnvClean = regexp.MustCompile(`(?m) ([\s\\]+) `)
rePkgRel = regexp.MustCompile(`(?m)^pkgrel\s*=\s*(.+)$`) rePkgRel = regexp.MustCompile(`(?m)^pkgrel\s*=\s*(.+)$`)
rePkgSource = regexp.MustCompile(`(?msU)^source.*=.*\((.+)\)$`)
rePkgSum = regexp.MustCompile(`(?msU)^sha256sums.*=.*\((.+)\)$`)
rePkgFile = regexp.MustCompile(`^(.+)(?:-.+){2}-(?:x86_64|any)\.pkg\.tar\.zst(?:\.sig)*$`) rePkgFile = regexp.MustCompile(`^(.+)(?:-.+){2}-(?:x86_64|any)\.pkg\.tar\.zst(?:\.sig)*$`)
reLdError = regexp.MustCompile(`(?mi).*collect2: error: ld returned (\d+) exit status.*`) reLdError = regexp.MustCompile(`(?mi).*collect2: error: ld returned (\d+) exit status.*`)
reDownloadError = regexp.MustCompile(`(?m)^error: could not rename .+$`) reDownloadError = regexp.MustCompile(`(?m)^error: could not rename .+$`)
@@ -56,7 +50,7 @@ var (
type Conf struct { type Conf struct {
Arch string Arch string
Repos, March []string Repos, March []string
Svn2git map[string]string StateRepo string `yaml:"state_repo"`
Basedir struct { Basedir struct {
Repo, Work, Debug string Repo, Work, Debug string
} }
@@ -73,9 +67,8 @@ type Conf struct {
Level string Level string
} }
Blacklist struct { Blacklist struct {
Packages []string Packages, Repo []string
Repo []string LTO []string `yaml:"lto"`
LTO []string `yaml:"lto"`
} }
Housekeeping struct { Housekeeping struct {
Interval string Interval string
@@ -85,19 +78,24 @@ type Conf struct {
Skipped, Queued, Latest, Failed, Signing, Building, Unknown string Skipped, Queued, Latest, Failed, Signing, Building, Unknown string
} }
} }
KernelPatches map[string]string `yaml:"kernel_patches"`
KernelToPatch []string `yaml:"kernel_to_patch"`
} }
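The Svn2git map gives way to a single state_repo setting. A minimal sketch of how such a key would be unmarshalled via the yaml:"state_repo" tag; the struct stand-in and the repository URL shown are assumptions for illustration, not taken from the project's config:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// Stand-in for the relevant slice of Conf; the URL below is illustrative, not the project's real setting.
type conf struct {
	StateRepo string `yaml:"state_repo"`
}

func main() {
	raw := []byte("state_repo: https://gitlab.archlinux.org/archlinux/packaging/state.git\n")
	var c conf
	if err := yaml.Unmarshal(raw, &c); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println("state repo:", c.StateRepo)
}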
type Globs []string type Globs []string
type MultiplePKGBUILDError struct { type MultipleStateFilesError struct {
error error
} }
type UnableToSatisfyError struct { type UnableToSatisfyError struct {
error error
} }
type StateInfo struct {
Pkgbase string
PkgVer string
TagVer string
TagRev string
}
func updateLastUpdated() error { func updateLastUpdated() error {
err := os.WriteFile(filepath.Join(conf.Basedir.Repo, lastUpdate), []byte(strconv.FormatInt(time.Now().Unix(), 10)), 0o644) //nolint:gosec err := os.WriteFile(filepath.Join(conf.Basedir.Repo, lastUpdate), []byte(strconv.FormatInt(time.Now().Unix(), 10)), 0o644) //nolint:gosec
if err != nil { if err != nil {
@@ -125,22 +123,6 @@ func statusID2string(s dbpackage.Status) string {
} }
} }
func b3sum(filePath string) (string, error) {
file, err := os.Open(filePath)
if err != nil {
return "", err
}
defer func(file *os.File) {
_ = file.Close()
}(file)
hash := blake3.New(32, nil) //nolint:gomnd
if _, err := io.Copy(hash, file); err != nil {
return "", err
}
return hex.EncodeToString(hash.Sum(nil)), nil
}
func containsSubStr(str string, subList []string) bool { func containsSubStr(str string, subList []string) bool {
for _, checkStr := range subList { for _, checkStr := range subList {
if strings.Contains(str, checkStr) { if strings.Contains(str, checkStr) {
@@ -187,27 +169,21 @@ func pkgList2MaxMem(pkgList []*ProtoPackage) datasize.ByteSize {
return datasize.ByteSize(sum) * datasize.KB return datasize.ByteSize(sum) * datasize.KB
} }
func genQueue() ([]*ProtoPackage, error) { func stateFileMeta(stateFile string) (repo string, subRepo *string, arch string, err error) {
pkgs, err := db.DbPackage.Query().Where(dbpackage.Or(dbpackage.StatusEQ(dbpackage.StatusQueued), nameSplit := strings.Split(filepath.Base(filepath.Dir(stateFile)), "-")
dbpackage.StatusEQ(dbpackage.StatusBuild), dbpackage.StatusEQ(dbpackage.StatusBuilding))).All(context.Background()) if len(nameSplit) < 2 {
if err != nil { err = fmt.Errorf("error getting metainfo")
return nil, err return
} }
var pkgbuilds []*ProtoPackage repo = nameSplit[0]
for _, pkg := range pkgs { if len(nameSplit) == 3 {
pkgbuilds = append(pkgbuilds, &ProtoPackage{ subRepo = &nameSplit[1]
Pkgbase: pkg.Pkgbase, arch = nameSplit[2]
Repo: pkg.Repository, } else {
March: pkg.March, arch = nameSplit[1]
FullRepo: pkg.Repository.String() + "-" + pkg.March,
Hash: pkg.Hash,
DBPackage: pkg,
Pkgbuild: pkg.Pkgbuild,
Version: pkg.RepoVersion,
})
} }
return pkgbuilds, nil return
} }
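stateFileMeta reads repository, optional sub-repository and architecture from the name of the state file's parent directory, i.e. <repo>-<arch> or <repo>-<subrepo>-<arch>. A standalone sketch of that split on two hypothetical directory names:

package main

import (
	"fmt"
	"strings"
)

// splitStateDir mirrors the directory naming convention stateFileMeta relies on.
func splitStateDir(dir string) (repo string, subRepo *string, arch string, err error) {
	nameSplit := strings.Split(dir, "-")
	if len(nameSplit) < 2 {
		return "", nil, "", fmt.Errorf("unexpected state directory name: %s", dir)
	}
	repo = nameSplit[0]
	if len(nameSplit) == 3 {
		subRepo = &nameSplit[1]
		arch = nameSplit[2]
	} else {
		arch = nameSplit[1]
	}
	return repo, subRepo, arch, nil
}

func main() {
	// Example directory names; actual names depend on the upstream state repository.
	for _, dir := range []string{"extra-x86_64", "core-testing-x86_64"} {
		repo, subRepo, arch, err := splitStateDir(dir)
		if err != nil {
			fmt.Println(err)
			continue
		}
		if subRepo != nil {
			fmt.Printf("%s -> repo=%s subrepo=%s arch=%s\n", dir, repo, *subRepo, arch)
		} else {
			fmt.Printf("%s -> repo=%s arch=%s\n", dir, repo, arch)
		}
	}
}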
func movePackagesLive(fullRepo string) error { func movePackagesLive(fullRepo string) error {
@@ -238,11 +214,11 @@ func movePackagesLive(fullRepo string) error {
return fmt.Errorf("unable to create folder for debug-packages: %w", mkErr) return fmt.Errorf("unable to create folder for debug-packages: %w", mkErr)
} }
forPackage := strings.TrimSuffix(pkg.Name(), "-debug") forPackage := strings.TrimSuffix(pkg.Name(), "-debug")
log.Debugf("[MOVE] Found debug package for package %s: %s", forPackage, pkg.Name()) log.Debugf("[MOVE] found debug package for package %s: %s", forPackage, pkg.Name())
debugPkgs++ debugPkgs++
if _, err := os.Stat(filepath.Join(conf.Basedir.Debug, march, filepath.Base(file))); err == nil { if _, err := os.Stat(filepath.Join(conf.Basedir.Debug, march, filepath.Base(file))); err == nil {
log.Warningf("[MOVE] Overwrite existing debug infos for %s: %s", forPackage, log.Warningf("[MOVE] overwrite existing debug infos for %s: %s", forPackage,
filepath.Join(conf.Basedir.Debug, march, filepath.Base(file))) filepath.Join(conf.Basedir.Debug, march, filepath.Base(file)))
} }
@@ -254,7 +230,7 @@ func movePackagesLive(fullRepo string) error {
continue continue
} }
log.Warningf("[MOVE] Deleting package %s: %v", pkg.Name(), err) log.Warningf("[MOVE] deleting package %s: %v", pkg.Name(), err)
_ = os.Remove(file) _ = os.Remove(file)
_ = os.Remove(file + ".sig") _ = os.Remove(file + ".sig")
continue continue
@@ -271,7 +247,6 @@ func movePackagesLive(fullRepo string) error {
toAdd = append(toAdd, &ProtoPackage{ toAdd = append(toAdd, &ProtoPackage{
DBPackage: dbPkg, DBPackage: dbPkg,
Pkgbase: dbPkg.Pkgbase,
PkgFiles: []string{filepath.Join(conf.Basedir.Repo, fullRepo, "os", conf.Arch, filepath.Base(file))}, PkgFiles: []string{filepath.Join(conf.Basedir.Repo, fullRepo, "os", conf.Arch, filepath.Base(file))},
Version: pkg.Version(), Version: pkg.Version(),
March: march, March: march,
@@ -279,7 +254,7 @@ func movePackagesLive(fullRepo string) error {
} }
if len(toAdd) > 0 { if len(toAdd) > 0 {
log.Infof("[%s] Adding %d (%d with debug) packages", fullRepo, len(toAdd), debugPkgs) log.Infof("[%s] adding %d (%d with debug) packages", fullRepo, len(toAdd), debugPkgs)
buildManager.repoAdd[fullRepo] <- toAdd buildManager.repoAdd[fullRepo] <- toAdd
} }
return nil return nil
@@ -563,6 +538,20 @@ func setupMakepkg(march string, flags map[string]any) error {
return nil return nil
} }
func parseState(state string) (*StateInfo, error) {
ss := strings.Split(state, " ")
if len(ss) != 4 {
return nil, fmt.Errorf("invalid state file")
}
return &StateInfo{
Pkgbase: ss[0],
PkgVer: ss[1],
TagVer: ss[2],
TagRev: strings.Trim(ss[3], "\n"),
}, nil
}
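parseState expects a state file consisting of four space-separated fields: pkgbase, package version, tag version and tag revision. A self-contained illustration using the same parsing logic on a made-up state line:

package main

import (
	"fmt"
	"strings"
)

type StateInfo struct {
	Pkgbase string
	PkgVer  string
	TagVer  string
	TagRev  string
}

// parseState follows the logic shown above: four space-separated fields, trailing newline trimmed.
func parseState(state string) (*StateInfo, error) {
	ss := strings.Split(state, " ")
	if len(ss) != 4 {
		return nil, fmt.Errorf("invalid state file")
	}
	return &StateInfo{
		Pkgbase: ss[0],
		PkgVer:  ss[1],
		TagVer:  ss[2],
		TagRev:  strings.Trim(ss[3], "\n"),
	}, nil
}

func main() {
	// Made-up example contents of a state file for pkgbase "foo".
	raw := "foo 1.2.3-1 1.2.3-1 0123456789abcdef0123456789abcdef01234567\n"
	state, err := parseState(raw)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%+v\n", *state)
}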
func ContainsPkg(pkgs []*ProtoPackage, pkg *ProtoPackage, repoSensitive bool) bool { func ContainsPkg(pkgs []*ProtoPackage, pkg *ProtoPackage, repoSensitive bool) bool {
for _, tPkg := range pkgs { for _, tPkg := range pkgs {
if tPkg.PkgbaseEquals(pkg, repoSensitive) { if tPkg.PkgbaseEquals(pkg, repoSensitive) {