forked from ALHP/ALHP.GO
refactor utils.go, error handling
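The change applied throughout this commit is the removal of the panic-based check() helper from utils.go in favour of handling each error where it occurs. A minimal, self-contained sketch of the before/after pattern follows; the directory path and the main() wrapper are illustrative only, not taken from the repository:

```go
package main

import (
	"os"

	log "github.com/sirupsen/logrus"
)

// check mirrors the helper this commit deletes: any error becomes a panic
// that takes the whole daemon down.
func check(e error) {
	if e != nil {
		panic(e)
	}
}

func main() {
	dir := "/tmp/alhp-example" // hypothetical path, not from the real config

	// Old style: a transient filesystem error aborts the process.
	check(os.MkdirAll(dir, 0755))

	// New style used in this diff: log and keep the worker running,
	// with log.Fatalf reserved for unrecoverable setup steps.
	if err := os.MkdirAll(dir, 0755); err != nil {
		log.Warningf("Error creating dir: %v", err)
	}
}
```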
main.go (156 changed lines)
@@ -132,7 +132,10 @@ func (b *BuildManager) buildWorker(id int, march string) {
 cmd.Stdout = &out
 cmd.Stderr = &out

-check(cmd.Start())
+err = cmd.Start()
+if err != nil {
+log.Errorf("[%s/%s/%s] Error starting build: %v", pkg.FullRepo, pkg.Pkgbase, pkg.Version, err)
+}

 b.buildProcMutex.Lock()
 b.buildProcesses = append(b.buildProcesses, cmd.Process)
@@ -183,13 +186,19 @@ func (b *BuildManager) buildWorker(id int, march string) {

 log.Warningf("[%s/%s/%s] Build failed (%d)", pkg.FullRepo, pkg.Pkgbase, pkg.Version, cmd.ProcessState.ExitCode())

-check(os.MkdirAll(filepath.Join(conf.Basedir.Repo, logDir, march), 0755))
-check(os.WriteFile(filepath.Join(conf.Basedir.Repo, logDir, march, pkg.Pkgbase+".log"), out.Bytes(), 0644))
+err = os.MkdirAll(filepath.Join(conf.Basedir.Repo, logDir, march), 0755)
+if err != nil {
+log.Warningf("[%s/%s/%s] Error creating logdir: %v", pkg.FullRepo, pkg.Pkgbase, pkg.Version, err)
+}
+err = os.WriteFile(filepath.Join(conf.Basedir.Repo, logDir, march, pkg.Pkgbase+".log"), out.Bytes(), 0644)
+if err != nil {
+log.Warningf("[%s/%s/%s] Error writing to logdir: %v", pkg.FullRepo, pkg.Pkgbase, pkg.Version, err)
+}

 pkg.DbPackage.Update().SetStatus(dbpackage.StatusFailed).ClearSkipReason().SetBuildTimeStart(start).SetBuildTimeEnd(time.Now().UTC()).SetHash(pkg.Hash).ExecX(context.Background())

 // purge failed package from repo
-b.repoPurge[pkg.FullRepo] <- []*BuildPackage{pkg}
+b.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}

 err = cleanBuildDir(buildDir)
 if err != nil {
@@ -200,7 +209,9 @@ func (b *BuildManager) buildWorker(id int, march string) {
 }

 pkgFiles, err := filepath.Glob(filepath.Join(filepath.Dir(pkg.Pkgbuild), "*.pkg.tar.zst"))
-check(err)
+if err != nil {
+log.Warningf("[%s/%s/%s] Error scanning builddir for artifacts: %v", pkg.FullRepo, pkg.Pkgbase, pkg.Version, err)
+}

 if len(pkgFiles) == 0 {
 log.Warningf("No packages found after building %s. Abort build.", pkg.Pkgbase)
@@ -224,13 +235,18 @@ func (b *BuildManager) buildWorker(id int, march string) {
 }

 copyFiles, err := filepath.Glob(filepath.Join(filepath.Dir(pkg.Pkgbuild), "*.pkg.tar.zst*"))
-check(err)
+if err != nil {
+log.Warningf("[%s/%s/%s] Error scanning builddir for artifacts: %v", pkg.FullRepo, pkg.Pkgbase, pkg.Version, err)
+}

 for _, file := range copyFiles {
-check(os.MkdirAll(filepath.Join(conf.Basedir.Work, waitingDir, pkg.FullRepo), 0755))
+err = os.MkdirAll(filepath.Join(conf.Basedir.Work, waitingDir, pkg.FullRepo), 0755)
+if err != nil {
+log.Warningf("[%s/%s/%s] Error creating holding dir: %v", pkg.FullRepo, pkg.Pkgbase, pkg.Version, err)
+}
 _, err = copyFile(file, filepath.Join(conf.Basedir.Work, waitingDir, pkg.FullRepo, filepath.Base(file)))
 if err != nil {
-check(err)
+log.Warningf("[%s/%s/%s] Error coping file to holding dir: %v", pkg.FullRepo, pkg.Pkgbase, pkg.Version, err)
 b.buildWG.Done()
 continue
 }
@@ -241,7 +257,10 @@ func (b *BuildManager) buildWorker(id int, march string) {
 }

 if _, err := os.Stat(filepath.Join(conf.Basedir.Repo, logDir, march, pkg.Pkgbase+".log")); err == nil {
-check(os.Remove(filepath.Join(conf.Basedir.Repo, logDir, march, pkg.Pkgbase+".log")))
+err = os.Remove(filepath.Join(conf.Basedir.Repo, logDir, march, pkg.Pkgbase+".log"))
+if err != nil {
+log.Warningf("[%s/%s/%s] Error removing log: %v", pkg.FullRepo, pkg.Pkgbase, pkg.Version, err)
+}
 }

 if pkg.DbPackage.Lto != dbpackage.LtoDisabled && pkg.DbPackage.Lto != dbpackage.LtoAutoDisabled {
@@ -313,7 +332,7 @@ func (b *BuildManager) parseWorker() {
 pkg.DbPackage.SkipReason = "blacklisted (haskell)"
 pkg.DbPackage.Status = dbpackage.StatusSkipped
 skipping = true
-} else if isPkgFailed(pkg) {
+} else if pkg.isPkgFailed() {
 log.Debugf("Skipped %s: failed build", pkg.Srcinfo.Pkgbase)
 skipping = true
 }
@@ -322,7 +341,7 @@ func (b *BuildManager) parseWorker() {
 pkg.DbPackage = pkg.DbPackage.Update().SetUpdated(time.Now()).SetVersion(pkg.Version).
 SetPackages(packages2slice(pkg.Srcinfo.Packages)).SetStatus(pkg.DbPackage.Status).
 SetSkipReason(pkg.DbPackage.SkipReason).SetHash(pkg.Hash).SaveX(context.Background())
-b.repoPurge[pkg.FullRepo] <- []*BuildPackage{pkg}
+b.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
 b.parseWG.Done()
 continue
 } else {
@@ -348,19 +367,19 @@ func (b *BuildManager) parseWorker() {
 switch err.(type) {
 default:
 log.Warningf("[%s/%s] Problem solving dependencies: %v", pkg.FullRepo, pkg.Srcinfo.Pkgbase, err)
-b.repoPurge[pkg.FullRepo] <- []*BuildPackage{pkg}
+b.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
 b.parseWG.Done()
 continue
 case MultiplePKGBUILDError:
 log.Infof("Skipped %s: Multiple PKGBUILDs for dependency found: %v", pkg.Srcinfo.Pkgbase, err)
 pkg.DbPackage = pkg.DbPackage.Update().SetStatus(dbpackage.StatusSkipped).SetSkipReason("multiple PKGBUILD for dep. found").SaveX(context.Background())
-b.repoPurge[pkg.FullRepo] <- []*BuildPackage{pkg}
+b.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
 b.parseWG.Done()
 continue
 case UnableToSatisfyError:
 log.Infof("Skipped %s: unable to resolve dependencies: %v", pkg.Srcinfo.Pkgbase, err)
 pkg.DbPackage = pkg.DbPackage.Update().SetStatus(dbpackage.StatusSkipped).SetSkipReason("unable to resolve dependencies").SaveX(context.Background())
-b.repoPurge[pkg.FullRepo] <- []*BuildPackage{pkg}
+b.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
 b.parseWG.Done()
 continue
 }
@@ -381,7 +400,7 @@ func (b *BuildManager) parseWorker() {
 // Worst case would be clients downloading a package update twice, once from their official mirror,
 // and then after build from ALHP. Best case we prevent a not buildable package from staying in the repos
 // in an outdated version.
-b.repoPurge[pkg.FullRepo] <- []*BuildPackage{pkg}
+b.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
 b.parseWG.Done()
 continue
 }
@@ -548,12 +567,21 @@ func (b *BuildManager) htmlWorker() {
 }

 statusTpl, err := template.ParseFiles("tpl/packages.html")
-check(err)
+if err != nil {
+log.Warningf("[HTML] Error parsing template file: %v", err)
+continue
+}

 f, err := os.OpenFile(filepath.Join(conf.Basedir.Repo, "packages.html"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
-check(err)
-check(statusTpl.Execute(f, gen))
-check(f.Close())
+if err != nil {
+log.Warningf("[HTML] Erro ropening output file: %v", err)
+continue
+}
+err = statusTpl.Execute(f, gen)
+if err != nil {
+log.Warningf("[HTML] Error filling template: %v", err)
+}
+_ = f.Close()

 time.Sleep(time.Minute * 5)
 }
@@ -601,8 +629,13 @@ func (b *BuildManager) repoWorker(repo string) {
 cmd = exec.Command("paccache", "-rc", filepath.Join(conf.Basedir.Repo, repo, "os", conf.Arch), "-k", "1")
 res, err = cmd.CombinedOutput()
 log.Debug(string(res))
-check(err)
-updateLastUpdated()
+if err != nil {
+log.Warningf("Error running paccache: %v", err)
+}
+err = updateLastUpdated()
+if err != nil {
+log.Warningf("Error updating lastupdate: %v", err)
+}
 case pkgL := <-b.repoPurge[repo]:
 for _, pkg := range pkgL {
 if _, err := os.Stat(filepath.Join(conf.Basedir.Repo, pkg.FullRepo, "os", conf.Arch, pkg.FullRepo) + ".db.tar.xz"); err != nil {
@@ -640,7 +673,10 @@ func (b *BuildManager) repoWorker(repo string) {
 _ = os.Remove(file)
 _ = os.Remove(file + ".sig")
 }
-updateLastUpdated()
+err = updateLastUpdated()
+if err != nil {
+log.Warningf("Error updating lastupdate: %v", err)
+}
 b.repoWG.Done()
 }
 }
@@ -648,7 +684,10 @@ func (b *BuildManager) repoWorker(repo string) {
 }

 func (b *BuildManager) syncWorker() {
-check(os.MkdirAll(filepath.Join(conf.Basedir.Work, upstreamDir), 0755))
+err := os.MkdirAll(filepath.Join(conf.Basedir.Work, upstreamDir), 0755)
+if err != nil {
+log.Fatalf("Error creating upstream dir: %v", err)
+}

 for i := 0; i < runtime.NumCPU(); i++ {
 go b.parseWorker()
@@ -662,12 +701,16 @@ func (b *BuildManager) syncWorker() {
 cmd := exec.Command("git", "clone", "--depth=1", gitURL, gitPath)
 res, err := cmd.CombinedOutput()
 log.Debug(string(res))
-check(err)
+if err != nil {
+log.Fatalf("Error running git clone: %v", err)
+}
 } else if err == nil {
 cmd := exec.Command("sh", "-c", "cd "+gitPath+" && git reset --hard")
 res, err := cmd.CombinedOutput()
 log.Debug(string(res))
-check(err)
+if err != nil {
+log.Fatalf("Error running git reset: %v", err)
+}

 cmd = exec.Command("sh", "-c", "cd "+gitPath+" && git pull")
 res, err = cmd.CombinedOutput()
@@ -700,7 +743,10 @@ func (b *BuildManager) syncWorker() {

 // fetch updates between sync runs
 b.alpmMutex.Lock()
-check(alpmHandle.Release())
+err = alpmHandle.Release()
+if err != nil {
+log.Fatalf("Error releasing ALPM handle: %v", err)
+}
 err = setupChroot()
 for err != nil {
 log.Warningf("Unable to upgrade chroot, trying again later.")
@@ -709,11 +755,15 @@ func (b *BuildManager) syncWorker() {
 }

 alpmHandle, err = initALPM(filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot), filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot, "/var/lib/pacman"))
-check(err)
+if err != nil {
+log.Warningf("Error while ALPM-init: %v", err)
+}
 b.alpmMutex.Unlock()

 pkgBuilds, err := Glob(filepath.Join(conf.Basedir.Work, upstreamDir, "/**/PKGBUILD"))
-check(err)
+if err != nil {
+log.Fatalf("Error scanning for PKGBUILDs: %v", err)
+}

 // Shuffle pkgbuilds to spread out long-running builds, otherwise pkgBuilds is alphabetically-sorted
 rand.Seed(time.Now().UnixNano())
@@ -747,7 +797,9 @@ func (b *BuildManager) syncWorker() {
 // compare b3sum of PKGBUILD file to hash in database, only proceed if hash differs
 // reduces the amount of PKGBUILDs that need to be parsed with makepkg, which is _really_ slow, significantly
 b3s, err := b3sum(pkgbuild)
-check(err)
+if dbErr != nil {
+log.Fatalf("Error hashing PKGBUILD: %v", err)
+}

 if dbPkg != nil && b3s == dbPkg.Hash {
 log.Debugf("[%s/%s] Skipped: PKGBUILD hash matches db (%s)", mPkgbuild.Repo(), mPkgbuild.PkgBase(), b3s)
@@ -756,7 +808,7 @@ func (b *BuildManager) syncWorker() {

 // send to parse
 b.parseWG.Add(1)
-b.parse <- &BuildPackage{
+b.parse <- &ProtoPackage{
 Pkgbuild: pkgbuild,
 Pkgbase: mPkgbuild.PkgBase(),
 Repo: dbpackage.Repository(mPkgbuild.Repo()),
@@ -793,13 +845,19 @@ func main() {
 flag.Parse()

 confStr, err := os.ReadFile("config.yaml")
-check(err)
+if err != nil {
+log.Fatalf("Error reading config file: %v", err)
+}

 err = yaml.Unmarshal(confStr, &conf)
-check(err)
+if err != nil {
+log.Fatalf("Error parsing config file: %v", err)
+}

 lvl, err := log.ParseLevel(conf.Logging.Level)
-check(err)
+if err != nil {
+log.Fatalf("Error parsing log level from config: %v", err)
+}
 log.SetLevel(lvl)
 if *journalLog {
 journalhook.Enable()
@@ -811,7 +869,9 @@ func main() {
 }

 err = os.MkdirAll(conf.Basedir.Repo, 0755)
-check(err)
+if err != nil {
+log.Fatalf("Error creating repo dir: %v", err)
+}

 if conf.Db.Driver == "pgx" {
 pdb, err := sql.Open("pgx", conf.Db.ConnectTo)
@@ -836,10 +896,10 @@ func main() {
 }

 buildManager = &BuildManager{
-build: make(map[string]chan *BuildPackage),
-parse: make(chan *BuildPackage, 10000),
-repoPurge: make(map[string]chan []*BuildPackage),
-repoAdd: make(map[string]chan []*BuildPackage),
+build: make(map[string]chan *ProtoPackage),
+parse: make(chan *ProtoPackage, 10000),
+repoPurge: make(map[string]chan []*ProtoPackage),
+repoAdd: make(map[string]chan []*ProtoPackage),
 exit: false,
 }

@@ -847,10 +907,15 @@ func main() {
 if err != nil {
 log.Fatalf("Unable to setup chroot: %v", err)
 }
-syncMarchs()
+err = syncMarchs()
+if err != nil {
+log.Fatalf("Error syncing marchs: %v", err)
+}

 alpmHandle, err = initALPM(filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot), filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot, "/var/lib/pacman"))
-check(err)
+if err != nil {
+log.Fatalf("Error while ALPM-init: %v", err)
+}

 go buildManager.syncWorker()
 go buildManager.htmlWorker()
@@ -884,12 +949,17 @@ killLoop:
 buildManager.buildProcMutex.RLock()
 for _, p := range buildManager.buildProcesses {
 pgid, err := syscall.Getpgid(p.Pid)
-check(err)
+if err != nil {
+log.Warningf("Error getting pgid: %v", err)
+}

-check(syscall.Kill(-pgid, syscall.SIGTERM))
+err = syscall.Kill(-pgid, syscall.SIGTERM)
+if err != nil {
+log.Warningf("Error killing %d: %v", pgid, err)
+}
 }
 buildManager.buildProcMutex.RUnlock()
 buildManager.buildWG.Wait()
 buildManager.repoWG.Wait()
-check(alpmHandle.Release())
+_ = alpmHandle.Release()
 }
package.go (new file, 84 lines)
@@ -0,0 +1,84 @@
+package main
+
+import (
+	"context"
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqljson"
+	"fmt"
+	"git.harting.dev/ALHP/ALHP.GO/ent"
+	"git.harting.dev/ALHP/ALHP.GO/ent/dbpackage"
+	log "github.com/sirupsen/logrus"
+	"os/exec"
+	"path/filepath"
+	"strings"
+)
+
+type Package string
+
+// Name returns the name from Package
+func (path Package) Name() string {
+	fNameSplit := strings.Split(filepath.Base(string(path)), "-")
+	return strings.Join(fNameSplit[:len(fNameSplit)-3], "-")
+}
+
+func (path Package) MArch() string {
+	splitPath := strings.Split(string(path), string(filepath.Separator))
+	return strings.Join(strings.Split(splitPath[len(splitPath)-4], "-")[1:], "-")
+}
+
+func (path Package) Repo() dbpackage.Repository {
+	splitPath := strings.Split(string(path), string(filepath.Separator))
+	return dbpackage.Repository(strings.Split(splitPath[len(splitPath)-4], "-")[0])
+}
+
+func (path Package) FullRepo() string {
+	splitPath := strings.Split(string(path), string(filepath.Separator))
+	return splitPath[len(splitPath)-4]
+}
+
+func (path Package) Version() string {
+	fNameSplit := strings.Split(filepath.Base(string(path)), "-")
+	return strings.Join(fNameSplit[len(fNameSplit)-3:len(fNameSplit)-1], "-")
+}
+
+func (path Package) Arch() string {
+	fNameSplit := strings.Split(filepath.Base(string(path)), "-")
+	fNameSplit = strings.Split(fNameSplit[len(fNameSplit)-1], ".")
+	return fNameSplit[0]
+}
+
+func (path Package) HasValidSignature() (bool, error) {
+	cmd := exec.Command("gpg", "--verify", string(path)+".sig")
+	res, err := cmd.CombinedOutput()
+	if cmd.ProcessState.ExitCode() == 2 || cmd.ProcessState.ExitCode() == 1 {
+		return false, nil
+	} else if cmd.ProcessState.ExitCode() == 0 {
+		return true, nil
+	} else if err != nil {
+		return false, fmt.Errorf("error checking signature: %w (%s)", err, res)
+	}
+
+	return false, nil
+}
+
+func (path *Package) DBPackage(db *ent.Client) (*ent.DbPackage, error) {
+	return path.DBPackageIsolated(path.MArch(), path.Repo(), db)
+}
+
+func (path *Package) DBPackageIsolated(march string, repo dbpackage.Repository, db *ent.Client) (*ent.DbPackage, error) {
+	dbPkg, err := db.DbPackage.Query().Where(func(s *sql.Selector) {
+		s.Where(
+			sql.And(
+				sqljson.ValueContains(dbpackage.FieldPackages, path.Name()),
+				sql.EQ(dbpackage.FieldMarch, march),
+				sql.EQ(dbpackage.FieldRepository, repo)),
+		)
+	}).Only(context.Background())
+	if ent.IsNotFound(err) {
+		log.Debugf("Not found in database: %s", path.Name())
+		return nil, err
+	} else if err != nil {
+		return nil, err
+	}
+	return dbPkg, nil
+}
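The Package type above derives every piece of metadata from the artifact path alone. A short usage sketch; the path, constant, and wrapper function are hypothetical and only illustrate the expected <repo>-<march>/os/<arch>/<name>-<pkgver>-<pkgrel>-<arch>.pkg.tar.zst layout:

```go
package main

import "fmt"

// examplePackagePath is illustrative only; it is not a file shipped by ALHP.
const examplePackagePath = "/repo/extra-x86-64-v3/os/x86_64/foo-1.2.3-1-x86_64.pkg.tar.zst"

// printPackageMeta shows how the path-derived accessors fit together.
func printPackageMeta() {
	p := Package(examplePackagePath)
	fmt.Println(p.Name())     // foo
	fmt.Println(p.Version())  // 1.2.3-1
	fmt.Println(p.Arch())     // x86_64
	fmt.Println(p.Repo())     // extra
	fmt.Println(p.MArch())    // x86-64-v3
	fmt.Println(p.FullRepo()) // extra-x86-64-v3
}
```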
pkgbuild.go (new file, 22 lines)
@@ -0,0 +1,22 @@
+package main
+
+import (
+	"path/filepath"
+	"strings"
+)
+
+type PKGBUILD string
+
+func (p PKGBUILD) FullRepo() string {
+	sPkgbuild := strings.Split(string(p), string(filepath.Separator))
+	return sPkgbuild[len(sPkgbuild)-2]
+}
+
+func (p PKGBUILD) Repo() string {
+	return strings.Split(p.FullRepo(), "-")[0]
+}
+
+func (p PKGBUILD) PkgBase() string {
+	sPkgbuild := strings.Split(string(p), string(filepath.Separator))
+	return sPkgbuild[len(sPkgbuild)-4]
+}
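PKGBUILD applies the same path-parsing idea to checkouts of the upstream package repositories. A sketch using the example layout noted in a comment in proto_package.go (upstream/<mirror>/<pkgbase>/repos/<repo>-<arch>/PKGBUILD); the wrapper function is illustrative:

```go
package main

import "fmt"

// printPkgbuildMeta is illustrative; the path follows the layout referenced in
// proto_package.go: upstream/upstream-core-extra/extra-cmake-modules/repos/extra-any/PKGBUILD
func printPkgbuildMeta() {
	p := PKGBUILD("upstream/upstream-core-extra/extra-cmake-modules/repos/extra-any/PKGBUILD")
	fmt.Println(p.PkgBase())  // extra-cmake-modules
	fmt.Println(p.FullRepo()) // extra-any
	fmt.Println(p.Repo())     // extra
}
```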
proto_package.go (new file, 467 lines)
@@ -0,0 +1,467 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"git.harting.dev/ALHP/ALHP.GO/ent"
|
||||||
|
"git.harting.dev/ALHP/ALHP.GO/ent/dbpackage"
|
||||||
|
"github.com/Jguer/go-alpm/v2"
|
||||||
|
"github.com/Morganamilo/go-srcinfo"
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ProtoPackage struct {
|
||||||
|
Pkgbase string
|
||||||
|
Pkgbuild string
|
||||||
|
Srcinfo *srcinfo.Srcinfo
|
||||||
|
Arch string
|
||||||
|
PkgFiles []string
|
||||||
|
Repo dbpackage.Repository
|
||||||
|
March string
|
||||||
|
FullRepo string
|
||||||
|
Version string
|
||||||
|
Hash string
|
||||||
|
DbPackage *ent.DbPackage
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *ProtoPackage) setupBuildDir() (string, error) {
|
||||||
|
buildDir := filepath.Join(conf.Basedir.Work, buildDir, p.March, p.Pkgbase+"-"+p.Version)
|
||||||
|
|
||||||
|
err := cleanBuildDir(buildDir)
|
||||||
|
if err != nil {
|
||||||
|
return "", fmt.Errorf("removing old builddir failed: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = os.MkdirAll(buildDir, 0755)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
files, err := filepath.Glob(filepath.Join(filepath.Dir(p.Pkgbuild), "*"))
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, file := range files {
|
||||||
|
_, err = copyFile(file, filepath.Join(buildDir, filepath.Base(file)))
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
p.Pkgbuild = filepath.Join(buildDir, "PKGBUILD")
|
||||||
|
return buildDir, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *ProtoPackage) repoVersion() (string, error) {
|
||||||
|
err := p.findPkgFiles()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(p.PkgFiles) == 0 {
|
||||||
|
return "", fmt.Errorf("not found")
|
||||||
|
}
|
||||||
|
|
||||||
|
fNameSplit := strings.Split(p.PkgFiles[0], "-")
|
||||||
|
return fNameSplit[len(fNameSplit)-3] + "-" + fNameSplit[len(fNameSplit)-2], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *ProtoPackage) increasePkgRel(buildNo int) error {
|
||||||
|
if p.Srcinfo == nil {
|
||||||
|
err := p.genSrcinfo()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error generating srcinfo: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if p.Version == "" {
|
||||||
|
p.Version = constructVersion(p.Srcinfo.Pkgver, p.Srcinfo.Pkgrel, p.Srcinfo.Epoch)
|
||||||
|
}
|
||||||
|
|
||||||
|
f, err := os.OpenFile(p.Pkgbuild, os.O_RDWR, 0644)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func(f *os.File) {
|
||||||
|
err := f.Close()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}(f)
|
||||||
|
|
||||||
|
fStr, err := io.ReadAll(f)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
nStr := rePkgRel.ReplaceAllLiteralString(string(fStr), "pkgrel="+p.Srcinfo.Pkgrel+"."+strconv.Itoa(buildNo))
|
||||||
|
_, err = f.Seek(0, 0)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = f.Truncate(0)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = f.WriteString(nStr)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
p.Version += "." + strconv.Itoa(buildNo)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *ProtoPackage) prepareKernelPatches() error {
|
||||||
|
f, err := os.OpenFile(p.Pkgbuild, os.O_RDWR, 0644)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func(f *os.File) {
|
||||||
|
err := f.Close()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}(f)
|
||||||
|
|
||||||
|
fStr, err := io.ReadAll(f)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// choose best suited patch based on kernel version
|
||||||
|
var curVer string
|
||||||
|
for k := range conf.KernelPatches {
|
||||||
|
if k == p.Pkgbase {
|
||||||
|
curVer = k
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if alpm.VerCmp(p.Srcinfo.Pkgver, k) >= 0 && alpm.VerCmp(k, curVer) >= 0 {
|
||||||
|
curVer = k
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
newPKGBUILD := string(fStr)
|
||||||
|
if conf.KernelPatches[curVer] == "none" {
|
||||||
|
return fmt.Errorf("no patch available")
|
||||||
|
} else if conf.KernelPatches[curVer] == "skip" {
|
||||||
|
log.Debugf("[KP] skipped patching for %s", p.Pkgbase)
|
||||||
|
} else {
|
||||||
|
log.Debugf("[KP] choose patch %s for kernel %s", curVer, p.Srcinfo.Pkgver)
|
||||||
|
|
||||||
|
// add patch to source-array
|
||||||
|
orgSource := rePkgSource.FindStringSubmatch(newPKGBUILD)
|
||||||
|
if orgSource == nil || len(orgSource) < 1 {
|
||||||
|
return fmt.Errorf("no source=() found")
|
||||||
|
}
|
||||||
|
|
||||||
|
sources := strings.Split(orgSource[1], "\n")
|
||||||
|
sources = append(sources, fmt.Sprintf("\"%s\"", conf.KernelPatches[curVer]))
|
||||||
|
|
||||||
|
newPKGBUILD = rePkgSource.ReplaceAllLiteralString(newPKGBUILD, fmt.Sprintf("source=(%s)", strings.Join(sources, "\n")))
|
||||||
|
|
||||||
|
// add patch sha256 to sha256sums-array (yes, hardcoded to sha256)
|
||||||
|
// TODO: support all sums that makepkg also supports
|
||||||
|
// get sum
|
||||||
|
resp, err := http.Get(conf.KernelPatches[curVer])
|
||||||
|
if err != nil || resp.StatusCode != 200 {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h := sha256.New()
|
||||||
|
_, err = io.Copy(h, resp.Body)
|
||||||
|
defer func(Body io.ReadCloser) {
|
||||||
|
_ = Body.Close()
|
||||||
|
}(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
orgSums := rePkgSum.FindStringSubmatch(newPKGBUILD)
|
||||||
|
if orgSums == nil || len(orgSums) < 1 {
|
||||||
|
return fmt.Errorf("no sha256sums=() found")
|
||||||
|
}
|
||||||
|
|
||||||
|
sums := strings.Split(orgSums[1], "\n")
|
||||||
|
sums = append(sums, fmt.Sprintf("'%s'", hex.EncodeToString(h.Sum(nil))))
|
||||||
|
|
||||||
|
newPKGBUILD = rePkgSum.ReplaceAllLiteralString(newPKGBUILD, fmt.Sprintf("sha256sums=(\n%s\n)", strings.Join(sums, "\n")))
|
||||||
|
}
|
||||||
|
|
||||||
|
// enable config option
|
||||||
|
switch {
|
||||||
|
case strings.Contains(p.March, "v4"):
|
||||||
|
newPKGBUILD = strings.Replace(newPKGBUILD, "make olddefconfig\n", "echo CONFIG_GENERIC_CPU4=y >> .config\nmake olddefconfig\n", 1)
|
||||||
|
case strings.Contains(p.March, "v3"):
|
||||||
|
newPKGBUILD = strings.Replace(newPKGBUILD, "make olddefconfig\n", "echo CONFIG_GENERIC_CPU3=y >> .config\nmake olddefconfig\n", 1)
|
||||||
|
case strings.Contains(p.March, "v2"):
|
||||||
|
newPKGBUILD = strings.Replace(newPKGBUILD, "make olddefconfig\n", "echo CONFIG_GENERIC_CPU2=y >> .config\nmake olddefconfig\n", 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// empty file before writing
|
||||||
|
_, err = f.Seek(0, 0)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = f.Truncate(0)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = f.WriteString(newPKGBUILD)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *ProtoPackage) importKeys() error {
|
||||||
|
if p.Srcinfo.ValidPGPKeys != nil {
|
||||||
|
args := []string{"--keyserver", "keyserver.ubuntu.com", "--recv-keys"}
|
||||||
|
args = append(args, p.Srcinfo.ValidPGPKeys...)
|
||||||
|
cmd := exec.Command("gpg", args...)
|
||||||
|
_, err := cmd.CombinedOutput()
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *ProtoPackage) isAvailable(h *alpm.Handle) bool {
|
||||||
|
dbs, err := h.SyncDBs()
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
buildManager.alpmMutex.Lock()
|
||||||
|
var pkg alpm.IPackage
|
||||||
|
if p.Srcinfo != nil {
|
||||||
|
pkg, err = dbs.FindSatisfier(p.Srcinfo.Packages[0].Pkgname)
|
||||||
|
} else {
|
||||||
|
pkg, err = dbs.FindSatisfier(p.DbPackage.Packages[0])
|
||||||
|
}
|
||||||
|
buildManager.alpmMutex.Unlock()
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if pkg.DB().Name() != p.Repo.String() || pkg.Base() != p.Pkgbase {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if p.Srcinfo != nil && (p.Srcinfo.Arch[0] != pkg.Architecture() || p.Srcinfo.Pkgbase != pkg.Base()) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *ProtoPackage) SVN2GITVersion(h *alpm.Handle) (string, error) {
|
||||||
|
if p.Pkgbuild == "" && p.Pkgbase == "" {
|
||||||
|
return "", fmt.Errorf("invalid arguments")
|
||||||
|
}
|
||||||
|
|
||||||
|
// upstream/upstream-core-extra/extra-cmake-modules/repos/extra-any/PKGBUILD
|
||||||
|
pkgBuilds, _ := Glob(filepath.Join(conf.Basedir.Work, upstreamDir, "**/"+p.Pkgbase+"/repos/*/PKGBUILD"))
|
||||||
|
|
||||||
|
var fPkgbuilds []string
|
||||||
|
for _, pkgbuild := range pkgBuilds {
|
||||||
|
mPkgbuild := PKGBUILD(pkgbuild)
|
||||||
|
if mPkgbuild.FullRepo() == "trunk" || containsSubStr(mPkgbuild.FullRepo(), conf.Blacklist.Repo) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if !contains(fPkgbuilds, pkgbuild) {
|
||||||
|
fPkgbuilds = append(fPkgbuilds, pkgbuild)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(fPkgbuilds) > 1 {
|
||||||
|
log.Infof("%s: multiple PKGBUILD found, try resolving from mirror", p.Pkgbase)
|
||||||
|
dbs, err := h.SyncDBs()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
buildManager.alpmMutex.Lock()
|
||||||
|
iPackage, err := dbs.FindSatisfier(p.Pkgbase)
|
||||||
|
buildManager.alpmMutex.Unlock()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
pkgloop:
|
||||||
|
for _, pkgbuild := range fPkgbuilds {
|
||||||
|
repo := strings.Split(filepath.Base(filepath.Dir(pkgbuild)), "-")[0]
|
||||||
|
upstreamA := strings.Split(filepath.Dir(pkgbuild), "/")
|
||||||
|
upstream := upstreamA[len(upstreamA)-4]
|
||||||
|
|
||||||
|
switch upstream {
|
||||||
|
case "upstream-core-extra":
|
||||||
|
if iPackage.DB().Name() == repo && (repo == "extra" || repo == "core") {
|
||||||
|
fPkgbuilds = []string{pkgbuild}
|
||||||
|
break pkgloop
|
||||||
|
}
|
||||||
|
case "upstream-community":
|
||||||
|
if iPackage.DB().Name() == repo && repo == "community" {
|
||||||
|
fPkgbuilds = []string{pkgbuild}
|
||||||
|
break pkgloop
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(fPkgbuilds) > 1 {
|
||||||
|
return "", MultiplePKGBUILDError{fmt.Errorf("%s: multiple PKGBUILD found: %s", p.Pkgbase, fPkgbuilds)}
|
||||||
|
}
|
||||||
|
log.Infof("%s: resolving successful: MirrorRepo=%s; PKGBUILD chosen: %s", p.Pkgbase, iPackage.DB().Name(), fPkgbuilds[0])
|
||||||
|
} else if len(fPkgbuilds) == 0 {
|
||||||
|
return "", fmt.Errorf("%s: no matching PKGBUILD found (searched: %s, canidates: %s)", p.Pkgbase, filepath.Join(conf.Basedir.Work, upstreamDir, "**/"+p.Pkgbase+"/repos/*/PKGBUILD"), pkgBuilds)
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd := exec.Command("sh", "-c", "cd "+filepath.Dir(fPkgbuilds[0])+"&&"+"makepkg --printsrcinfo")
|
||||||
|
res, err := cmd.Output()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
info, err := srcinfo.Parse(string(res))
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return constructVersion(info.Pkgver, info.Pkgrel, info.Epoch), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *ProtoPackage) isPkgFailed() bool {
|
||||||
|
if p.DbPackage.Version == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := p.genSrcinfo(); err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if p.Version == "" {
|
||||||
|
p.Version = constructVersion(p.Srcinfo.Pkgver, p.Srcinfo.Pkgrel, p.Srcinfo.Epoch)
|
||||||
|
}
|
||||||
|
|
||||||
|
if alpm.VerCmp(p.DbPackage.Version, p.Version) < 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return p.DbPackage.Status == dbpackage.StatusFailed
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *ProtoPackage) genSrcinfo() error {
|
||||||
|
if p.Srcinfo != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd := exec.Command("sh", "-c", "cd "+filepath.Dir(p.Pkgbuild)+"&&"+"makepkg --printsrcinfo -p "+filepath.Base(p.Pkgbuild))
|
||||||
|
res, err := cmd.CombinedOutput()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("makepkg exit non-zero (PKGBUILD: %s): %w (%s)", p.Pkgbuild, err, string(res))
|
||||||
|
}
|
||||||
|
|
||||||
|
info, err := srcinfo.Parse(string(res))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
p.Srcinfo = info
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *ProtoPackage) findPkgFiles() error {
|
||||||
|
pkgs, err := os.ReadDir(filepath.Join(conf.Basedir.Repo, p.FullRepo, "os", conf.Arch))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var realPkgs []string
|
||||||
|
for _, realPkg := range p.DbPackage.Packages {
|
||||||
|
realPkgs = append(realPkgs, realPkg)
|
||||||
|
}
|
||||||
|
|
||||||
|
var fPkg []string
|
||||||
|
for _, file := range pkgs {
|
||||||
|
if !file.IsDir() && !strings.HasSuffix(file.Name(), ".sig") {
|
||||||
|
matches := rePkgFile.FindStringSubmatch(file.Name())
|
||||||
|
|
||||||
|
if len(matches) > 1 && contains(realPkgs, matches[1]) {
|
||||||
|
fPkg = append(fPkg, filepath.Join(conf.Basedir.Repo, p.FullRepo, "os", conf.Arch, file.Name()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
p.PkgFiles = fPkg
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *ProtoPackage) toDbPackage(create bool) {
|
||||||
|
if p.DbPackage != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
dbPkg, err := db.DbPackage.Query().Where(dbpackage.And(dbpackage.Pkgbase(p.Pkgbase), dbpackage.March(p.March), dbpackage.RepositoryEQ(p.Repo))).Only(context.Background())
|
||||||
|
if err != nil && create {
|
||||||
|
dbPkg = db.DbPackage.Create().SetPkgbase(p.Pkgbase).SetMarch(p.March).SetPackages(packages2slice(p.Srcinfo.Packages)).SetRepository(p.Repo).SaveX(context.Background())
|
||||||
|
}
|
||||||
|
|
||||||
|
p.DbPackage = dbPkg
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p ProtoPackage) exists() (bool, error) {
|
||||||
|
dbPkg, err := db.DbPackage.Query().Where(dbpackage.And(dbpackage.Pkgbase(p.Pkgbase), dbpackage.March(p.March))).Exist(context.Background())
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return dbPkg, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *ProtoPackage) isMirrorLatest(h *alpm.Handle) (bool, alpm.IPackage, string, error) {
|
||||||
|
dbs, err := h.SyncDBs()
|
||||||
|
if err != nil {
|
||||||
|
return false, nil, "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
allDepends := p.Srcinfo.Depends
|
||||||
|
allDepends = append(allDepends, p.Srcinfo.MakeDepends...)
|
||||||
|
|
||||||
|
for _, dep := range allDepends {
|
||||||
|
buildManager.alpmMutex.Lock()
|
||||||
|
pkg, err := dbs.FindSatisfier(dep.Value)
|
||||||
|
buildManager.alpmMutex.Unlock()
|
||||||
|
if err != nil {
|
||||||
|
return false, nil, "", UnableToSatisfyError{err}
|
||||||
|
}
|
||||||
|
|
||||||
|
svn2gitVer, err := (&ProtoPackage{
|
||||||
|
Pkgbase: pkg.Base(),
|
||||||
|
}).SVN2GITVersion(h)
|
||||||
|
if err != nil {
|
||||||
|
return false, nil, "", err
|
||||||
|
} else if svn2gitVer == "" {
|
||||||
|
return false, nil, "", fmt.Errorf("no svn2git version")
|
||||||
|
}
|
||||||
|
|
||||||
|
if alpm.VerCmp(svn2gitVer, pkg.Version()) > 0 {
|
||||||
|
return false, pkg, svn2gitVer, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true, nil, "", nil
|
||||||
|
}
|
@@ -66,7 +66,7 @@ func TestIncreasePkgRel(t *testing.T) {
 }
 _ = pkgbuild.Close()

-buildPkg := &BuildPackage{
+buildPkg := &ProtoPackage{
 Pkgbase: "gnome-todo",
 Pkgbuild: pkgbuild.Name(),
 }
utils.go (615 changed lines)
@@ -2,10 +2,7 @@ package main

 import (
 "context"
-"crypto/sha256"
 "encoding/hex"
-"entgo.io/ent/dialect/sql"
-"entgo.io/ent/dialect/sql/sqljson"
 "fmt"
 "git.harting.dev/ALHP/ALHP.GO/ent"
 "git.harting.dev/ALHP/ALHP.GO/ent/dbpackage"
@@ -16,7 +13,6 @@ import (
 "io"
 "io/fs"
 "lukechampine.com/blake3"
-"net/http"
 "os"
 "os/exec"
 "path/filepath"
@@ -54,25 +50,11 @@ var (
 reSigError = regexp.MustCompile(`(?m)^error: .*: signature from .* is invalid$`)
 )

-type BuildPackage struct {
-Pkgbase string
-Pkgbuild string
-Srcinfo *srcinfo.Srcinfo
-Arch string
-PkgFiles []string
-Repo dbpackage.Repository
-March string
-FullRepo string
-Version string
-Hash string
-DbPackage *ent.DbPackage
-}
-
 type BuildManager struct {
-build map[string]chan *BuildPackage
-parse chan *BuildPackage
-repoPurge map[string]chan []*BuildPackage
-repoAdd map[string]chan []*BuildPackage
+build map[string]chan *ProtoPackage
+parse chan *ProtoPackage
+repoPurge map[string]chan []*ProtoPackage
+repoAdd map[string]chan []*ProtoPackage
 exit bool
 buildWG sync.WaitGroup
 parseWG sync.WaitGroup
@@ -119,8 +101,6 @@ type Conf struct {
 }

 type Globs []string
-type Package string
-type PKGBUILD string

 type MultiplePKGBUILDError struct {
 error
@@ -129,60 +109,12 @@ type UnableToSatisfyError struct {
 error
 }

-func check(e error) {
-if e != nil {
-panic(e)
-}
-}
-
-func (p PKGBUILD) FullRepo() string {
-sPkgbuild := strings.Split(string(p), string(filepath.Separator))
-return sPkgbuild[len(sPkgbuild)-2]
-}
-
-func (p PKGBUILD) Repo() string {
-return strings.Split(p.FullRepo(), "-")[0]
-}
-
-func (p PKGBUILD) PkgBase() string {
-sPkgbuild := strings.Split(string(p), string(filepath.Separator))
-return sPkgbuild[len(sPkgbuild)-4]
-}
-
-func updateLastUpdated() {
-check(os.WriteFile(filepath.Join(conf.Basedir.Repo, lastUpdate), []byte(strconv.FormatInt(time.Now().Unix(), 10)), 0644))
-}
-
-// Name returns the name from Package
-func (path Package) Name() string {
-fNameSplit := strings.Split(filepath.Base(string(path)), "-")
-return strings.Join(fNameSplit[:len(fNameSplit)-3], "-")
-}
-
-func (path Package) MArch() string {
-splitPath := strings.Split(string(path), string(filepath.Separator))
-return strings.Join(strings.Split(splitPath[len(splitPath)-4], "-")[1:], "-")
-}
-
-func (path Package) Repo() dbpackage.Repository {
-splitPath := strings.Split(string(path), string(filepath.Separator))
-return dbpackage.Repository(strings.Split(splitPath[len(splitPath)-4], "-")[0])
-}
-
-func (path Package) FullRepo() string {
-splitPath := strings.Split(string(path), string(filepath.Separator))
-return splitPath[len(splitPath)-4]
-}
-
-func (path Package) Version() string {
-fNameSplit := strings.Split(filepath.Base(string(path)), "-")
-return strings.Join(fNameSplit[len(fNameSplit)-3:len(fNameSplit)-1], "-")
-}
-
-func (path Package) Arch() string {
-fNameSplit := strings.Split(filepath.Base(string(path)), "-")
-fNameSplit = strings.Split(fNameSplit[len(fNameSplit)-1], ".")
-return fNameSplit[0]
-}
+func updateLastUpdated() error {
+err := os.WriteFile(filepath.Join(conf.Basedir.Repo, lastUpdate), []byte(strconv.FormatInt(time.Now().Unix(), 10)), 0644)
+if err != nil {
+return err
+}
+return nil
+}

 func statusId2string(s dbpackage.Status) string {
@@ -210,7 +142,7 @@ func b3sum(filePath string) (string, error) {
 return "", err
 }
 defer func(file *os.File) {
-check(file.Close())
+_ = file.Close()
 }(file)

 hash := blake3.New(32, nil)
@@ -240,201 +172,6 @@ func cleanBuildDir(dir string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *BuildPackage) setupBuildDir() (string, error) {
|
|
||||||
buildDir := filepath.Join(conf.Basedir.Work, buildDir, p.March, p.Pkgbase+"-"+p.Version)
|
|
||||||
|
|
||||||
err := cleanBuildDir(buildDir)
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf("removing old builddir failed: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = os.MkdirAll(buildDir, 0755)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
files, err := filepath.Glob(filepath.Join(filepath.Dir(p.Pkgbuild), "*"))
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, file := range files {
|
|
||||||
_, err = copyFile(file, filepath.Join(buildDir, filepath.Base(file)))
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
p.Pkgbuild = filepath.Join(buildDir, "PKGBUILD")
|
|
||||||
return buildDir, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *BuildPackage) repoVersion() (string, error) {
|
|
||||||
err := p.findPkgFiles()
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(p.PkgFiles) == 0 {
|
|
||||||
return "", fmt.Errorf("not found")
|
|
||||||
}
|
|
||||||
|
|
||||||
fNameSplit := strings.Split(p.PkgFiles[0], "-")
|
|
||||||
return fNameSplit[len(fNameSplit)-3] + "-" + fNameSplit[len(fNameSplit)-2], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *BuildPackage) increasePkgRel(buildNo int) error {
|
|
||||||
if p.Srcinfo == nil {
|
|
||||||
err := p.genSrcinfo()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("error generating srcinfo: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if p.Version == "" {
|
|
||||||
p.Version = constructVersion(p.Srcinfo.Pkgver, p.Srcinfo.Pkgrel, p.Srcinfo.Epoch)
|
|
||||||
}
|
|
||||||
|
|
||||||
f, err := os.OpenFile(p.Pkgbuild, os.O_RDWR, 0644)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
defer func(f *os.File) {
|
|
||||||
err := f.Close()
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}(f)
|
|
||||||
|
|
||||||
fStr, err := io.ReadAll(f)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
nStr := rePkgRel.ReplaceAllLiteralString(string(fStr), "pkgrel="+p.Srcinfo.Pkgrel+"."+strconv.Itoa(buildNo))
|
|
||||||
_, err = f.Seek(0, 0)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = f.Truncate(0)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = f.WriteString(nStr)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
p.Version += "." + strconv.Itoa(buildNo)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *BuildPackage) prepareKernelPatches() error {
|
|
||||||
f, err := os.OpenFile(p.Pkgbuild, os.O_RDWR, 0644)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
defer func(f *os.File) {
|
|
||||||
err := f.Close()
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}(f)
|
|
||||||
|
|
||||||
fStr, err := io.ReadAll(f)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// choose best suited patch based on kernel version
|
|
||||||
var curVer string
|
|
||||||
for k := range conf.KernelPatches {
|
|
||||||
if k == p.Pkgbase {
|
|
||||||
curVer = k
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if alpm.VerCmp(p.Srcinfo.Pkgver, k) >= 0 && alpm.VerCmp(k, curVer) >= 0 {
|
|
||||||
curVer = k
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
newPKGBUILD := string(fStr)
|
|
||||||
if conf.KernelPatches[curVer] == "none" {
|
|
||||||
return fmt.Errorf("no patch available")
|
|
||||||
} else if conf.KernelPatches[curVer] == "skip" {
|
|
||||||
log.Debugf("[KP] skipped patching for %s", p.Pkgbase)
|
|
||||||
} else {
|
|
||||||
log.Debugf("[KP] choose patch %s for kernel %s", curVer, p.Srcinfo.Pkgver)
|
|
||||||
|
|
||||||
// add patch to source-array
|
|
||||||
orgSource := rePkgSource.FindStringSubmatch(newPKGBUILD)
|
|
||||||
if orgSource == nil || len(orgSource) < 1 {
|
|
||||||
return fmt.Errorf("no source=() found")
|
|
||||||
}
|
|
||||||
|
|
||||||
sources := strings.Split(orgSource[1], "\n")
|
|
||||||
sources = append(sources, fmt.Sprintf("\"%s\"", conf.KernelPatches[curVer]))
|
|
||||||
|
|
||||||
newPKGBUILD = rePkgSource.ReplaceAllLiteralString(newPKGBUILD, fmt.Sprintf("source=(%s)", strings.Join(sources, "\n")))
|
|
||||||
|
|
||||||
// add patch sha256 to sha256sums-array (yes, hardcoded to sha256)
|
|
||||||
// TODO: support all sums that makepkg also supports
|
|
||||||
// get sum
|
|
||||||
resp, err := http.Get(conf.KernelPatches[curVer])
|
|
||||||
if err != nil || resp.StatusCode != 200 {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
h := sha256.New()
|
|
||||||
_, err = io.Copy(h, resp.Body)
|
|
||||||
defer func(Body io.ReadCloser) {
|
|
||||||
_ = Body.Close()
|
|
||||||
}(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
orgSums := rePkgSum.FindStringSubmatch(newPKGBUILD)
|
|
||||||
if orgSums == nil || len(orgSums) < 1 {
|
|
||||||
return fmt.Errorf("no sha256sums=() found")
|
|
||||||
}
|
|
||||||
|
|
||||||
sums := strings.Split(orgSums[1], "\n")
|
|
||||||
sums = append(sums, fmt.Sprintf("'%s'", hex.EncodeToString(h.Sum(nil))))
|
|
||||||
|
|
||||||
newPKGBUILD = rePkgSum.ReplaceAllLiteralString(newPKGBUILD, fmt.Sprintf("sha256sums=(\n%s\n)", strings.Join(sums, "\n")))
|
|
||||||
}
|
|
||||||
|
|
||||||
// enable config option
|
|
||||||
switch {
|
|
||||||
case strings.Contains(p.March, "v4"):
|
|
||||||
newPKGBUILD = strings.Replace(newPKGBUILD, "make olddefconfig\n", "echo CONFIG_GENERIC_CPU4=y >> .config\nmake olddefconfig\n", 1)
|
|
||||||
case strings.Contains(p.March, "v3"):
|
|
||||||
newPKGBUILD = strings.Replace(newPKGBUILD, "make olddefconfig\n", "echo CONFIG_GENERIC_CPU3=y >> .config\nmake olddefconfig\n", 1)
|
|
||||||
case strings.Contains(p.March, "v2"):
|
|
||||||
newPKGBUILD = strings.Replace(newPKGBUILD, "make olddefconfig\n", "echo CONFIG_GENERIC_CPU2=y >> .config\nmake olddefconfig\n", 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// empty file before writing
|
|
||||||
_, err = f.Seek(0, 0)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = f.Truncate(0)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = f.WriteString(newPKGBUILD)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func movePackagesLive(fullRepo string) error {
|
func movePackagesLive(fullRepo string) error {
|
||||||
if _, err := os.Stat(filepath.Join(conf.Basedir.Work, waitingDir, fullRepo)); os.IsNotExist(err) {
|
if _, err := os.Stat(filepath.Join(conf.Basedir.Work, waitingDir, fullRepo)); os.IsNotExist(err) {
|
||||||
return nil
|
return nil
|
||||||
@@ -450,11 +187,11 @@ func movePackagesLive(fullRepo string) error {
 return err
 }

-toAdd := make([]*BuildPackage, 0)
+toAdd := make([]*ProtoPackage, 0)

 for _, file := range pkgFiles {
 pkg := Package(file)
-dbPkg, err := pkg.DBPackageIsolated(march, dbpackage.Repository(repo))
+dbPkg, err := pkg.DBPackageIsolated(march, dbpackage.Repository(repo), db)
 if err != nil {
 if strings.HasSuffix(pkg.Name(), "-debug") {
 mkErr := os.MkdirAll(filepath.Join(conf.Basedir.Debug, march), 0755)
@@ -491,7 +228,7 @@ func movePackagesLive(fullRepo string) error {
 return err
 }

-toAdd = append(toAdd, &BuildPackage{
+toAdd = append(toAdd, &ProtoPackage{
 DbPackage: dbPkg,
 Pkgbase: dbPkg.Pkgbase,
 PkgFiles: []string{filepath.Join(conf.Basedir.Repo, fullRepo, "os", conf.Arch, filepath.Base(file))},
@@ -528,18 +265,6 @@ func packages2slice(pkgs interface{}) []string {
 }
 }

-func (p *BuildPackage) importKeys() error {
-if p.Srcinfo.ValidPGPKeys != nil {
-args := []string{"--keyserver", "keyserver.ubuntu.com", "--recv-keys"}
-args = append(args, p.Srcinfo.ValidPGPKeys...)
-cmd := exec.Command("gpg", args...)
-_, err := cmd.CombinedOutput()
-
-return err
-}
-return nil
-}
-
 func constructVersion(pkgver string, pkgrel string, epoch string) string {
 if epoch == "" {
 return pkgver + "-" + pkgrel
@@ -587,150 +312,6 @@ func initALPM(root string, dbpath string) (*alpm.Handle, error) {
 	return h, nil
 }
-
-func (p *BuildPackage) isAvailable(h *alpm.Handle) bool {
-	dbs, err := h.SyncDBs()
-	if err != nil {
-		return false
-	}
-
-	buildManager.alpmMutex.Lock()
-	var pkg alpm.IPackage
-	if p.Srcinfo != nil {
-		pkg, err = dbs.FindSatisfier(p.Srcinfo.Packages[0].Pkgname)
-	} else {
-		pkg, err = dbs.FindSatisfier(p.DbPackage.Packages[0])
-	}
-	buildManager.alpmMutex.Unlock()
-	if err != nil {
-		return false
-	}
-
-	if pkg.DB().Name() != p.Repo.String() || pkg.Base() != p.Pkgbase {
-		return false
-	}
-
-	if p.Srcinfo != nil && (p.Srcinfo.Arch[0] != pkg.Architecture() || p.Srcinfo.Pkgbase != pkg.Base()) {
-		return false
-	}
-
-	return true
-}
-
-func (p *BuildPackage) SVN2GITVersion(h *alpm.Handle) (string, error) {
-	if p.Pkgbuild == "" && p.Pkgbase == "" {
-		return "", fmt.Errorf("invalid arguments")
-	}
-
-	// upstream/upstream-core-extra/extra-cmake-modules/repos/extra-any/PKGBUILD
-	pkgBuilds, _ := Glob(filepath.Join(conf.Basedir.Work, upstreamDir, "**/"+p.Pkgbase+"/repos/*/PKGBUILD"))
-
-	var fPkgbuilds []string
-	for _, pkgbuild := range pkgBuilds {
-		mPkgbuild := PKGBUILD(pkgbuild)
-		if mPkgbuild.FullRepo() == "trunk" || containsSubStr(mPkgbuild.FullRepo(), conf.Blacklist.Repo) {
-			continue
-		}
-
-		if !contains(fPkgbuilds, pkgbuild) {
-			fPkgbuilds = append(fPkgbuilds, pkgbuild)
-		}
-	}
-
-	if len(fPkgbuilds) > 1 {
-		log.Infof("%s: multiple PKGBUILD found, try resolving from mirror", p.Pkgbase)
-		dbs, err := h.SyncDBs()
-		if err != nil {
-			return "", err
-		}
-
-		buildManager.alpmMutex.Lock()
-		iPackage, err := dbs.FindSatisfier(p.Pkgbase)
-		buildManager.alpmMutex.Unlock()
-		if err != nil {
-			return "", err
-		}
-
-	pkgloop:
-		for _, pkgbuild := range fPkgbuilds {
-			repo := strings.Split(filepath.Base(filepath.Dir(pkgbuild)), "-")[0]
-			upstreamA := strings.Split(filepath.Dir(pkgbuild), "/")
-			upstream := upstreamA[len(upstreamA)-4]
-
-			switch upstream {
-			case "upstream-core-extra":
-				if iPackage.DB().Name() == repo && (repo == "extra" || repo == "core") {
-					fPkgbuilds = []string{pkgbuild}
-					break pkgloop
-				}
-			case "upstream-community":
-				if iPackage.DB().Name() == repo && repo == "community" {
-					fPkgbuilds = []string{pkgbuild}
-					break pkgloop
-				}
-			}
-		}
-
-		if len(fPkgbuilds) > 1 {
-			return "", MultiplePKGBUILDError{fmt.Errorf("%s: multiple PKGBUILD found: %s", p.Pkgbase, fPkgbuilds)}
-		}
-		log.Infof("%s: resolving successful: MirrorRepo=%s; PKGBUILD chosen: %s", p.Pkgbase, iPackage.DB().Name(), fPkgbuilds[0])
-	} else if len(fPkgbuilds) == 0 {
-		return "", fmt.Errorf("%s: no matching PKGBUILD found (searched: %s, canidates: %s)", p.Pkgbase, filepath.Join(conf.Basedir.Work, upstreamDir, "**/"+p.Pkgbase+"/repos/*/PKGBUILD"), pkgBuilds)
-	}
-
-	cmd := exec.Command("sh", "-c", "cd "+filepath.Dir(fPkgbuilds[0])+"&&"+"makepkg --printsrcinfo")
-	res, err := cmd.Output()
-	if err != nil {
-		return "", err
-	}
-
-	info, err := srcinfo.Parse(string(res))
-	if err != nil {
-		return "", err
-	}
-
-	return constructVersion(info.Pkgver, info.Pkgrel, info.Epoch), nil
-}
-
-func isPkgFailed(pkg *BuildPackage) bool {
-	if pkg.DbPackage.Version == "" {
-		return false
-	}
-
-	if err := pkg.genSrcinfo(); err != nil {
-		return false
-	}
-
-	if pkg.Version == "" {
-		pkg.Version = constructVersion(pkg.Srcinfo.Pkgver, pkg.Srcinfo.Pkgrel, pkg.Srcinfo.Epoch)
-	}
-
-	if alpm.VerCmp(pkg.DbPackage.Version, pkg.Version) < 0 {
-		return false
-	}
-	return pkg.DbPackage.Status == dbpackage.StatusFailed
-}
-
-func (p *BuildPackage) genSrcinfo() error {
-	if p.Srcinfo != nil {
-		return nil
-	}
-
-	cmd := exec.Command("sh", "-c", "cd "+filepath.Dir(p.Pkgbuild)+"&&"+"makepkg --printsrcinfo -p "+filepath.Base(p.Pkgbuild))
-	res, err := cmd.CombinedOutput()
-	if err != nil {
-		return fmt.Errorf("makepkg exit non-zero (PKGBUILD: %s): %w (%s)", p.Pkgbuild, err, string(res))
-	}
-
-	info, err := srcinfo.Parse(string(res))
-	if err != nil {
-		return err
-	}
-
-	p.Srcinfo = info
-	return nil
-}
 
 func setupChroot() error {
 	if _, err := os.Stat(filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot)); err == nil {
 		//goland:noinspection SpellCheckingInspection
@@ -741,8 +322,10 @@ func setupChroot() error {
 			return fmt.Errorf("Unable to update chroot: %w\n%s", err, string(res))
 		}
 	} else if os.IsNotExist(err) {
-		err := os.MkdirAll(filepath.Join(conf.Basedir.Work, chrootDir), 0755)
-		check(err)
+		err = os.MkdirAll(filepath.Join(conf.Basedir.Work, chrootDir), 0755)
+		if err != nil {
+			return err
+		}
 
 		cmd := exec.Command("mkarchroot", "-C", pacmanConf, filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot), "base-devel")
 		res, err := cmd.CombinedOutput()
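This hunk shows the pattern the whole commit applies: the old check() helper (its implementation is not part of this diff; presumably it logged or panicked on a non-nil error) is replaced by returning the error to the caller. A minimal, self-contained sketch of the refactored style, using os.MkdirAll the way setupChroot does; the directory name and caller are only examples.

package main

import (
	"log"
	"os"
	"path/filepath"
)

// ensureDir hands the error back instead of swallowing it in a check() helper.
func ensureDir(base string) error {
	err := os.MkdirAll(filepath.Join(base, "chroot"), 0755)
	if err != nil {
		return err
	}
	return nil
}

func main() {
	if err := ensureDir(os.TempDir()); err != nil {
		log.Fatalf("setup failed: %v", err)
	}
}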
@@ -756,43 +339,6 @@ func setupChroot() error {
 	return nil
 }
-
-func (path *Package) DBPackage() (*ent.DbPackage, error) {
-	return path.DBPackageIsolated(path.MArch(), path.Repo())
-}
-
-func (path *Package) DBPackageIsolated(march string, repo dbpackage.Repository) (*ent.DbPackage, error) {
-	dbPkg, err := db.DbPackage.Query().Where(func(s *sql.Selector) {
-		s.Where(
-			sql.And(
-				sqljson.ValueContains(dbpackage.FieldPackages, path.Name()),
-				sql.EQ(dbpackage.FieldMarch, march),
-				sql.EQ(dbpackage.FieldRepository, repo)),
-		)
-	}).Only(context.Background())
-	if ent.IsNotFound(err) {
-		log.Debugf("Not found in database: %s", path.Name())
-		return nil, err
-	} else if err != nil {
-		return nil, err
-	}
-	return dbPkg, nil
-}
-
-func (path Package) hasValidSignature() (bool, error) {
-	cmd := exec.Command("gpg", "--verify", string(path)+".sig")
-	res, err := cmd.CombinedOutput()
-	log.Debug(string(res))
-	if cmd.ProcessState.ExitCode() == 2 || cmd.ProcessState.ExitCode() == 1 {
-		return false, nil
-	} else if cmd.ProcessState.ExitCode() == 0 {
-		return true, nil
-	} else if err != nil {
-		return false, err
-	}
-
-	return false, nil
-}
 
 func housekeeping(repo string, march string, wg *sync.WaitGroup) error {
 	defer wg.Done()
 	fullRepo := repo + "-" + march
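DBPackage and DBPackageIsolated move out of main.go and, judging by the call sites in this diff (mPackage.DBPackage(db) below, pkg.DBPackageIsolated(march, dbpackage.Repository(repo), db) in movePackagesLive), they now take the ent client as an explicit parameter instead of reading the package-level db variable. A sketch of the presumed new forwarding method, inferred from those call sites rather than copied from the refactored sources:

// Presumed new signature; the query body stays the same as the removed code
// above, it just runs against the client that is passed in.
func (path *Package) DBPackage(db *ent.Client) (*ent.DbPackage, error) {
	return path.DBPackageIsolated(path.MArch(), path.Repo(), db)
}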
@@ -805,22 +351,22 @@ func housekeeping(repo string, march string, wg *sync.WaitGroup) error {
 	for _, path := range packages {
 		mPackage := Package(path)
 
-		dbPkg, err := mPackage.DBPackage()
+		dbPkg, err := mPackage.DBPackage(db)
 		if ent.IsNotFound(err) {
 			log.Infof("[HK/%s] removing orphan %s", fullRepo, filepath.Base(path))
-			pkg := &BuildPackage{
+			pkg := &ProtoPackage{
 				FullRepo: mPackage.FullRepo(),
 				PkgFiles: []string{path},
 				March:    mPackage.MArch(),
 			}
-			buildManager.repoPurge[pkg.FullRepo] <- []*BuildPackage{pkg}
+			buildManager.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
 			continue
 		} else if err != nil {
 			log.Warningf("[HK] Problem fetching package from db for %s: %v", path, err)
 			continue
 		}
 
-		pkg := &BuildPackage{
+		pkg := &ProtoPackage{
 			Pkgbase:  dbPkg.Pkgbase,
 			Repo:     mPackage.Repo(),
 			FullRepo: mPackage.FullRepo(),
@@ -849,7 +395,7 @@ func housekeeping(repo string, march string, wg *sync.WaitGroup) error {
 		if err != nil || pkgResolved.DB().Name() != pkg.DbPackage.Repository.String() || pkgResolved.DB().Name() != pkg.Repo.String() || pkgResolved.Architecture() != pkg.Arch {
 			// package not found on mirror/db -> not part of any repo anymore
 			log.Infof("[HK/%s/%s] not included in repo", pkg.FullRepo, pkg.Pkgbase)
-			buildManager.repoPurge[pkg.FullRepo] <- []*BuildPackage{pkg}
+			buildManager.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
 			err = db.DbPackage.DeleteOne(pkg.DbPackage).Exec(context.Background())
 			if err != nil {
 				return err
@@ -863,13 +409,13 @@ func housekeeping(repo string, march string, wg *sync.WaitGroup) error {
 				return err
 			}
 			// check if pkg signature is valid
-			valid, err := mPackage.hasValidSignature()
+			valid, err := mPackage.HasValidSignature()
 			if err != nil {
 				return err
 			}
 			if !valid {
 				log.Infof("[HK/%s/%s] invalid package signature", pkg.FullRepo, pkg.Pkgbase)
-				buildManager.repoPurge[pkg.FullRepo] <- []*BuildPackage{pkg}
+				buildManager.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
 				continue
 			}
 		}
@@ -896,7 +442,7 @@ func housekeeping(repo string, march string, wg *sync.WaitGroup) error {
 	}
 
 	for _, dbPkg := range dbPackages {
-		pkg := &BuildPackage{
+		pkg := &ProtoPackage{
 			Pkgbase: dbPkg.Pkgbase,
 			Repo:    dbPkg.Repository,
 			March:   dbPkg.March,
@@ -935,13 +481,13 @@ func housekeeping(repo string, march string, wg *sync.WaitGroup) error {
 				return err
 			}
 
-			pkg := &BuildPackage{
+			pkg := &ProtoPackage{
 				FullRepo:  fullRepo,
 				PkgFiles:  existingSplits,
 				March:     march,
 				DbPackage: dbPkg,
 			}
-			buildManager.repoPurge[fullRepo] <- []*BuildPackage{pkg}
+			buildManager.repoPurge[fullRepo] <- []*ProtoPackage{pkg}
 		}
 	}
 }
@@ -962,7 +508,7 @@ func logHK() error {
 		pkgbase := strings.Join(extSplit[:len(extSplit)-1], ".")
 		march := pathSplit[len(pathSplit)-2]
 
-		pkg := BuildPackage{
+		pkg := ProtoPackage{
 			Pkgbase: pkgbase,
 			March:   march,
 		}
@@ -1009,58 +555,12 @@ func logHK() error {
 	return nil
 }
 
-func (p *BuildPackage) findPkgFiles() error {
-	pkgs, err := os.ReadDir(filepath.Join(conf.Basedir.Repo, p.FullRepo, "os", conf.Arch))
+func syncMarchs() error {
+	files, err := os.ReadDir(conf.Basedir.Repo)
 	if err != nil {
 		return err
 	}
 
-	var realPkgs []string
-	for _, realPkg := range p.DbPackage.Packages {
-		realPkgs = append(realPkgs, realPkg)
-	}
-
-	var fPkg []string
-	for _, file := range pkgs {
-		if !file.IsDir() && !strings.HasSuffix(file.Name(), ".sig") {
-			matches := rePkgFile.FindStringSubmatch(file.Name())
-
-			if len(matches) > 1 && contains(realPkgs, matches[1]) {
-				fPkg = append(fPkg, filepath.Join(conf.Basedir.Repo, p.FullRepo, "os", conf.Arch, file.Name()))
-			}
-		}
-	}
-
-	p.PkgFiles = fPkg
-	return nil
-}
-
-func (p *BuildPackage) toDbPackage(create bool) {
-	if p.DbPackage != nil {
-		return
-	}
-
-	dbPkg, err := db.DbPackage.Query().Where(dbpackage.And(dbpackage.Pkgbase(p.Pkgbase), dbpackage.March(p.March), dbpackage.RepositoryEQ(p.Repo))).Only(context.Background())
-	if err != nil && create {
-		dbPkg = db.DbPackage.Create().SetPkgbase(p.Pkgbase).SetMarch(p.March).SetPackages(packages2slice(p.Srcinfo.Packages)).SetRepository(p.Repo).SaveX(context.Background())
-	}
-
-	p.DbPackage = dbPkg
-}
-
-func (p BuildPackage) exists() (bool, error) {
-	dbPkg, err := db.DbPackage.Query().Where(dbpackage.And(dbpackage.Pkgbase(p.Pkgbase), dbpackage.March(p.March))).Exist(context.Background())
-	if err != nil {
-		return false, err
-	}
-
-	return dbPkg, nil
-}
-
-func syncMarchs() {
-	files, err := os.ReadDir(conf.Basedir.Repo)
-	check(err)
-
 	var eRepos []string
 	for _, file := range files {
 		if file.Name() != "." && file.Name() != logDir && file.IsDir() {
@@ -1074,7 +574,7 @@ func syncMarchs() {
 			log.Fatalf("Can't generate makepkg for %s: %v", march, err)
 		}
 
-		buildManager.build[march] = make(chan *BuildPackage, 10000)
+		buildManager.build[march] = make(chan *ProtoPackage, 10000)
 		for i := 0; i < conf.Build.Worker; i++ {
 			go buildManager.buildWorker(i, march)
 		}
@@ -1082,13 +582,16 @@ func syncMarchs() {
 		for _, repo := range conf.Repos {
 			fRepo := fmt.Sprintf("%s-%s", repo, march)
 			repos = append(repos, fRepo)
-			buildManager.repoAdd[fRepo] = make(chan []*BuildPackage, conf.Build.Worker)
-			buildManager.repoPurge[fRepo] = make(chan []*BuildPackage, 10000)
+			buildManager.repoAdd[fRepo] = make(chan []*ProtoPackage, conf.Build.Worker)
+			buildManager.repoPurge[fRepo] = make(chan []*ProtoPackage, 10000)
 			go buildManager.repoWorker(fRepo)
 
 			if _, err := os.Stat(filepath.Join(filepath.Join(conf.Basedir.Repo, fRepo, "os", conf.Arch))); os.IsNotExist(err) {
 				log.Debugf("Creating path %s", filepath.Join(conf.Basedir.Repo, fRepo, "os", conf.Arch))
-				check(os.MkdirAll(filepath.Join(conf.Basedir.Repo, fRepo, "os", conf.Arch), 0755))
+				err = os.MkdirAll(filepath.Join(conf.Basedir.Repo, fRepo, "os", conf.Arch), 0755)
+				if err != nil {
+					return err
+				}
 			}
 
 			if i := find(eRepos, fRepo); i != -1 {
@@ -1101,8 +604,12 @@ func syncMarchs() {
 
 	for _, repo := range eRepos {
 		log.Infof("Removing old repo %s", repo)
-		check(os.RemoveAll(filepath.Join(conf.Basedir.Repo, repo)))
+		err = os.RemoveAll(filepath.Join(conf.Basedir.Repo, repo))
+		if err != nil {
+			return err
+		}
 	}
+	return nil
 }
 
 //goland:noinspection SpellCheckingInspection
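With syncMarchs now returning an error instead of handling failures through check(), its caller has to decide what to do with one. The actual call site in main() is not part of this diff; a hypothetical caller could look like this:

// Hypothetical call site, not taken from this commit.
if err := syncMarchs(); err != nil {
	log.Fatalf("error syncing marchs: %v", err)
}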
@@ -1152,40 +659,6 @@ func setupMakepkg(march string) error {
 	return nil
 }
-
-func (p *BuildPackage) isMirrorLatest(h *alpm.Handle) (bool, alpm.IPackage, string, error) {
-	dbs, err := h.SyncDBs()
-	if err != nil {
-		return false, nil, "", err
-	}
-
-	allDepends := p.Srcinfo.Depends
-	allDepends = append(allDepends, p.Srcinfo.MakeDepends...)
-
-	for _, dep := range allDepends {
-		buildManager.alpmMutex.Lock()
-		pkg, err := dbs.FindSatisfier(dep.Value)
-		buildManager.alpmMutex.Unlock()
-		if err != nil {
-			return false, nil, "", UnableToSatisfyError{err}
-		}
-
-		svn2gitVer, err := (&BuildPackage{
-			Pkgbase: pkg.Base(),
-		}).SVN2GITVersion(h)
-		if err != nil {
-			return false, nil, "", err
-		} else if svn2gitVer == "" {
-			return false, nil, "", fmt.Errorf("no svn2git version")
-		}
-
-		if alpm.VerCmp(svn2gitVer, pkg.Version()) > 0 {
-			return false, pkg, svn2gitVer, nil
-		}
-	}
-
-	return true, nil, "", nil
-}
 
 func contains(s interface{}, str string) bool {
 	switch v := s.(type) {
 	case []string:
@@ -1233,7 +706,7 @@ func copyFile(src, dst string) (int64, error) {
 		return 0, err
 	}
 	defer func(source *os.File) {
-		check(source.Close())
+		_ = source.Close()
 	}(source)
 
 	destination, err := os.Create(dst)
@@ -1241,7 +714,7 @@ func copyFile(src, dst string) (int64, error) {
 		return 0, err
 	}
 	defer func(destination *os.File) {
-		check(destination.Close())
+		_ = destination.Close()
 	}(destination)
 	nBytes, err := io.Copy(destination, source)
 	return nBytes, err
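Both deferred closes in copyFile now discard the Close error instead of routing it through check(). For the read-only source that is harmless; for the written destination, a failed Close can mean the data never reached disk. A self-contained sketch of a stricter variant that still ignores the source Close error but propagates a failed Close of the destination; this is an alternative for comparison, not what the commit does.

package main

import (
	"io"
	"os"
)

// copyFileChecked reports a failed Close of the destination file, since that
// can indicate the copied bytes were not flushed.
func copyFileChecked(src, dst string) (int64, error) {
	source, err := os.Open(src)
	if err != nil {
		return 0, err
	}
	defer func() { _ = source.Close() }()

	destination, err := os.Create(dst)
	if err != nil {
		return 0, err
	}

	nBytes, err := io.Copy(destination, source)
	if cerr := destination.Close(); err == nil {
		err = cerr
	}
	return nBytes, err
}

Usage is the same as the existing copyFile: it returns the number of bytes copied and the first error encountered.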