Multiple changes related to build, modules and cloning
* Dependencies are now installed with best=1
* rpmutils.NVR is now equal to srpmproc's rpmutils.Nvr
* Add support for cloning a project and swapping target project builds on top
* Side NVR repos are now faster to create
* Module builds now support side NVRs
* Side NVRs now support multiple builds of a certain version
* ListBuilds now properly supports filters
* yumrepofsupdater now runs on a better node pool
* Upgrade srpmproc to v0.4.3
* kubernetes.jsonnet now supports node pool placements
* Modulemds are now copied properly to avoid accidental pointer overrides that propagated back to unrelated Mds
* rpmimport now properly imports SRPMs and doesn't fail randomly
* Yumrepofs now properly adds module defaults in non-all repos
* Yumrepofs now properly swaps older module artifacts
* Yumrepofs now properly replaces non-project artifacts after a clone swap
* Added additional logging to yumrepofs
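One of the subtler items above is the NVR regex change: apollo's rpmutils.NVR() now accepts the same release characters as srpmproc, and NVRNoArch() simply reuses srpmproc's rpmutils.Nvr. A minimal sketch of how a caller would split an artifact name with the updated helper follows; the sample filename and the printing are illustrative only and not part of this commit.

package main

import (
	"fmt"

	"peridot.resf.org/apollo/rpmutils"
)

func main() {
	// Hypothetical artifact name; any "name-version-release[.arch][.rpm]" string works.
	artifact := "bash-4.4.20-4.el8_6.x86_64.rpm"

	m := rpmutils.NVR().FindStringSubmatch(artifact)
	if m == nil {
		fmt.Println("not a valid NVR")
		return
	}
	// Submatches: m[1]=name, m[2]=version, m[3]=release, m[4]=arch (empty when absent).
	fmt.Printf("name=%s version=%s release=%s arch=%s\n", m[1], m[2], m[3], m[4])
}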
parent 8222ab2f43
commit 3319abf62b

25 changed files with 775 additions and 170 deletions
@@ -5,4 +5,5 @@ go_library(
 srcs = ["regex.go"],
 importpath = "peridot.resf.org/apollo/rpmutils",
 visibility = ["//visibility:public"],
+deps = ["//vendor/github.com/rocky-linux/srpmproc/pkg/rpmutils"],
 )

@@ -30,7 +30,10 @@
 
 package rpmutils
 
-import "regexp"
+import (
+"github.com/rocky-linux/srpmproc/pkg/rpmutils"
+"regexp"
+)
 
 var (
 nvr *regexp.Regexp
@@ -45,14 +48,14 @@ var (
 
 func NVR() *regexp.Regexp {
 if nvr == nil {
-nvr = regexp.MustCompile("^(\\S+)-([\\w~%.+]+)-(\\w+(?:\\.[\\w+]+)+?)(?:\\.(\\w+))?(?:\\.rpm)?$")
+nvr = regexp.MustCompile("^(\\S+)-([\\w~%.+]+)-(\\w+(?:\\.[\\w~%+]+)+?)(?:\\.(\\w+))?(?:\\.rpm)?$")
 }
 return nvr
 }
 
 func NVRNoArch() *regexp.Regexp {
 if nvrNoArch == nil {
-nvrNoArch = regexp.MustCompile("^(\\S+)-([\\w~%.+]+)-(\\w+(?:\\.[\\w+]+)+?)(?:\\.rpm)?$")
+nvrNoArch = rpmutils.Nvr
 }
 return nvrNoArch
 }

@@ -228,7 +228,7 @@ local dev() = stage == '-dev';
 },
 },
 ],
-affinity: if !std.objectHas(deployment, 'no_anti_affinity') || !deployment.no_anti_affinity then {
+affinity: (if !std.objectHas(deployment, 'no_anti_affinity') || !deployment.no_anti_affinity then {
 podAntiAffinity: {
 preferredDuringSchedulingIgnoredDuringExecution: [
 {
@@ -267,7 +267,33 @@ local dev() = stage == '-dev';
 },
 ],
 },
-},
+} else {}) + (if deployment.node_pool_request != null then {
+nodeAffinity: {
+requiredDuringSchedulingIgnoredDuringExecution: {
+nodeSelectorTerms: [
+{
+matchExpressions: [
+{
+key: deployment.node_pool_request.key,
+operator: 'In',
+values: [
+deployment.node_pool_request.value,
+],
+},
+],
+},
+],
+},
+},
+} else {}),
+tolerations: if deployment.node_pool_request != null then [
+{
+key: deployment.node_pool_request.key,
+operator: 'Equal',
+value: deployment.node_pool_request.value,
+effect: 'NoSchedule',
+},
+] else [],
 restartPolicy: 'Always',
 imagePullSecrets: if std.objectHas(deployment, 'imagePullSecrets') && deployment.imagePullSecrets != null then if std.type(deployment.imagePullSecrets) == 'string' then deployment.imagePullSecrets else [
 {

@@ -267,6 +267,7 @@ local manifestYamlStream = function (value, indent_array_in_object=false, c_docu
 limits: if std.objectHas(info, 'limits') then info.limits,
 requests: if std.objectHas(info, 'requests') then info.requests,
 args: if std.objectHas(info, 'args') then info.args else [],
+node_pool_request: if std.objectHas(info, 'node_pool_request') then info.node_pool_request else null,
 serviceAccount: sa_name,
 },
 ),

go.mod (13 changes)

@@ -26,18 +26,18 @@ require (
 github.com/gocolly/colly/v2 v2.1.0
 github.com/gogo/status v1.1.0
 github.com/google/uuid v1.3.0
-github.com/gorilla/feeds v1.1.1 // indirect
-github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
+github.com/gorilla/feeds v1.1.1
+github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.6.0
 github.com/imdario/mergo v0.3.11 // indirect
 github.com/jmoiron/sqlx v1.3.4
 github.com/lib/pq v1.10.2
 github.com/ory/hydra-client-go v1.10.6
 github.com/pelletier/go-toml v1.8.1 // indirect
-github.com/pkg/errors v0.9.1 // indirect
+github.com/pkg/errors v0.9.1
 github.com/prometheus/client_golang v1.13.0
-github.com/rocky-linux/srpmproc v0.4.2
+github.com/rocky-linux/srpmproc v0.4.3
 github.com/sirupsen/logrus v1.8.1
 github.com/spf13/cobra v1.1.3
 github.com/spf13/pflag v1.0.5
@@ -50,7 +50,6 @@ require (
 go.temporal.io/sdk v1.13.1
 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d // indirect
 golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
 golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2 // indirect
 google.golang.org/genproto v0.0.0-20211104193956-4c6863e31247
 google.golang.org/grpc v1.44.0
@@ -61,7 +60,7 @@ require (
 k8s.io/apimachinery v0.22.1
 k8s.io/client-go v0.22.1
 openapi.peridot.resf.org/peridotopenapi v0.0.0-00010101000000-000000000000
-peridot.resf.org/apollo/pb v0.0.0-00010101000000-000000000000 // indirect
+peridot.resf.org/apollo/pb v0.0.0-00010101000000-000000000000
 peridot.resf.org/common v0.0.0-00010101000000-000000000000
 peridot.resf.org/obsidian/pb v0.0.0-00010101000000-000000000000
 peridot.resf.org/peridot/keykeeper/pb v0.0.0-00010101000000-000000000000

go.sum (11 changes)

@@ -111,7 +111,6 @@ github.com/authzed/grpcutil v0.0.0-20211115181027-063820eb2511 h1:3/LcA84F8rSMZ8
 github.com/authzed/grpcutil v0.0.0-20211115181027-063820eb2511/go.mod h1:rqjY3zyK/YP7NID9+B2BdIRRkvnK+cdf9/qya/zaFZE=
 github.com/aws/aws-sdk-go v1.34.13/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
 github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
-github.com/aws/aws-sdk-go v1.36.12 h1:YJpKFEMbqEoo+incs5qMe61n1JH3o4O1IMkMexLzJG8=
 github.com/aws/aws-sdk-go v1.36.12/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
 github.com/aws/aws-sdk-go v1.44.129 h1:yld8Rc8OCahLtenY1mnve4w1jVeBu/rSiscGzodaDOs=
 github.com/aws/aws-sdk-go v1.44.129/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
@@ -533,6 +532,7 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv
 github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
 github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
@@ -663,12 +663,8 @@ github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=
 github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
-github.com/rocky-linux/srpmproc v0.3.16 h1:kxJEiQsZ0DcMhX0vY482n82XvjPiP2WifxI3NYuyLLM=
-github.com/rocky-linux/srpmproc v0.3.16/go.mod h1:vWZzxPTfxh4pmfr5Mw20FyrqyKsbGHzDwOlN+W5EMpw=
-github.com/rocky-linux/srpmproc v0.4.1 h1:qcq7bGLplKbu+dSKQ9VBwcTao3OqPNb6rdKz58MCFLA=
-github.com/rocky-linux/srpmproc v0.4.1/go.mod h1:x8Z2wqhV2JqRnYMhYz3thOQkfsSWjJkyX8DVGDPOb48=
-github.com/rocky-linux/srpmproc v0.4.2 h1:U8SnYPxYtJ2XIAnrcEvxx+PsHCi4uQsfIOIC754MKas=
-github.com/rocky-linux/srpmproc v0.4.2/go.mod h1:x8Z2wqhV2JqRnYMhYz3thOQkfsSWjJkyX8DVGDPOb48=
+github.com/rocky-linux/srpmproc v0.4.3 h1:PnfuOQbGZEwOEr1u7Notz9Pm+Ol38Tu7mo8/63rgeaE=
+github.com/rocky-linux/srpmproc v0.4.3/go.mod h1:x8Z2wqhV2JqRnYMhYz3thOQkfsSWjJkyX8DVGDPOb48=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
@@ -985,7 +981,6 @@ golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20211110154304-99a53858aa08/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

@@ -5,6 +5,7 @@ go_library(
 srcs = [
 "arch.go",
 "build.go",
+"clone_swap.go",
 "hashed_repositories.go",
 "import.go",
 "infrastructure.go",
@@ -46,6 +47,7 @@ go_library(
 "//vendor/github.com/go-git/go-git/v5/storage/memory",
 "//vendor/github.com/gobwas/glob",
 "//vendor/github.com/google/uuid",
+"//vendor/github.com/pkg/errors",
 "//vendor/github.com/rocky-linux/srpmproc/modulemd",
 "//vendor/github.com/rocky-linux/srpmproc/pb",
 "//vendor/github.com/rocky-linux/srpmproc/pkg/data",

@@ -395,11 +395,16 @@ obsoletes=1
 gpgcheck=0
 assumeyes=1
 keepcache=1
+best=1
 syslog_ident=peridotbuilder
 syslog_device=
 metadata_expire=0
 install_weak_deps=0
 protected_packages=
+reposdir=/dev/null
+logfile=/var/log/yum.log
+mdpolicy=group:primary
+metadata_expire=0
 user_agent=peridotbuilder`
 
 switch project.TargetVendor {
@@ -535,12 +540,6 @@ config_opts['module_setup_commands'] = [{moduleSetupCommands}]
 mockConfig += `
 config_opts['dnf.conf'] = """
 {yumConfig}
-reposdir=/dev/null
-cachedir=/var/cache/yum
-logfile=/var/log/yum.log
-mdpolicy=group:primary
-metadata_expire=0
-
 `
 
 repos, err := c.repos(project.ID.String(), arch, extra)

@@ -413,10 +413,16 @@ func (c *Controller) BuildWorkflow(ctx workflow.Context, req *peridotpb.SubmitBu
 return nil, err
 }
 
+// If there is a parent task, we need to use that ID as the parent task ID
+taskID := task.ID.String()
+if task.ParentTaskId.Valid {
+taskID = task.ParentTaskId.String
+}
+
 // Create a side repo if the build request specifies side NVRs
 // Packages specified here will be excluded from the main repo
 if len(req.SideNvrs) > 0 {
-var buildNvrs []*models.Build
+var buildNvrs []models.Build
 var excludes []string
 for _, sideNvr := range req.SideNvrs {
 if !rpmutils.NVRNoArch().MatchString(sideNvr) {
@@ -429,7 +435,7 @@ func (c *Controller) BuildWorkflow(ctx workflow.Context, req *peridotpb.SubmitBu
 nvrVersion := nvrMatch[2]
 nvrRelease := nvrMatch[3]
 
-buildNvr, err := c.db.GetBuildByPackageNameAndVersionAndRelease(nvrName, nvrVersion, nvrRelease, req.ProjectId)
+buildNvrsFromBuild, err := c.db.GetBuildByPackageNameAndVersionAndRelease(nvrName, nvrVersion, nvrRelease)
 if err != nil {
 if err == sql.ErrNoRows {
 err = fmt.Errorf("side NVR %s not found in project %s", sideNvr, req.ProjectId)
@@ -441,21 +447,23 @@ func (c *Controller) BuildWorkflow(ctx workflow.Context, req *peridotpb.SubmitBu
 return nil, err
 }
 
-artifacts, err := c.db.GetArtifactsForBuild(buildNvr.ID.String())
-if err != nil {
-err = fmt.Errorf("could not get artifacts for build %s: %v", buildNvr.ID, err)
-setInternalError(errorDetails, err)
-return nil, err
-}
+for _, buildNvr := range buildNvrsFromBuild {
+artifacts, err := c.db.GetArtifactsForBuild(buildNvr.ID.String())
+if err != nil {
+err = fmt.Errorf("could not get artifacts for build %s: %v", buildNvr.ID, err)
+setInternalError(errorDetails, err)
+return nil, err
+}
 
 for _, artifact := range artifacts {
 artifactName := strings.TrimPrefix(artifact.Name, fmt.Sprintf("%s/", buildNvr.TaskId))
 if rpmutils.NVR().MatchString(artifactName) {
 excludes = append(excludes, rpmutils.NVR().FindStringSubmatch(artifactName)[1])
+}
 }
 }
 
-buildNvrs = append(buildNvrs, buildNvr)
+buildNvrs = append(buildNvrs, buildNvrsFromBuild...)
 }
 
 repo, err := c.db.CreateRepositoryWithPackages(uuid.New().String(), req.ProjectId, true, []string{})
@@ -477,33 +485,31 @@ func (c *Controller) BuildWorkflow(ctx workflow.Context, req *peridotpb.SubmitBu
 })
 extraOptions.ExcludePackages = excludes
 
-for _, buildNvr := range buildNvrs {
-yumrepoCtx := workflow.WithChildOptions(ctx, workflow.ChildWorkflowOptions{
-TaskQueue: "yumrepofs",
-})
-updateRepoRequest := &UpdateRepoRequest{
-ProjectID: req.ProjectId,
-TaskID: &buildNvr.TaskId,
-BuildIDs: []string{buildNvr.ID.String()},
-Delete: false,
-ForceRepoId: repo.ID.String(),
-ForceNonModular: true,
-DisableSigning: true,
-DisableSetActive: true,
-}
-updateRepoTask := &yumrepofspb.UpdateRepoTask{}
-err = workflow.ExecuteChildWorkflow(yumrepoCtx, c.RepoUpdaterWorkflow, updateRepoRequest).Get(ctx, updateRepoTask)
-if err != nil {
-return nil, err
-}
+var buildIds []string
+for _, build := range buildNvrs {
+buildIds = append(buildIds, build.ID.String())
+}
+
+yumrepoCtx := workflow.WithChildOptions(ctx, workflow.ChildWorkflowOptions{
+TaskQueue: "yumrepofs",
+})
+updateRepoRequest := &UpdateRepoRequest{
+ProjectID: req.ProjectId,
+TaskID: &taskID,
+BuildIDs: buildIds,
+Delete: false,
+ForceRepoId: repo.ID.String(),
+ForceNonModular: true,
+DisableSigning: true,
+DisableSetActive: true,
+}
+updateRepoTask := &yumrepofspb.UpdateRepoTask{}
+err = workflow.ExecuteChildWorkflow(yumrepoCtx, c.RepoUpdaterWorkflow, updateRepoRequest).Get(ctx, updateRepoTask)
+if err != nil {
+return nil, err
 }
 }
 
-// If there is a parent task, we need to use that ID as the parent task ID
-taskID := task.ID.String()
-if task.ParentTaskId.Valid {
-taskID = task.ParentTaskId.String
-}
 buildID := extraOptions.ReusableBuildId
 if buildID == "" {
 err = errors.New("reusable build id not found")

peridot/builder/v1/workflow/clone_swap.go (new file, 260 lines)

@@ -0,0 +1,260 @@
+// Copyright (c) All respective contributors to the Peridot Project. All rights reserved.
+// Copyright (c) 2021-2022 Rocky Enterprise Software Foundation, Inc. All rights reserved.
+// Copyright (c) 2021-2022 Ctrl IQ, Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the copyright holder nor the names of its contributors
+// may be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+package workflow
+
+import (
+"context"
+"database/sql"
+"fmt"
+"github.com/google/uuid"
+"github.com/pkg/errors"
+"go.temporal.io/sdk/temporal"
+"go.temporal.io/sdk/workflow"
+"google.golang.org/protobuf/types/known/wrapperspb"
+"peridot.resf.org/peridot/db/models"
+peridotpb "peridot.resf.org/peridot/pb"
+yumrepofspb "peridot.resf.org/peridot/yumrepofs/pb"
+"peridot.resf.org/utils"
+"time"
+)
+
+type CloneSwapStep1 struct {
+Batches [][]string
+}
+
+func (c *Controller) getSingleProject(projectId string) (*models.Project, error) {
+projects, err := c.db.ListProjects(&peridotpb.ProjectFilters{
+Id: wrapperspb.String(projectId),
+})
+if err != nil {
+return nil, errors.Wrap(err, "could not list projects")
+}
+if len(projects) == 0 {
+return nil, errors.New("no projects found")
+}
+
+p := projects[0]
+return &p, nil
+}
+
+func (c *Controller) CloneSwapWorkflow(ctx workflow.Context, req *peridotpb.CloneSwapRequest, task *models.Task) (*peridotpb.CloneSwapTask, error) {
+var ret peridotpb.CloneSwapTask
+deferTask, errorDetails, err := c.commonCreateTask(task, &ret)
+defer deferTask()
+if err != nil {
+return nil, err
+}
+
+var step1 CloneSwapStep1
+syncCtx := workflow.WithActivityOptions(ctx, workflow.ActivityOptions{
+ScheduleToStartTimeout: 25 * time.Minute,
+StartToCloseTimeout: 60 * time.Minute,
+TaskQueue: c.mainQueue,
+RetryPolicy: &temporal.RetryPolicy{
+MaximumAttempts: 1,
+},
+})
+err = workflow.ExecuteActivity(syncCtx, c.CloneSwapActivity, req, task).Get(ctx, &step1)
+if err != nil {
+setActivityError(errorDetails, err)
+return nil, err
+}
+
+// We're going to create a workflow for each batch of builds.
+for _, batch := range step1.Batches {
+yumrepoCtx := workflow.WithChildOptions(ctx, workflow.ChildWorkflowOptions{
+TaskQueue: "yumrepofs",
+})
+taskID := task.ID.String()
+updateRepoRequest := &UpdateRepoRequest{
+ProjectID: req.TargetProjectId.Value,
+BuildIDs: batch,
+Delete: false,
+TaskID: &taskID,
+NoDeletePrevious: true,
+}
+updateRepoTask := &yumrepofspb.UpdateRepoTask{}
+err = workflow.ExecuteChildWorkflow(yumrepoCtx, c.RepoUpdaterWorkflow, updateRepoRequest).Get(yumrepoCtx, updateRepoTask)
+if err != nil {
+setActivityError(errorDetails, err)
+return nil, err
+}
+}
+
+var flattenedBuilds []string
+for _, batch := range step1.Batches {
+flattenedBuilds = append(flattenedBuilds, batch...)
+}
+
+task.Status = peridotpb.TaskStatus_TASK_STATUS_SUCCEEDED
+
+ret.TargetProjectId = req.TargetProjectId.Value
+ret.SrcProjectId = req.SrcProjectId.Value
+ret.BuildIdsLayered = flattenedBuilds
+
+return &ret, nil
+}
+
+func (c *Controller) CloneSwapActivity(ctx context.Context, req *peridotpb.CloneSwapRequest, task *models.Task) (*CloneSwapStep1, error) {
+srcProject, err := c.getSingleProject(req.SrcProjectId.Value)
+if err != nil {
+return nil, err
+}
+targetProject, err := c.getSingleProject(req.TargetProjectId.Value)
+if err != nil {
+return nil, err
+}
+
+// We're going to fetch all repositories in the source project, and then
+// copy them to the target project.
+srcRepos, err := c.db.FindRepositoriesForProject(srcProject.ID.String(), nil, false)
+if err != nil {
+return nil, err
+}
+
+beginTx, err := c.db.Begin()
+if err != nil {
+return nil, errors.Wrap(err, "could not begin transaction")
+}
+tx := c.db.UseTransaction(beginTx)
+
+for _, srcRepo := range srcRepos {
+_ = c.logToMon(
+[]string{fmt.Sprintf("Processing %s/%s", srcProject.Name, srcRepo.Name)},
+task.ID.String(),
+utils.NullStringToEmptyString(task.ParentTaskId),
+)
+
+// Check if the repo exists in the target project.
+dbRepo, err := tx.GetRepository(nil, &srcRepo.Name, &req.TargetProjectId.Value)
+if err != nil && err != sql.ErrNoRows {
+return nil, errors.Wrap(err, "could not get repository")
+}
+if dbRepo == nil {
+// The repo doesn't exist in the target project, so we need to create it.
+dbRepo, err = tx.CreateRepositoryWithPackages(srcRepo.Name, req.TargetProjectId.Value, false, srcRepo.Packages)
+if err != nil {
+return nil, errors.Wrap(err, "could not create repository")
+}
+_ = c.logToMon(
+[]string{fmt.Sprintf("Created %s/%s", targetProject.Name, dbRepo.Name)},
+task.ID.String(),
+utils.NullStringToEmptyString(task.ParentTaskId),
+)
+}
+
+allArches := append(srcProject.Archs, "src")
+for _, a := range allArches {
+allArches = append(allArches, a+"-debug")
+}
+
+for _, arch := range allArches {
+srcRepoLatestRevision, err := tx.GetLatestActiveRepositoryRevision(srcRepo.ID.String(), arch)
+if err != nil && err != sql.ErrNoRows {
+return nil, errors.Wrap(err, "could not get latest active repository revision")
+}
+if srcRepoLatestRevision == nil {
+_ = c.logToMon(
+[]string{fmt.Sprintf("Skipping %s/%s/%s because it has no active revisions", srcProject.Name, srcRepo.Name, arch)},
+task.ID.String(),
+utils.NullStringToEmptyString(task.ParentTaskId),
+)
+continue
+}
+
+id := uuid.New()
+_, err = tx.CreateRevisionForRepository(
+id.String(),
+dbRepo.ID.String(),
+arch,
+srcRepoLatestRevision.RepomdXml,
+srcRepoLatestRevision.PrimaryXml,
+srcRepoLatestRevision.FilelistsXml,
+srcRepoLatestRevision.OtherXml,
+srcRepoLatestRevision.UpdateinfoXml,
+srcRepoLatestRevision.ModuleDefaultsYaml,
+srcRepoLatestRevision.ModulesYaml,
+srcRepoLatestRevision.GroupsXml,
+srcRepoLatestRevision.UrlMappings.String(),
+)
+if err != nil {
+return nil, errors.Wrap(err, "could not create repository revision")
+}
+
+_ = c.logToMon(
+[]string{fmt.Sprintf("Created revision %s for %s/%s/%s", id.String(), targetProject.Name, srcRepo.Name, arch)},
+task.ID.String(),
+utils.NullStringToEmptyString(task.ParentTaskId),
+)
+}
+}
+
+// Now let's get all succeeded builds for the target project.
+builds, err := tx.GetSuccessfulBuildIDsAsc(req.TargetProjectId.Value)
+if err != nil {
+return nil, errors.Wrap(err, "could not list builds")
+}
+
+// We're creating batches of 200 builds to process at a time.
+var syncBatches [][]string
+for i := 0; i < len(builds); i += 200 {
+end := i + 200
+if end > len(builds) {
+end = len(builds)
+}
+syncBatches = append(syncBatches, builds[i:end])
+}
+
+_ = c.logToMon(
+[]string{
+fmt.Sprintf("Created %d batches", len(syncBatches)),
+"Following builds will be synced:",
+},
+task.ID.String(),
+utils.NullStringToEmptyString(task.ParentTaskId),
+)
+for _, id := range builds {
+_ = c.logToMon(
+[]string{fmt.Sprintf("\t* %s", id)},
+task.ID.String(),
+utils.NullStringToEmptyString(task.ParentTaskId),
+)
+}
+
+err = beginTx.Commit()
+if err != nil {
+return nil, errors.Wrap(err, "could not commit transaction")
+}
+
+return &CloneSwapStep1{
+Batches: syncBatches,
+}, nil
+}

@@ -578,6 +578,7 @@ func (c *Controller) BuildModuleStreamWorkflow(ctx workflow.Context, req *perido
 },
 ScmHash: wrapperspb.String(component.Ref),
 DisableChecks: req.DisableChecks,
+SideNvrs: req.SideNvrs,
 }
 extraOptions := &peridotpb.ExtraBuildOptions{
 DisableYumrepofsUpdates: true,
@@ -726,11 +727,12 @@ func (c *Controller) BuildModuleStreamWorkflow(ctx workflow.Context, req *perido
 
 // Generate a modulemd for each arch
 for _, arch := range streamBuildOptions.Project.Archs {
-err := fillInRpmArtifactsForModuleMd(md, streamBuildOptions, buildTask, artifactPrimaryIndex, arch, licenses, false)
+newMd := copyModuleMd(*md)
+err := fillInRpmArtifactsForModuleMd(newMd, streamBuildOptions, buildTask, artifactPrimaryIndex, arch, licenses, false)
 if err != nil {
 return nil, err
 }
-err = fillInRpmArtifactsForModuleMd(md, streamBuildOptions, buildTask, artifactPrimaryIndex, arch, licenses, true)
+err = fillInRpmArtifactsForModuleMd(newMd, streamBuildOptions, buildTask, artifactPrimaryIndex, arch, licenses, true)
 if err != nil {
 return nil, err
 }
@@ -793,8 +795,153 @@ func doesRpmPassFilter(artifact *ArtifactIndex, md *modulemd.ModuleMd, arch stri
 return true
 }
 
+func copyModuleMd(md modulemd.ModuleMd) *modulemd.ModuleMd {
+ret := modulemd.ModuleMd{
+Document: md.Document,
+Version: md.Version,
+Data: &modulemd.Data{
+Name: md.Data.Name,
+Stream: md.Data.Stream,
+Version: md.Data.Version,
+StaticContext: md.Data.StaticContext,
+Context: md.Data.Context,
+Arch: md.Data.Arch,
+Summary: md.Data.Summary,
+Description: md.Data.Description,
+ServiceLevels: map[modulemd.ServiceLevelType]*modulemd.ServiceLevel{},
+License: &modulemd.License{},
+Xmd: map[string]map[string]string{},
+Dependencies: []*modulemd.Dependencies{},
+References: &modulemd.References{},
+Profiles: map[string]*modulemd.Profile{},
+Profile: map[string]*modulemd.Profile{},
+API: &modulemd.API{},
+Filter: &modulemd.API{},
+BuildOpts: &modulemd.BuildOpts{},
+Components: &modulemd.Components{},
+Artifacts: &modulemd.Artifacts{},
+},
+}
+if md.Data.ServiceLevels != nil {
+for k, v := range md.Data.ServiceLevels {
+c := *v
+ret.Data.ServiceLevels[k] = &c
+}
+} else {
+ret.Data.ServiceLevels = nil
+}
+if md.Data.License != nil {
+c := *md.Data.License
+ret.Data.License = &c
+} else {
+ret.Data.License = nil
+}
+if md.Data.Xmd != nil {
+for k, v := range md.Data.Xmd {
+c := map[string]string{}
+for k2, v2 := range v {
+c[k2] = v2
+}
+ret.Data.Xmd[k] = c
+}
+} else {
+ret.Data.Xmd = nil
+}
+if md.Data.Dependencies != nil {
+for _, v := range md.Data.Dependencies {
+c := *v
+ret.Data.Dependencies = append(ret.Data.Dependencies, &c)
+}
+}
+if md.Data.References != nil {
+c := *md.Data.References
+ret.Data.References = &c
+} else {
+ret.Data.References = nil
+}
+if md.Data.Profiles != nil {
+for k, v := range md.Data.Profiles {
+c := *v
+ret.Data.Profiles[k] = &c
+}
+} else {
+ret.Data.Profiles = nil
+}
+if md.Data.Profile != nil {
+for k, v := range md.Data.Profile {
+c := *v
+ret.Data.Profile[k] = &c
+}
+} else {
+ret.Data.Profile = nil
+}
+if md.Data.API != nil {
+c := *md.Data.API
+ret.Data.API = &c
+} else {
+ret.Data.API = nil
+}
+if md.Data.Filter != nil {
+c := *md.Data.Filter
+ret.Data.Filter = &c
+} else {
+ret.Data.Filter = nil
+}
+if md.Data.BuildOpts != nil {
+c := *md.Data.BuildOpts
+if md.Data.BuildOpts.Rpms != nil {
+rpms := *md.Data.BuildOpts.Rpms
+c.Rpms = &rpms
+}
+ret.Data.BuildOpts = &c
+} else {
+ret.Data.BuildOpts = nil
+}
+if md.Data.Components != nil {
+c := *md.Data.Components
+if md.Data.Components.Rpms != nil {
+rpms := map[string]*modulemd.ComponentRPM{}
+for k, v := range md.Data.Components.Rpms {
+x := *v
+rpms[k] = &x
+}
+c.Rpms = rpms
+}
+if md.Data.Components.Modules != nil {
+modules := map[string]*modulemd.ComponentModule{}
+for k, v := range md.Data.Components.Modules {
+x := *v
+modules[k] = &x
+}
+c.Modules = modules
+}
+ret.Data.Components = &c
+} else {
+ret.Data.Components = nil
+}
+if md.Data.Artifacts != nil {
+c := *md.Data.Artifacts
+if md.Data.Artifacts.RpmMap != nil {
+rpmMap := map[string]map[string]*modulemd.ArtifactsRPMMap{}
+for k, v := range md.Data.Artifacts.RpmMap {
+x := map[string]*modulemd.ArtifactsRPMMap{}
+for k2, v2 := range v {
+y := *v2
+x[k2] = &y
+}
+rpmMap[k] = x
+}
+}
+ret.Data.Artifacts = &c
+} else {
+ret.Data.Artifacts = nil
+}
+
+return &ret
+}
+
 func fillInRpmArtifactsForModuleMd(md *modulemd.ModuleMd, streamBuildOptions *ModuleStreamBuildOptions, buildTask *peridotpb.ModuleStream, artifactPrimaryIndex map[string]ArtifactIndex, arch string, licenses []string, devel bool) error {
-newMd := *md
+newMd := copyModuleMd(*md)
 // Set version, context, arch and licenses
 newMd.Data.Version = streamBuildOptions.Version
 newMd.Data.Context = streamBuildOptions.Context
@@ -973,8 +1120,8 @@ func fillInRpmArtifactsForModuleMd(md *modulemd.ModuleMd, streamBuildOptions *Mo
 
 streamName := streamBuildOptions.Stream
 if devel {
-newMd.Data.Name = newMd.Data.Name + "-devel"
-streamName += "-devel"
+newMd.Data.Name = streamBuildOptions.Name + "-devel"
+streamName = streamBuildOptions.Stream + "-devel"
 }
 
 yamlBytes, err := yaml.Marshal(&newMd)

@@ -323,20 +323,13 @@ func (c *Controller) RpmImportActivity(ctx context.Context, req *peridotpb.RpmIm
 var nvr string
 for _, rpmObj := range rpms {
 realNvr := rpmObj.String()
-if rpmObj.SourceRPM() == "" && rpmObj.Architecture() == "i686" {
-realNvr = strings.ReplaceAll(realNvr, ".i686", ".src")
-}
 if nvr == "" {
 nvr = rpmObj.SourceRPM()
-if nvr == "" && rpmObj.Architecture() == "i686" {
+if nvr == "" {
 nvr = realNvr
 }
 
 break
-} else {
-if nvr != rpmObj.SourceRPM() && nvr != fmt.Sprintf("%s.rpm", realNvr) {
-return nil, fmt.Errorf("only include RPMs from one package")
-}
 }
 }
 if !rpmutils.NVR().MatchString(nvr) {

@@ -926,7 +926,7 @@ func (c *Controller) SyncCatalogActivity(req *peridotpb.SyncCatalogRequest) (*pe
 if utils.StrContains(newPackage, newBuildPackages) {
 continue
 }
-dbIDs, err := c.db.GetLatestBuildIdsByPackageName(newPackage, req.ProjectId.Value)
+dbIDs, err := c.db.GetLatestBuildIdsByPackageName(newPackage, &req.ProjectId.Value)
 if err != nil {
 if err == sql.ErrNoRows {
 continue
@@ -949,7 +949,7 @@ func (c *Controller) SyncCatalogActivity(req *peridotpb.SyncCatalogRequest) (*pe
 if utils.StrContains(newPackage, newBuildPackages) {
 continue
 }
-dbIDs, err := c.db.GetLatestBuildIdsByPackageName(newPackage, req.ProjectId.Value)
+dbIDs, err := c.db.GetLatestBuildIdsByPackageName(newPackage, &req.ProjectId.Value)
 if err != nil {
 if err == sql.ErrNoRows {
 continue

@@ -379,7 +379,7 @@ func (c *Controller) generateIndexedModuleDefaults(projectId string) (map[string
 }
 }
 
-index[fmt.Sprintf("module:%s:%s", moduleDefault.Name, moduleDefault.Stream)] = document
+index[moduleDefault.Name] = document
 }
 
 return index, nil
@@ -445,7 +445,7 @@ func (c *Controller) RepoUpdaterWorkflow(ctx workflow.Context, req *UpdateRepoRe
 updateRepoCtx := workflow.WithActivityOptions(ctx, workflow.ActivityOptions{
 ScheduleToStartTimeout: 25 * time.Hour,
 StartToCloseTimeout: 30 * time.Hour,
-HeartbeatTimeout: 3 * time.Minute,
+HeartbeatTimeout: 30 * time.Second,
 TaskQueue: c.mainQueue,
 // Yumrepofs is locking for a short period so let's not wait too long to retry
 RetryPolicy: &temporal.RetryPolicy{
@@ -695,6 +695,7 @@ func (c *Controller) UpdateRepoActivity(ctx context.Context, req *UpdateRepoRequ
 updateRepoTask.Changes = reducedChanges
 
 for _, repo := range cache.Repos {
+c.log.Infof("processing repo %s - %s", repo.Repo.Name, repo.Repo.ID.String())
 primaryRoot := repo.PrimaryRoot
 filelistsRoot := repo.FilelistsRoot
 otherRoot := repo.OtherRoot
@@ -951,7 +952,7 @@
 
 // todo(mustafa): Convert to request struct
 func (c *Controller) makeRepoChanges(tx peridotdb.Access, req *UpdateRepoRequest, errorDetails *peridotpb.TaskErrorDetails, packageName string, buildId string, moduleStream *peridotpb.ModuleStream, gpgId *string, signArtifactsTasks *keykeeperpb.BatchSignArtifactsTask, cache *Cache) (*yumrepofspb.UpdateRepoTask, error) {
-build, err := tx.GetBuild(req.ProjectID, buildId)
+build, err := c.db.GetBuildByID(buildId)
 if err != nil {
 c.log.Errorf("error getting build: %v", err)
 return nil, err
@@ -970,7 +971,7 @@ func (c *Controller) makeRepoChanges(tx peridotdb.Access, req *UpdateRepoRequest
 }
 project := projects[0]
 
-artifacts, err := tx.GetArtifactsForBuild(buildId)
+artifacts, err := c.db.GetArtifactsForBuild(buildId)
 if err != nil {
 setInternalError(errorDetails, err)
 return nil, fmt.Errorf("failed to get artifacts for build: %v", err)
@@ -980,13 +981,13 @@ func (c *Controller) makeRepoChanges(tx peridotdb.Access, req *UpdateRepoRequest
 var skipDeleteArtifacts []string
 if moduleStream == nil {
 // Get currently active artifacts
-latestBuilds, err := tx.GetLatestBuildIdsByPackageName(build.PackageName, project.ID.String())
+latestBuilds, err := c.db.GetLatestBuildIdsByPackageName(build.PackageName, nil)
 if err != nil {
 setInternalError(errorDetails, err)
 return nil, fmt.Errorf("failed to get latest build ids: %v", err)
 }
 for _, latestBuild := range latestBuilds {
-buildArtifacts, err := tx.GetArtifactsForBuild(latestBuild)
+buildArtifacts, err := c.db.GetArtifactsForBuild(latestBuild)
 if err != nil {
 setInternalError(errorDetails, err)
 return nil, fmt.Errorf("failed to get artifacts for build: %v", err)
@@ -995,13 +996,13 @@ func (c *Controller) makeRepoChanges(tx peridotdb.Access, req *UpdateRepoRequest
 }
 } else {
 // Get currently active artifacts
-latestBuilds, err := tx.GetLatestBuildsByPackageNameAndBranchName(build.PackageName, moduleStream.ImportRevision.ScmBranchName, project.ID.String())
+latestBuilds, err := c.db.GetBuildIDsByPackageNameAndBranchName(build.PackageName, moduleStream.Stream)
 if err != nil {
 setInternalError(errorDetails, err)
 return nil, fmt.Errorf("failed to get latest build ids: %v", err)
 }
 for _, latestBuild := range latestBuilds {
-buildArtifacts, err := tx.GetArtifactsForBuild(latestBuild)
+buildArtifacts, err := c.db.GetArtifactsForBuild(latestBuild)
 if err != nil {
 setInternalError(errorDetails, err)
 return nil, fmt.Errorf("failed to get artifacts for build: %v", err)
@@ -1019,7 +1020,7 @@ func (c *Controller) makeRepoChanges(tx peridotdb.Access, req *UpdateRepoRequest
 
 if req.ForceRepoId != "" {
 var err error
-repo, err := tx.GetRepository(&req.ForceRepoId, nil, nil)
+repo, err := c.db.GetRepository(&req.ForceRepoId, nil, nil)
 if err != nil {
 setInternalError(errorDetails, err)
 return nil, fmt.Errorf("failed to get repo: %v", err)
@@ -1027,7 +1028,7 @@ func (c *Controller) makeRepoChanges(tx peridotdb.Access, req *UpdateRepoRequest
 repos = models.Repositories{*repo}
 } else {
 var err error
-repos, err = tx.FindRepositoriesForPackage(req.ProjectID, packageName, false)
+repos, err = c.db.FindRepositoriesForPackage(req.ProjectID, packageName, false)
 if err != nil {
 setInternalError(errorDetails, err)
 return nil, fmt.Errorf("failed to find repo: %v", err)
@@ -1041,39 +1042,13 @@ func (c *Controller) makeRepoChanges(tx peridotdb.Access, req *UpdateRepoRequest
 }
 
 for _, repo := range repos {
+c.log.Infof("repo: %s, buildId: %s", repo.ID.String(), buildId)
 artifactArchMap, err := GenerateArchMapForArtifacts(artifacts, &project, &repo)
 if err != nil {
 setInternalError(errorDetails, err)
 return nil, err
 }
+c.log.Infof("generated arch map for build id %s", buildId)
-var moduleDefaults []*modulemd.Defaults
-if defaultsIndex != nil {
-for module, defaults := range defaultsIndex {
-if utils.StrContains(module, repo.Packages) || len(repo.Packages) == 0 {
-moduleDefaults = append(moduleDefaults, &*defaults)
-}
-}
-}
-
-var defaultsYaml []byte
-
-if len(moduleDefaults) > 0 {
-var defaultsBuf bytes.Buffer
-_, _ = defaultsBuf.WriteString("---\n")
-defaultsEncoder := yaml.NewEncoder(&defaultsBuf)
-for _, def := range moduleDefaults {
-err := defaultsEncoder.Encode(def)
-if err != nil {
-return nil, fmt.Errorf("failed to encode defaults: %v", err)
-}
-}
-err = defaultsEncoder.Close()
-if err != nil {
-return nil, fmt.Errorf("failed to close defaults encoder: %v", err)
-}
-defaultsYaml = defaultsBuf.Bytes()
-}
-
 var compiledExcludeGlobs []*CompiledGlobFilter
 
@@ -1104,6 +1079,7 @@ func (c *Controller) makeRepoChanges(tx peridotdb.Access, req *UpdateRepoRequest
 }
 
 for arch, archArtifacts := range artifactArchMap {
+c.log.Infof("arch: %s, buildId: %s", arch, buildId)
 noDebugArch := strings.TrimSuffix(arch, "-debug")
 var streamDocument *modulemd.ModuleMd
 
@@ -1185,11 +1161,13 @@ func (c *Controller) makeRepoChanges(tx peridotdb.Access, req *UpdateRepoRequest
 var currentRevision *models.RepositoryRevision
 var groupsXml string
 if cache.Repos[idArchNoDebug] != nil {
+c.log.Infof("found cache for %s", idArchNoDebug)
 if cache.Repos[idArchNoDebug].Modulemd != nil {
 modulesRoot = cache.Repos[idArchNoDebug].Modulemd
 }
 }
 if cache.Repos[idArch] != nil {
+c.log.Infof("found cache for %s", idArch)
 if cache.Repos[idArch].PrimaryRoot != nil {
 primaryRoot = *cache.Repos[idArch].PrimaryRoot
 }
@@ -1201,16 +1179,18 @@ func (c *Controller) makeRepoChanges(tx peridotdb.Access, req *UpdateRepoRequest
 }
 groupsXml = cache.Repos[idArch].GroupsXml
 } else {
+c.log.Infof("no cache for %s", idArch)
 var noDebugRevision *models.RepositoryRevision
 
-currentRevision, err = tx.GetLatestActiveRepositoryRevision(repo.ID.String(), arch)
+currentRevision, err = c.db.GetLatestActiveRepositoryRevision(repo.ID.String(), arch)
 if err != nil {
 if err != sql.ErrNoRows {
 return nil, fmt.Errorf("failed to get latest active repository revision: %v", err)
 }
 }
-if strings.HasSuffix(arch, "-debug") {
-noDebugRevision, err = tx.GetLatestActiveRepositoryRevision(repo.ID.String(), noDebugArch)
+if strings.HasSuffix(arch, "-debug") && moduleStream != nil {
+c.log.Infof("arch has debug and module stream is not nil")
+noDebugRevision, err = c.db.GetLatestActiveRepositoryRevision(repo.ID.String(), noDebugArch)
 if err != nil {
 if err != sql.ErrNoRows {
 return nil, fmt.Errorf("failed to get latest active repository revision: %v", err)
@@ -1221,6 +1201,7 @@ func (c *Controller) makeRepoChanges(tx peridotdb.Access, req *UpdateRepoRequest
 }
 
 if currentRevision != nil {
+c.log.Infof("current revision is not nil")
 if currentRevision.PrimaryXml != "" {
 var primaryXmlGz []byte
 var primaryXml []byte
@@ -1303,22 +1284,13 @@ func (c *Controller) makeRepoChanges(tx peridotdb.Access, req *UpdateRepoRequest
 }
 }
 
-moduleArtifacts := map[string]bool{}
-if moduleStream != nil && len(modulesRoot) > 0 {
-for _, md := range modulesRoot {
-if md.Data.Name == moduleStream.Name && md.Data.Stream == moduleStream.Stream {
-for _, artifact := range md.Data.Artifacts.Rpms {
-moduleArtifacts[artifact] = true
-}
-}
-}
-}
-
+c.log.Infof("processing %d artifacts", len(archArtifacts))
 for _, artifact := range archArtifacts {
 // This shouldn't happen
 if !artifact.Metadata.Valid {
 continue
 }
+c.log.Infof("processing artifact %s", artifact.Name)
+
 var name string
 base := strings.TrimSuffix(filepath.Base(artifact.Name), ".rpm")
@@ -1356,6 +1328,7 @@ func (c *Controller) makeRepoChanges(tx peridotdb.Access, req *UpdateRepoRequest
 }
 }
 }
+c.log.Infof("should add %s: %v", artifact.Name, shouldAdd)
 
 baseNoRpm := strings.Replace(filepath.Base(artifact.Name), ".rpm", "", 1)
 
@@ -1388,6 +1361,8 @@ func (c *Controller) makeRepoChanges(tx peridotdb.Access, req *UpdateRepoRequest
 return nil, err
 }
 
+c.log.Infof("unmarshalled metadata for %s", artifact.Name)
+
 if gpgId != nil {
 newObjectKey := fmt.Sprintf("%s/%s/%s", filepath.Dir(artifact.Name), *gpgId, filepath.Base(artifact.Name))
 
@@ -1430,7 +1405,10 @@ func (c *Controller) makeRepoChanges(tx peridotdb.Access, req *UpdateRepoRequest
 break
 }
 } else {
-// If a module stream, search for a module entry
+if !strings.Contains(primaryPackage.Version.Rel, ".module+") {
+continue
+}
+
 for _, streamArtifact := range artifacts {
 var rpmName string
 var rpmVersion string
@@ -1458,10 +1436,16 @@ func (c *Controller) makeRepoChanges(tx peridotdb.Access, req *UpdateRepoRequest
 }
 }
 if primaryIndex != nil {
-changes.ModifiedPackages = append(changes.ModifiedPackages, baseNoRpm)
+c.log.Infof("found primary index %d", *primaryIndex)
+if !utils.StrContains(baseNoRpm, changes.ModifiedPackages) && !utils.StrContains(baseNoRpm, changes.AddedPackages) {
+changes.ModifiedPackages = append(changes.ModifiedPackages, baseNoRpm)
+}
 primaryRoot.Packages[*primaryIndex] = pkgPrimary.Packages[0]
 } else {
-changes.AddedPackages = append(changes.AddedPackages, baseNoRpm)
+c.log.Infof("did not find primary index")
+if !utils.StrContains(baseNoRpm, changes.AddedPackages) && !utils.StrContains(baseNoRpm, changes.ModifiedPackages) {
+changes.AddedPackages = append(changes.AddedPackages, baseNoRpm)
|
||||||
|
}
|
||||||
primaryRoot.Packages = append(primaryRoot.Packages, pkgPrimary.Packages[0])
|
primaryRoot.Packages = append(primaryRoot.Packages, pkgPrimary.Packages[0])
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1497,6 +1481,7 @@ func (c *Controller) makeRepoChanges(tx peridotdb.Access, req *UpdateRepoRequest
|
||||||
otherRoot.Packages = append(otherRoot.Packages, pkgOther.Packages[0])
|
otherRoot.Packages = append(otherRoot.Packages, pkgOther.Packages[0])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
c.log.Infof("processed %d artifacts", len(archArtifacts))
|
||||||
|
|
||||||
// First let's delete older artifacts
|
// First let's delete older artifacts
|
||||||
// Instead of doing re-slicing, let's just not add anything matching the artifacts
|
// Instead of doing re-slicing, let's just not add anything matching the artifacts
|
||||||
|
@ -1516,6 +1501,7 @@ func (c *Controller) makeRepoChanges(tx peridotdb.Access, req *UpdateRepoRequest
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !shouldAdd {
|
if !shouldAdd {
|
||||||
|
c.log.Infof("deleting %s", pkg.Location.Href)
|
||||||
deleteIds = append(deleteIds, pkg.Checksum.Value)
|
deleteIds = append(deleteIds, pkg.Checksum.Value)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1554,6 +1540,7 @@ func (c *Controller) makeRepoChanges(tx peridotdb.Access, req *UpdateRepoRequest
|
||||||
var moduleIndex *int
|
var moduleIndex *int
|
||||||
for i, moduleMd := range modulesRoot {
|
for i, moduleMd := range modulesRoot {
|
||||||
if moduleMd.Data.Name == moduleStream.Name && moduleMd.Data.Stream == moduleStream.Stream {
|
if moduleMd.Data.Name == moduleStream.Name && moduleMd.Data.Stream == moduleStream.Stream {
|
||||||
|
c.log.Infof("found existing module entry for %s:%s", moduleStream.Name, moduleStream.Stream)
|
||||||
moduleIndex = &*&i
|
moduleIndex = &*&i
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
@ -1562,11 +1549,43 @@ func (c *Controller) makeRepoChanges(tx peridotdb.Access, req *UpdateRepoRequest
|
||||||
changes.ModifiedModules = append(changes.ModifiedModules, fmt.Sprintf("%s:%s", moduleStream.Name, moduleStream.Stream))
|
changes.ModifiedModules = append(changes.ModifiedModules, fmt.Sprintf("%s:%s", moduleStream.Name, moduleStream.Stream))
|
||||||
modulesRoot[*moduleIndex] = streamDocument
|
modulesRoot[*moduleIndex] = streamDocument
|
||||||
} else {
|
} else {
|
||||||
|
c.log.Infof("adding new module entry for %s:%s", moduleStream.Name, moduleStream.Stream)
|
||||||
changes.AddedModules = append(changes.AddedModules, fmt.Sprintf("%s:%s", moduleStream.Name, moduleStream.Stream))
|
changes.AddedModules = append(changes.AddedModules, fmt.Sprintf("%s:%s", moduleStream.Name, moduleStream.Stream))
|
||||||
modulesRoot = append(modulesRoot, streamDocument)
|
modulesRoot = append(modulesRoot, streamDocument)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var moduleDefaults []*modulemd.Defaults
|
||||||
|
if defaultsIndex != nil {
|
||||||
|
for module, defaults := range defaultsIndex {
|
||||||
|
for _, rootModule := range modulesRoot {
|
||||||
|
if module == rootModule.Data.Name {
|
||||||
|
moduleDefaults = append(moduleDefaults, defaults)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaultsYaml []byte
|
||||||
|
|
||||||
|
if len(moduleDefaults) > 0 {
|
||||||
|
var defaultsBuf bytes.Buffer
|
||||||
|
_, _ = defaultsBuf.WriteString("---\n")
|
||||||
|
defaultsEncoder := yaml.NewEncoder(&defaultsBuf)
|
||||||
|
for _, def := range moduleDefaults {
|
||||||
|
err := defaultsEncoder.Encode(def)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to encode defaults: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err = defaultsEncoder.Close()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to close defaults encoder: %v", err)
|
||||||
|
}
|
||||||
|
defaultsYaml = defaultsBuf.Bytes()
|
||||||
|
}
|
||||||
|
|
||||||
nRepo := repo
|
nRepo := repo
|
||||||
cache.Repos[idArch] = &CachedRepo{
|
cache.Repos[idArch] = &CachedRepo{
|
||||||
Arch: arch,
|
Arch: arch,
|
||||||
|
@ -1582,6 +1601,7 @@ func (c *Controller) makeRepoChanges(tx peridotdb.Access, req *UpdateRepoRequest
|
||||||
if strings.HasSuffix(arch, "-debug") || arch == "src" {
|
if strings.HasSuffix(arch, "-debug") || arch == "src" {
|
||||||
cache.Repos[idArch].Modulemd = nil
|
cache.Repos[idArch].Modulemd = nil
|
||||||
}
|
}
|
||||||
|
c.log.Infof("set cache for %s", idArch)
|
||||||
|
|
||||||
repoTask.Changes = append(repoTask.Changes, changes)
|
repoTask.Changes = append(repoTask.Changes, changes)
|
||||||
}
|
}
|
||||||
|
@ -1595,5 +1615,7 @@ func (c *Controller) makeRepoChanges(tx peridotdb.Access, req *UpdateRepoRequest
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
c.log.Infof("finished processing %d artifacts", len(artifacts))
|
||||||
|
|
||||||
return repoTask, nil
|
return repoTask, nil
|
||||||
}
|
}
|
||||||
|
|
|
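The new defaults handling above collects the modulemd defaults that match modules present in the repo and serializes them into one multi-document YAML stream that is later written out with the repo metadata. A minimal standalone sketch of that encoding pattern (not part of the diff), assuming gopkg.in/yaml.v3 and a simplified stand-in Defaults type rather than srpmproc's real modulemd.Defaults:

package main

import (
	"bytes"
	"fmt"

	"gopkg.in/yaml.v3"
)

// Defaults is a stand-in for the modulemd defaults document; the real type
// lives in srpmproc's modulemd package and carries more fields.
type Defaults struct {
	Document string `yaml:"document"`
	Version  int    `yaml:"version"`
	Data     struct {
		Module string `yaml:"module"`
		Stream string `yaml:"stream"`
	} `yaml:"data"`
}

func encodeDefaults(defaults []*Defaults) ([]byte, error) {
	if len(defaults) == 0 {
		return nil, nil
	}

	var buf bytes.Buffer
	// Leading document separator, then one YAML document per defaults entry.
	// The yaml encoder inserts "---" between subsequent documents on its own.
	_, _ = buf.WriteString("---\n")
	enc := yaml.NewEncoder(&buf)
	for _, def := range defaults {
		if err := enc.Encode(def); err != nil {
			return nil, fmt.Errorf("failed to encode defaults: %v", err)
		}
	}
	if err := enc.Close(); err != nil {
		return nil, fmt.Errorf("failed to close defaults encoder: %v", err)
	}
	return buf.Bytes(), nil
}

func main() {
	d := &Defaults{Document: "modulemd-defaults", Version: 1}
	d.Data.Module = "nodejs"
	d.Data.Stream = "16"

	out, err := encodeDefaults([]*Defaults{d})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}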
@@ -106,6 +106,8 @@ func mn(_ *cobra.Command, _ []string) {
 w.Worker.RegisterWorkflow(w.WorkflowController.RpmImportWorkflow)
 w.Worker.RegisterWorkflow(w.WorkflowController.RpmLookasideBatchImportWorkflow)
 w.Worker.RegisterWorkflow(w.WorkflowController.CreateHashedRepositoriesWorkflow)
+w.Worker.RegisterWorkflow(w.WorkflowController.CloneSwapWorkflow)
+w.Worker.RegisterActivity(w.WorkflowController.CloneSwapActivity)
 }
 w.Worker.RegisterWorkflow(w.WorkflowController.ProvisionWorkerWorkflow)
 w.Worker.RegisterWorkflow(w.WorkflowController.DestroyWorkerWorkflow)
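The two added registrations expose the clone-swap workflow and activity to the worker; Temporal only dispatches code that has been registered on a worker for the task queue. A minimal sketch of that registration flow with the Temporal Go SDK (go.temporal.io/sdk), using placeholder workflow and activity bodies and an assumed task queue name, not peridot's actual controller:

package main

import (
	"context"
	"log"
	"time"

	"go.temporal.io/sdk/client"
	"go.temporal.io/sdk/worker"
	"go.temporal.io/sdk/workflow"
)

// CloneSwapWorkflow is a placeholder workflow body; peridot's real workflow
// lives on its WorkflowController and takes the request and task as arguments.
func CloneSwapWorkflow(ctx workflow.Context) error {
	ao := workflow.ActivityOptions{StartToCloseTimeout: time.Hour}
	ctx = workflow.WithActivityOptions(ctx, ao)
	return workflow.ExecuteActivity(ctx, CloneSwapActivity).Get(ctx, nil)
}

// CloneSwapActivity is a placeholder activity body.
func CloneSwapActivity(ctx context.Context) error {
	return nil
}

func main() {
	// client.Dial is available in recent SDK versions; older ones use client.NewClient.
	c, err := client.Dial(client.Options{})
	if err != nil {
		log.Fatalln("unable to create Temporal client", err)
	}
	defer c.Close()

	// Workers only run workflows and activities registered on them,
	// which is why the new workflow and activity must be added here.
	// The queue name is a made-up example.
	w := worker.New(c, "peridot-main-queue", worker.Options{})
	w.RegisterWorkflow(CloneSwapWorkflow)
	w.RegisterActivity(CloneSwapActivity)

	if err := w.Run(worker.InterruptCh()); err != nil {
		log.Fatalln("unable to start worker", err)
	}
}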
@@ -6,7 +6,7 @@ local utils = import 'ci/utils.jsonnet';

 resfdeploy.new({
 name: 'yumrepofsupdater',
-replicas: if kubernetes.prod() then 4 else 1,
+replicas: if kubernetes.prod() then 1 else 1,
 dbname: 'peridot',
 backend: true,
 migrate: true,
@@ -21,12 +21,12 @@ resfdeploy.new({
 value: db.dsn_legacy('peridot', false, 'yumrepofsupdater'),
 },
 requests: if kubernetes.prod() then {
-cpu: '0.5',
-memory: '1G',
+cpu: '2',
+memory: '15G',
 },
-limits: if kubernetes.prod() then {
-cpu: '3',
-memory: '64G',
+node_pool_request: {
+key: 'peridot.rockylinux.org/workflow-tolerates-arch',
+value: 'amd64',
 },
 service_account_options: {
 annotations: {
@@ -53,9 +53,11 @@ type Access interface {
 GetBuildCount() (int64, error)
 CreateBuildBatch(projectId string) (string, error)
 AttachBuildToBatch(buildId string, batchId string) error
-ListBuilds(projectId string, page int32, limit int32) (models.Builds, error)
+ListBuilds(filters *peridotpb.BuildFilters, projectId string, page int32, limit int32) (models.Builds, error)
+GetSuccessfulBuildIDsAsc(projectId string) ([]string, error)
 BuildCountInProject(projectId string) (int64, error)
 GetBuild(projectId string, buildId string) (*models.Build, error)
+GetBuildByID(buildId string) (*models.Build, error)
 GetBuildByTaskIdAndPackageId(taskId string, packageId string) (*models.Build, error)
 GetBuildBatch(projectId string, batchId string, batchFilter *peridotpb.BatchFilter, page int32, limit int32) (models.Builds, error)
 ListBuildBatches(projectId string, batchId *string, page int32, limit int32) (models.BuildBatches, error)
@@ -64,9 +66,9 @@ type Access interface {
 LockNVRA(nvra string) error
 UnlockNVRA(nvra string) error
 NVRAExists(nvra string) (bool, error)
-GetBuildByPackageNameAndVersionAndRelease(name string, version string, release string, projectId string) (*models.Build, error)
-GetLatestBuildIdsByPackageName(name string, projectId string) ([]string, error)
-GetLatestBuildsByPackageNameAndBranchName(name string, branchName string, projectId string) ([]string, error)
+GetBuildByPackageNameAndVersionAndRelease(name string, version string, release string) (models.Builds, error)
+GetLatestBuildIdsByPackageName(name string, projectId *string) ([]string, error)
+GetBuildIDsByPackageNameAndBranchName(name string, branchName string) ([]string, error)
 GetActiveBuildIdsByTaskArtifactGlob(taskArtifactGlob string, projectId string) ([]string, error)
 GetAllBuildIdsByPackageName(name string, projectId string) ([]string, error)
@@ -108,7 +108,11 @@ func (a *Access) AttachBuildToBatch(buildId string, batchId string) error {
 return nil
 }

-func (a *Access) ListBuilds(projectId string, page int32, limit int32) (models.Builds, error) {
+func (a *Access) ListBuilds(filters *peridotpb.BuildFilters, projectId string, page int32, limit int32) (models.Builds, error) {
+if filters == nil {
+filters = &peridotpb.BuildFilters{}
+}
+
 var ret models.Builds
 err := a.query.Select(
 &ret,
@@ -128,11 +132,14 @@ func (a *Access) ListBuilds(projectId string, page int32, limit int32) (models.B
 from builds b
 inner join tasks t on t.id = b.task_id
 inner join packages p on p.id = b.package_id
-where b.project_id = $1
+where
+b.project_id = $1
+and ($2 :: int is null or $2 :: int = 0 or t.status = $2 :: int)
 order by b.created_at desc
-limit $2 offset $3
+limit $3 offset $4
 `,
 projectId,
+filters.Status,
 limit,
 utils.GetOffset(page, limit),
 )
@@ -143,6 +150,29 @@ func (a *Access) ListBuilds(projectId string, page int32, limit int32) (models.B
 return ret, nil
 }

+func (a *Access) GetSuccessfulBuildIDsAsc(projectId string) ([]string, error) {
+var ret []string
+err := a.query.Select(
+&ret,
+`
+select
+b.id
+from builds b
+inner join tasks t on t.id = b.task_id
+where
+b.project_id = $1
+and t.status = 3
+order by b.created_at asc
+`,
+projectId,
+)
+if err != nil {
+return nil, err
+}
+
+return ret, nil
+}
+
 func (a *Access) BuildCountInProject(projectId string) (int64, error) {
 var count int64
 err := a.query.Get(&count, "select count(*) from imports where project_id = $1", projectId)
@@ -186,6 +216,38 @@ func (a *Access) GetBuild(projectId string, buildId string) (*models.Build, erro
 return &ret, nil
 }

+func (a *Access) GetBuildByID(buildId string) (*models.Build, error) {
+var ret models.Build
+err := a.query.Get(
+&ret,
+`
+select
+b.id,
+b.created_at,
+b.package_id,
+p.name as package_name,
+b.package_version_id,
+b.task_id,
+b.project_id,
+t.status as task_status,
+t.response as task_response,
+t.metadata as task_metadata
+from builds b
+inner join tasks t on t.id = b.task_id
+inner join packages p on p.id = b.package_id
+where
+b.id = $1
+order by b.created_at desc
+`,
+buildId,
+)
+if err != nil {
+return nil, err
+}
+
+return &ret, nil
+}
+
 func (a *Access) GetBuildByTaskIdAndPackageId(taskId string, packageId string) (*models.Build, error) {
 var ret models.Build
 err := a.query.Get(
@@ -358,9 +420,9 @@ func (a *Access) NVRAExists(nvra string) (bool, error) {
 return false, nil
 }

-func (a *Access) GetBuildByPackageNameAndVersionAndRelease(name string, version string, release string, projectId string) (*models.Build, error) {
-var ret models.Build
-err := a.query.Get(
+func (a *Access) GetBuildByPackageNameAndVersionAndRelease(name string, version string, release string) (models.Builds, error) {
+var ret models.Builds
+err := a.query.Select(
 &ret,
 `
 select
@@ -379,15 +441,12 @@ func (a *Access) GetBuildByPackageNameAndVersionAndRelease(name string, version
 inner join packages p on p.id = b.package_id
 inner join package_versions pv on pv.id = b.package_version_id
 where
-b.project_id = $1
-and p.name = $2
-and pv.version = $3
-and pv.release = $4
+p.name = $1
+and pv.version = $2
+and pv.release = $3
 and t.status = 3
 order by b.created_at desc
-limit 1
 `,
-projectId,
 name,
 version,
 release,
@@ -396,10 +455,10 @@ func (a *Access) GetBuildByPackageNameAndVersionAndRelease(name string, version
 return nil, err
 }

-return &ret, nil
+return ret, nil
 }

-func (a *Access) GetLatestBuildIdsByPackageName(name string, projectId string) ([]string, error) {
+func (a *Access) GetLatestBuildIdsByPackageName(name string, projectId *string) ([]string, error) {
 var ret []string
 err := a.query.Select(
 &ret,
@@ -412,10 +471,8 @@ func (a *Access) GetLatestBuildIdsByPackageName(name string, projectId string) (
 inner join project_package_versions ppv on ppv.package_version_id = b.package_version_id
 where
 p.name = $2
-and t.status = 3
-and ppv.active_in_repo = true
 and ppv.project_id = b.project_id
-and b.project_id = $1
+and ($1 :: uuid is null or b.project_id = $1 :: uuid)
 order by b.created_at asc
 `,
 projectId,
@@ -428,7 +485,7 @@ func (a *Access) GetLatestBuildIdsByPackageName(name string, projectId string) (
 return ret, nil
 }

-func (a *Access) GetLatestBuildsByPackageNameAndBranchName(name string, branchName string, projectId string) ([]string, error) {
+func (a *Access) GetBuildIDsByPackageNameAndBranchName(name string, branchName string) ([]string, error) {
 var ret []string
 err := a.query.Select(
 &ret,
@@ -443,15 +500,11 @@ func (a *Access) GetLatestBuildsByPackageNameAndBranchName(name string, branchNa
 where
 b.project_id = $3
 and p.name = $1
-and ppv.active_in_repo = true
-and ppv.project_id = b.project_id
-and ir.scm_branch_name = $2
-and t.status = 3
+and ir.scm_branch_name like '%-stream-' || $2
 order by b.created_at asc
 `,
 name,
 branchName,
-projectId,
 )
 if err != nil {
 return nil, err
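The reworked ListBuilds keeps a single static SQL statement and pushes the optional status filter into the query with a "($2 :: int is null or $2 :: int = 0 or ...)" guard, so an unset filter (proto default 0) matches everything. A small sketch of the same pattern with sqlx against Postgres, using hypothetical helper and struct names (the builds/tasks tables and their columns are taken from the diff above):

package db

import (
	"github.com/jmoiron/sqlx"
)

// Build mirrors only the columns needed for the example; the real model has more fields.
type Build struct {
	ID     string `db:"id"`
	Status int32  `db:"task_status"`
}

// ListBuildsByStatus returns builds for a project, optionally filtered by task status.
// Passing status == 0 (the proto default) disables the filter, which is exactly what
// the "$2 :: int is null or $2 :: int = 0 or t.status = $2 :: int" guard expresses in SQL.
func ListBuildsByStatus(q *sqlx.DB, projectID string, status int32, limit, offset int32) ([]Build, error) {
	var ret []Build
	err := q.Select(
		&ret,
		`
		select b.id, t.status as task_status
		from builds b
		inner join tasks t on t.id = b.task_id
		where
			b.project_id = $1
			and ($2 :: int is null or $2 :: int = 0 or t.status = $2 :: int)
		order by b.created_at desc
		limit $3 offset $4
		`,
		projectID,
		status,
		limit,
		offset,
	)
	if err != nil {
		return nil, err
	}
	return ret, nil
}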
@@ -57,7 +57,7 @@ func (s *Server) ListBuilds(ctx context.Context, req *peridotpb.ListBuildsReques

 page := utils.MinPage(req.Page)
 limit := utils.MinLimit(req.Limit)
-builds, err := s.db.ListBuilds(req.ProjectId, page, limit)
+builds, err := s.db.ListBuilds(req.Filters, req.ProjectId, page, limit)
 if err != nil {
 s.log.Errorf("could not list builds: %v", err)
 return nil, utils.CouldNotRetrieveObjects
@@ -448,3 +448,71 @@ func (s *Server) LookasideFileUpload(ctx context.Context, req *peridotpb.Lookasi
 Digest: sha256Sum,
 }, nil
 }

+func (s *Server) CloneSwap(ctx context.Context, req *peridotpb.CloneSwapRequest) (*peridotpb.AsyncTask, error) {
+if err := req.ValidateAll(); err != nil {
+return nil, err
+}
+if err := s.checkPermission(ctx, ObjectProject, req.TargetProjectId.Value, PermissionManage); err != nil {
+return nil, err
+}
+if err := s.checkPermission(ctx, ObjectProject, req.SrcProjectId.Value, PermissionView); err != nil {
+return nil, err
+}
+user, err := utils.UserFromContext(ctx)
+if err != nil {
+return nil, err
+}
+
+rollback := true
+beginTx, err := s.db.Begin()
+if err != nil {
+s.log.Error(err)
+return nil, utils.InternalError
+}
+defer func() {
+if rollback {
+_ = beginTx.Rollback()
+}
+}()
+tx := s.db.UseTransaction(beginTx)
+
+task, err := tx.CreateTask(user, "noarch", peridotpb.TaskType_TASK_TYPE_CLONE_SWAP, &req.TargetProjectId.Value, nil)
+if err != nil {
+s.log.Errorf("could not create task: %v", err)
+return nil, utils.InternalError
+}
+
+taskProto, err := task.ToProto(false)
+if err != nil {
+return nil, status.Errorf(codes.Internal, "could not marshal task: %v", err)
+}
+
+rollback = false
+err = beginTx.Commit()
+if err != nil {
+return nil, status.Error(codes.Internal, "could not save, try again")
+}
+
+_, err = s.temporal.ExecuteWorkflow(
+context.Background(),
+client.StartWorkflowOptions{
+ID: task.ID.String(),
+TaskQueue: MainTaskQueue,
+},
+s.temporalWorker.WorkflowController.CloneSwapWorkflow,
+req,
+task,
+)
+if err != nil {
+s.log.Errorf("could not start workflow: %v", err)
+_ = s.db.SetTaskStatus(task.ID.String(), peridotpb.TaskStatus_TASK_STATUS_FAILED)
+return nil, err
+}
+
+return &peridotpb.AsyncTask{
+TaskId: task.ID.String(),
+Subtasks: []*peridotpb.Subtask{taskProto},
+Done: false,
+}, nil
+}
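CloneSwap follows the transaction idiom used elsewhere in the API server: open a transaction, arm a rollback flag in a defer, create the task row, and only disarm the flag right before Commit, so any early return rolls the task back and the workflow is started only after the commit succeeds. A minimal sketch of that idiom with database/sql (table and column names are hypothetical):

package example

import (
	"database/sql"
	"fmt"
)

// createTask sketches the rollback-flag idiom from the handler: every early
// return before Commit leaves rollback == true, so the deferred Rollback
// undoes the partially created task row.
func createTask(db *sql.DB) (string, error) {
	rollback := true
	tx, err := db.Begin()
	if err != nil {
		return "", fmt.Errorf("begin: %v", err)
	}
	defer func() {
		if rollback {
			_ = tx.Rollback()
		}
	}()

	var taskID string
	// Hypothetical tasks table; the real schema lives in peridot's migrations.
	err = tx.QueryRow(`insert into tasks (type) values ($1) returning id`, 20).Scan(&taskID)
	if err != nil {
		return "", fmt.Errorf("insert task: %v", err)
	}

	// Flip the flag before committing so a successful Commit is not rolled back.
	rollback = false
	if err := tx.Commit(); err != nil {
		return "", fmt.Errorf("commit: %v", err)
	}
	return taskID, nil
}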
@@ -83,6 +83,13 @@ service ProjectService {
 };
 }

+rpc CloneSwap(CloneSwapRequest) returns (resf.peridot.v1.AsyncTask) {
+option (google.api.http) = {
+post: "/v1/projects/{target_project_id=*}/cloneswap"
+body: "*"
+};
+}
+
 rpc ListExternalRepositories(ListExternalRepositoriesRequest) returns (ListExternalRepositoriesResponse) {
 option (google.api.http) = {
 get: "/v1/projects/{project_id=*}/external_repositories"
@@ -311,6 +318,17 @@ message CreateHashedRepositoriesTask {
 repeated string repo_revisions = 1;
 }

+message CloneSwapRequest {
+google.protobuf.StringValue target_project_id = 1 [(validate.rules).message.required = true];
+google.protobuf.StringValue src_project_id = 2 [(validate.rules).message.required = true];
+}
+
+message CloneSwapTask {
+string target_project_id = 1;
+string src_project_id = 2;
+repeated string build_ids_layered = 3;
+}
+
 message LookasideFileUploadRequest {
 string file = 1 [(validate.rules).string.min_bytes = 1];
 }
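The new rpc is exposed both over gRPC and, via the http option, as POST /v1/projects/{target_project_id}/cloneswap. A sketch of calling it through the client that protoc-gen-go-grpc would generate for ProjectService; the generated import path, endpoint address, and project IDs below are assumptions for illustration, not taken from the repository:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/protobuf/types/known/wrapperspb"

	// Assumed import path for the generated ProjectService client.
	peridotpb "peridot.resf.org/peridot/pb"
)

func main() {
	// Endpoint and credentials are placeholders.
	conn, err := grpc.Dial("localhost:8080", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	client := peridotpb.NewProjectServiceClient(conn)

	// Both IDs are required StringValue wrappers per the proto validation rules.
	task, err := client.CloneSwap(context.Background(), &peridotpb.CloneSwapRequest{
		TargetProjectId: wrapperspb.String("11111111-1111-1111-1111-111111111111"),
		SrcProjectId:    wrapperspb.String("22222222-2222-2222-2222-222222222222"),
	})
	if err != nil {
		log.Fatalf("clone swap: %v", err)
	}
	log.Printf("started clone swap task %s", task.TaskId)
}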
@@ -73,6 +73,7 @@ enum TaskType {
 TASK_TYPE_CREATE_HASHED_REPOSITORIES = 17;
 TASK_TYPE_LOOKASIDE_FILE_UPLOAD = 18;
 TASK_TYPE_RPM_LOOKASIDE_BATCH_IMPORT = 19;
+TASK_TYPE_CLONE_SWAP = 20;
 }

 enum TaskStatus {
@@ -121,6 +121,14 @@ func NullStringToPointer(s sql.NullString) *string {
 return &s.String
 }

+func NullStringToEmptyString(s sql.NullString) string {
+if !s.Valid {
+return ""
+}
+
+return s.String
+}
+
 func Int64(i int64) *int64 {
 return &i
 }
vendor/github.com/rocky-linux/srpmproc/pkg/rpmutils/regex.go (generated, vendored, 2 changes)

@@ -3,4 +3,4 @@ package rpmutils
 import "regexp"

 // Nvr is a regular expression that matches a NVR.
-var Nvr = regexp.MustCompile("^(\\S+)-([\\w~%.+]+)-(\\w+(?:\\.[\\w+]+)+?)(?:\\.rpm)?$")
+var Nvr = regexp.MustCompile("^(\\S+)-([\\w~%.+]+)-(\\w+(?:\\.[\\w~%+]+)+?)(?:\\.rpm)?$")
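The only change to the vendored pattern is the release character class: segments after the first dot may now contain '~' and '%', which appear in bootstrap and pre-release style NVRs. A small demo that compiles the updated pattern and prints its capture groups for two illustrative NVRs (the sample strings are made up; the second one only matches with the widened class):

package main

import (
	"fmt"
	"regexp"
)

// The updated Nvr pattern from srpmproc v0.4.3: release segments after the
// first dot now accept '~' and '%' in addition to word characters and '+'.
var nvr = regexp.MustCompile("^(\\S+)-([\\w~%.+]+)-(\\w+(?:\\.[\\w~%+]+)+?)(?:\\.rpm)?$")

func main() {
	samples := []string{
		"kernel-5.14.0-70.el9",
		"gcc-12.1.1-1.el9~bootstrap",
	}
	for _, s := range samples {
		m := nvr.FindStringSubmatch(s)
		if m == nil {
			fmt.Printf("%s: no match\n", s)
			continue
		}
		fmt.Printf("%s -> name=%s version=%s release=%s\n", s, m[1], m[2], m[3])
	}
}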
vendor/modules.txt (vendored, 3 changes)

@@ -453,7 +453,7 @@ github.com/prometheus/procfs/internal/util
 github.com/rivo/uniseg
 # github.com/robfig/cron v1.2.0
 github.com/robfig/cron
-# github.com/rocky-linux/srpmproc v0.4.2
+# github.com/rocky-linux/srpmproc v0.4.3
 ## explicit
 github.com/rocky-linux/srpmproc/modulemd
 github.com/rocky-linux/srpmproc/pb
@@ -636,7 +636,6 @@ golang.org/x/sys/plan9
 golang.org/x/sys/unix
 golang.org/x/sys/windows
 # golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
-## explicit
 golang.org/x/term
 # golang.org/x/text v0.3.7
 golang.org/x/text/encoding