Add kernelmanager

This commit is contained in:
Mustafa Gezen 2023-10-04 02:55:49 +02:00
parent 91fc789dcb
commit 60e664b8ae
Signed by: mustafa
GPG Key ID: DCDF010D946438C1
137 changed files with 55194 additions and 378 deletions

View File

@ -24,6 +24,11 @@
# We want to allow certain POST methods to set body to something other than "*".
# Useful for non-JSON payloads.
- "core::0136::http-body"
# I don't really understand this requirement but looks like Google has a special
# use case when it comes to user provided IDs.
- "core::0133::request-id-field"
# We don't require update mask support
- "core::0134::request-mask-required"
- included_paths:
- "third_party/**/*.proto"
- "vendor/**/*.proto"

View File

@ -6,3 +6,4 @@ bazel-out
bazel-testlogs
bazel-peridot
node_modules
vendor/go.resf.org

View File

@ -44,216 +44,202 @@ const (
EnvVarStoragePathStyle EnvVar = "STORAGE_PATH_STYLE"
)
var defaultCliFlagsDatabaseOnly = []cli.Flag{
&cli.StringFlag{
Name: "database-url",
Aliases: []string{"d"},
Usage: "database url",
EnvVars: []string{string(EnvVarDatabaseURL)},
Value: "postgres://postgres:postgres@localhost:5432/postgres?sslmode=disable",
},
// WithDatabaseFlags returns the standard database CLI flags.
// An empty appName falls back to "postgres" as the database name in the
// default connection URL.
func WithDatabaseFlags(appName string) []cli.Flag {
	dbName := appName
	if dbName == "" {
		dbName = "postgres"
	}
	databaseURL := &cli.StringFlag{
		Name:    "database-url",
		Aliases: []string{"d"},
		Usage:   "database url",
		EnvVars: []string{string(EnvVarDatabaseURL)},
		Value:   "postgres://postgres:postgres@localhost:5432/" + dbName + "?sslmode=disable",
	}
	return []cli.Flag{databaseURL}
}
var defaultCliFlagsTemporal = append(defaultCliFlagsDatabaseOnly, []cli.Flag{
&cli.StringFlag{
Name: "temporal-namespace",
Aliases: []string{"n"},
Usage: "temporal namespace",
EnvVars: []string{string(EnvVarTemporalNamespace)},
Value: "default",
},
&cli.StringFlag{
Name: "temporal-address",
Aliases: []string{"a"},
Usage: "temporal address",
EnvVars: []string{string(EnvVarTemporalAddress)},
Value: "localhost:7233",
},
&cli.StringFlag{
Name: "temporal-task-queue",
Aliases: []string{"q"},
Usage: "temporal task queue",
EnvVars: []string{string(EnvVarTemporalTaskQueue)},
},
}...)
func WithTemporalFlags(defaultNamespace string, defaultTaskQueue string) []cli.Flag {
if defaultNamespace == "" {
defaultNamespace = "default"
}
var defaultCliFlagsNoAuth = append(defaultCliFlagsDatabaseOnly, []cli.Flag{
&cli.IntFlag{
Name: "grpc-port",
Usage: "gRPC port",
EnvVars: []string{string(EnvVarGRPCPort)},
Value: 8080,
},
&cli.IntFlag{
Name: "gateway-port",
Usage: "gRPC gateway port",
EnvVars: []string{string(EnvVarGatewayPort)},
Value: 8081,
},
}...)
var defaultCliFlagsNoAuthTemporal = append(defaultCliFlagsTemporal, []cli.Flag{
&cli.IntFlag{
Name: "grpc-port",
Usage: "gRPC port",
EnvVars: []string{string(EnvVarGRPCPort)},
Value: 8080,
},
&cli.IntFlag{
Name: "gateway-port",
Usage: "gRPC gateway port",
EnvVars: []string{string(EnvVarGatewayPort)},
Value: 8081,
},
}...)
var defaultCliFlags = append(defaultCliFlagsNoAuth, []cli.Flag{
&cli.StringFlag{
Name: "oidc-issuer",
Usage: "OIDC issuer",
EnvVars: []string{string(EnvVarFrontendOIDCIssuer)},
Value: "https://accounts.rockylinux.org/auth/realms/rocky",
},
&cli.StringFlag{
Name: "required-oidc-group",
Usage: "OIDC group that is required to access the frontend",
EnvVars: []string{string(EnvVarFrontendRequiredOIDCGroup)},
},
}...)
var defaultCliFlagsTemporalClient = append(defaultCliFlagsNoAuthTemporal, []cli.Flag{
&cli.StringFlag{
Name: "oidc-issuer",
Usage: "OIDC issuer",
EnvVars: []string{string(EnvVarFrontendOIDCIssuer)},
Value: "https://accounts.rockylinux.org/auth/realms/rocky",
},
&cli.StringFlag{
Name: "required-oidc-group",
Usage: "OIDC group that is required to access the frontend",
EnvVars: []string{string(EnvVarFrontendRequiredOIDCGroup)},
},
}...)
var defaultFrontendNoAuthCliFlags = []cli.Flag{
&cli.IntFlag{
Name: "port",
Usage: "frontend port",
EnvVars: []string{string(EnvVarFrontendPort)},
Value: 9111,
},
return []cli.Flag{
&cli.StringFlag{
Name: "temporal-namespace",
Aliases: []string{"n"},
Usage: "temporal namespace",
EnvVars: []string{string(EnvVarTemporalNamespace)},
Value: defaultNamespace,
},
&cli.StringFlag{
Name: "temporal-address",
Aliases: []string{"a"},
Usage: "temporal address",
EnvVars: []string{string(EnvVarTemporalAddress)},
Value: "localhost:7233",
},
&cli.StringFlag{
Name: "temporal-task-queue",
Aliases: []string{"q"},
Usage: "temporal task queue",
EnvVars: []string{string(EnvVarTemporalTaskQueue)},
Value: defaultTaskQueue,
},
}
}
var defaultFrontendCliFlags = append(defaultFrontendNoAuthCliFlags, []cli.Flag{
&cli.StringFlag{
Name: "oidc-issuer",
Usage: "OIDC issuer",
EnvVars: []string{string(EnvVarFrontendOIDCIssuer)},
Value: "https://accounts.rockylinux.org/auth/realms/rocky",
},
&cli.StringFlag{
Name: "oidc-client-id",
Usage: "OIDC client ID",
EnvVars: []string{string(EnvVarFrontendOIDCClientID)},
},
&cli.StringFlag{
Name: "oidc-client-secret",
Usage: "OIDC client secret",
EnvVars: []string{string(EnvVarFrontendOIDCClientSecret)},
},
&cli.StringFlag{
Name: "oidc-userinfo-override",
Usage: "OIDC userinfo override",
EnvVars: []string{string(EnvVarFrontendOIDCUserInfoOverride)},
},
&cli.StringFlag{
Name: "required-oidc-group",
Usage: "OIDC group that is required to access the frontend",
EnvVars: []string{string(EnvVarFrontendRequiredOIDCGroup)},
},
&cli.StringFlag{
Name: "self",
Usage: "Endpoint pointing to the frontend",
EnvVars: []string{string(EnvVarFrontendSelf)},
},
}...)
func WithGrpcFlags(defaultPort int) []cli.Flag {
if defaultPort == 0 {
defaultPort = 8080
}
var storageFlags = []cli.Flag{
&cli.StringFlag{
Name: "storage-endpoint",
Usage: "storage endpoint",
EnvVars: []string{string(EnvVarStorageEndpoint)},
Value: "",
},
&cli.StringFlag{
Name: "storage-connection-string",
Usage: "storage connection string",
EnvVars: []string{string(EnvVarStorageConnectionString)},
},
&cli.StringFlag{
Name: "storage-region",
Usage: "storage region",
EnvVars: []string{string(EnvVarStorageRegion)},
// RESF default region
Value: "us-east-2",
},
&cli.BoolFlag{
Name: "storage-secure",
Usage: "storage secure",
EnvVars: []string{string(EnvVarStorageSecure)},
Value: true,
},
&cli.BoolFlag{
Name: "storage-path-style",
Usage: "storage path style",
EnvVars: []string{string(EnvVarStoragePathStyle)},
Value: false,
},
return []cli.Flag{
&cli.IntFlag{
Name: "grpc-port",
Usage: "gRPC port",
EnvVars: []string{string(EnvVarGRPCPort)},
Value: defaultPort,
},
}
}
// WithDefaultCliFlags adds the default cli flags to the app.
func WithDefaultCliFlags(flags ...cli.Flag) []cli.Flag {
return append(defaultCliFlags, flags...)
// WithGatewayFlags returns the gRPC gateway port flag.
// A defaultPort of 0 selects the conventional gateway port 8081.
func WithGatewayFlags(defaultPort int) []cli.Flag {
	port := defaultPort
	if port == 0 {
		port = 8081
	}
	gatewayPort := &cli.IntFlag{
		Name:    "gateway-port",
		Usage:   "gRPC gateway port",
		EnvVars: []string{string(EnvVarGatewayPort)},
		Value:   port,
	}
	return []cli.Flag{gatewayPort}
}
// WithDefaultCliFlagsTemporalClient adds the default cli flags to the app.
func WithDefaultCliFlagsTemporalClient(flags ...cli.Flag) []cli.Flag {
return append(defaultCliFlagsTemporalClient, flags...)
// WithOidcFlags returns the OIDC issuer and required-group flags.
// An empty defaultOidcIssuer falls back to the RESF accounts realm;
// defaultGroup may be empty (no group requirement by default).
func WithOidcFlags(defaultOidcIssuer string, defaultGroup string) []cli.Flag {
	issuer := defaultOidcIssuer
	if issuer == "" {
		issuer = "https://accounts.rockylinux.org/auth/realms/rocky"
	}
	issuerFlag := &cli.StringFlag{
		Name:    "oidc-issuer",
		Usage:   "OIDC issuer",
		EnvVars: []string{string(EnvVarFrontendOIDCIssuer)},
		Value:   issuer,
	}
	groupFlag := &cli.StringFlag{
		Name:    "required-oidc-group",
		Usage:   "OIDC group that is required to access the frontend",
		EnvVars: []string{string(EnvVarFrontendRequiredOIDCGroup)},
		Value:   defaultGroup,
	}
	return []cli.Flag{issuerFlag, groupFlag}
}
// WithDefaultCliFlagsNoAuth adds the default cli flags to the app.
func WithDefaultCliFlagsNoAuth(flags ...cli.Flag) []cli.Flag {
return append(defaultCliFlagsNoAuth, flags...)
// WithFrontendFlags returns the frontend HTTP port flag.
// A defaultPort of 0 selects the conventional frontend port 9111.
func WithFrontendFlags(defaultPort int) []cli.Flag {
	port := defaultPort
	if port == 0 {
		port = 9111
	}
	frontendPort := &cli.IntFlag{
		Name:    "port",
		Usage:   "frontend port",
		EnvVars: []string{string(EnvVarFrontendPort)},
		Value:   port,
	}
	return []cli.Flag{frontendPort}
}
// WithDefaultCliFlagsNoAuthTemporal adds the default cli flags to the app.
func WithDefaultCliFlagsNoAuthTemporal(flags ...cli.Flag) []cli.Flag {
return append(defaultCliFlagsNoAuthTemporal, flags...)
}
func WithFrontendAuthFlags(defaultOidcIssuer string) []cli.Flag {
if defaultOidcIssuer == "" {
defaultOidcIssuer = "https://accounts.rockylinux.org/auth/realms/rocky"
}
// WithDefaultCliFlagsTemporal adds the default cli flags to the app.
func WithDefaultCliFlagsTemporal(flags ...cli.Flag) []cli.Flag {
return append(defaultCliFlagsTemporal, flags...)
}
// WithDefaultCliFlagsDatabaseOnly adds the default cli flags to the app.
func WithDefaultCliFlagsDatabaseOnly(flags ...cli.Flag) []cli.Flag {
return append(defaultCliFlagsDatabaseOnly, flags...)
}
// WithDefaultFrontendNoAuthCliFlags adds the default frontend cli flags to the app.
func WithDefaultFrontendNoAuthCliFlags(flags ...cli.Flag) []cli.Flag {
return append(defaultFrontendNoAuthCliFlags, flags...)
}
// WithDefaultFrontendCliFlags adds the default frontend cli flags to the app.
func WithDefaultFrontendCliFlags(flags ...cli.Flag) []cli.Flag {
return append(defaultFrontendCliFlags, flags...)
return []cli.Flag{
&cli.StringFlag{
Name: "oidc-issuer",
Usage: "OIDC issuer",
EnvVars: []string{string(EnvVarFrontendOIDCIssuer)},
Value: defaultOidcIssuer,
},
&cli.StringFlag{
Name: "oidc-client-id",
Usage: "OIDC client ID",
EnvVars: []string{string(EnvVarFrontendOIDCClientID)},
},
&cli.StringFlag{
Name: "oidc-client-secret",
Usage: "OIDC client secret",
EnvVars: []string{string(EnvVarFrontendOIDCClientSecret)},
},
&cli.StringFlag{
Name: "oidc-userinfo-override",
Usage: "OIDC userinfo override",
EnvVars: []string{string(EnvVarFrontendOIDCUserInfoOverride)},
},
&cli.StringFlag{
Name: "required-oidc-group",
Usage: "OIDC group that is required to access the frontend",
EnvVars: []string{string(EnvVarFrontendRequiredOIDCGroup)},
},
&cli.StringFlag{
Name: "self",
Usage: "Endpoint pointing to the frontend",
EnvVars: []string{string(EnvVarFrontendSelf)},
},
}
}
// WithStorageFlags adds the storage flags to the app.
func WithStorageFlags(flags ...cli.Flag) []cli.Flag {
return append(storageFlags, flags...)
// WithStorageFlags returns the object-storage CLI flags: endpoint,
// connection string, region, TLS toggle, and path-style addressing.
func WithStorageFlags() []cli.Flag {
	flags := make([]cli.Flag, 0, 5)
	flags = append(flags, &cli.StringFlag{
		Name:    "storage-endpoint",
		Usage:   "storage endpoint",
		EnvVars: []string{string(EnvVarStorageEndpoint)},
		Value:   "",
	})
	flags = append(flags, &cli.StringFlag{
		Name:    "storage-connection-string",
		Usage:   "storage connection string",
		EnvVars: []string{string(EnvVarStorageConnectionString)},
	})
	flags = append(flags, &cli.StringFlag{
		Name:    "storage-region",
		Usage:   "storage region",
		EnvVars: []string{string(EnvVarStorageRegion)},
		// RESF default region
		Value: "us-east-2",
	})
	flags = append(flags, &cli.BoolFlag{
		Name:    "storage-secure",
		Usage:   "storage secure",
		EnvVars: []string{string(EnvVarStorageSecure)},
		Value:   true,
	})
	flags = append(flags, &cli.BoolFlag{
		Name:    "storage-path-style",
		Usage:   "storage path style",
		EnvVars: []string{string(EnvVarStoragePathStyle)},
		Value:   false,
	})
	return flags
}
// WithFlags concatenates any number of flag slices into a single slice,
// preserving order. Returns nil when no flags are supplied.
func WithFlags(flags ...[]cli.Flag) []cli.Flag {
	var merged []cli.Flag
	for _, group := range flags {
		merged = append(merged, group...)
	}
	return merged
}
// FlagsToGRPCServerOptions converts the cli flags to gRPC server options.
@ -311,19 +297,6 @@ func GetTemporalClientFromFlags(ctx *cli.Context, opts client.Options) (client.C
)
}
// ChangeDefaultForEnvVar changes the default value of a flag based on an environment variable.
func ChangeDefaultForEnvVar(envVar EnvVar, newDefault string) {
// Check if the environment variable is set.
if _, ok := os.LookupEnv(string(envVar)); ok {
return
}
// Change the default value.
if err := os.Setenv(string(envVar), newDefault); err != nil {
LogFatalf("failed to set environment variable %s: %v", envVar, err)
}
}
// RareUseChangeDefault changes the default value of an arbitrary environment variable.
func RareUseChangeDefault(envVar string, newDefault string) {
// Check if the environment variable is set.
@ -336,8 +309,3 @@ func RareUseChangeDefault(envVar string, newDefault string) {
LogFatalf("failed to set environment variable %s: %v", envVar, err)
}
}
// ChangeDefaultDatabaseURL changes the default value of the database url based on an environment variable.
func ChangeDefaultDatabaseURL(appName string) {
ChangeDefaultForEnvVar(EnvVarDatabaseURL, "postgres://postgres:postgres@localhost:5432/"+appName+"?sslmode=disable")
}

View File

@ -20,7 +20,7 @@ go_library(
"caching.go",
"forge.go",
],
importpath = "go.resf.org/peridot/tools/mothership/worker_server/forge",
importpath = "go.resf.org/peridot/base/go/forge",
visibility = ["//visibility:public"],
deps = ["//vendor/github.com/go-git/go-git/v5/plumbing/transport"],
)

View File

@ -34,4 +34,5 @@ type Forge interface {
GetRemote(repo string) string
GetCommitViewerURL(repo string, commit string) string
EnsureRepositoryExists(auth *Authenticator, repo string) error
WithNamespace(namespace string) Forge
}

View File

@ -17,10 +17,10 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "github",
srcs = ["github.go"],
importpath = "go.resf.org/peridot/tools/mothership/worker_server/forge/github",
importpath = "go.resf.org/peridot/base/go/forge/github",
visibility = ["//visibility:public"],
deps = [
"//tools/mothership/worker_server/forge",
"//base/go/forge",
"//vendor/github.com/go-git/go-git/v5/plumbing/transport/http",
"//vendor/github.com/golang-jwt/jwt/v5:jwt",
],

View File

@ -22,7 +22,7 @@ import (
transport_http "github.com/go-git/go-git/v5/plumbing/transport/http"
"github.com/golang-jwt/jwt/v5"
_ "github.com/golang-jwt/jwt/v5"
"go.resf.org/peridot/tools/mothership/worker_server/forge"
"go.resf.org/peridot/base/go/forge"
"net/http"
"path/filepath"
"strconv"
@ -259,3 +259,9 @@ func (f *Forge) EnsureRepositoryExists(auth *forge.Authenticator, repo string) e
return nil
}
// WithNamespace returns a shallow copy of this forge with its organization
// replaced by namespace; the receiver is left unmodified.
func (f *Forge) WithNamespace(namespace string) forge.Forge {
	newF := *f
	newF.organization = namespace
	return &newF
}

12
base/go/forge/gitlab/BUILD vendored Normal file
View File

@ -0,0 +1,12 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "gitlab",
srcs = ["gitlab.go"],
importpath = "go.resf.org/peridot/base/go/forge/gitlab",
visibility = ["//visibility:public"],
deps = [
"//base/go/forge",
"//vendor/github.com/go-git/go-git/v5/plumbing/transport/http",
],
)

View File

@ -0,0 +1,163 @@
package gitlab
import (
"bytes"
"encoding/json"
"fmt"
transport_http "github.com/go-git/go-git/v5/plumbing/transport/http"
"go.resf.org/peridot/base/go/forge"
"io"
"net/http"
"net/url"
"time"
)
// Forge is a forge.Forge implementation backed by a GitLab instance.
type Forge struct {
	// host is the GitLab hostname, e.g. "gitlab.example.com".
	host string
	// group is the GitLab group (namespace) repositories live under.
	group string
	// username and password are the basic-auth git credentials; password
	// also doubles as the API bearer token in EnsureRepositoryExists.
	username string
	password string
	// authorName and authorEmail identify the commit author.
	authorName  string
	authorEmail string
	// shouldMakeRepoPublic selects "public" vs "private" visibility for
	// repositories created by EnsureRepositoryExists.
	shouldMakeRepoPublic bool
}
// New constructs a GitLab-backed Forge for the given host and group.
// username/password are used for basic-auth git operations, authorName and
// authorEmail are recorded as the commit author, and shouldMakeRepoPublic
// controls the visibility of repositories created on demand.
func New(host string, group string, username string, password string, authorName string, authorEmail string, shouldMakeRepoPublic bool) *Forge {
	f := Forge{
		host:                 host,
		group:                group,
		username:             username,
		password:             password,
		authorName:           authorName,
		authorEmail:          authorEmail,
		shouldMakeRepoPublic: shouldMakeRepoPublic,
	}
	return &f
}
// GetAuthenticator returns a basic-auth authenticator carrying this
// forge's credentials and author identity.
func (f *Forge) GetAuthenticator() (*forge.Authenticator, error) {
	auth := &forge.Authenticator{
		AuthMethod: &transport_http.BasicAuth{
			Username: f.username,
			Password: f.password,
		},
		AuthorName:  f.authorName,
		AuthorEmail: f.authorEmail,
		// We're assuming never expiring tokens for now, so set the
		// expiry 100 years out.
		Expires: time.Now().AddDate(100, 0, 0),
	}
	return auth, nil
}
// GetRemote returns the HTTPS clone URL for repo under this forge's group.
func (f *Forge) GetRemote(repo string) string {
	return "https://" + f.host + "/" + f.group + "/" + repo
}
// GetCommitViewerURL returns the GitLab web URL for viewing a single
// commit of repo.
func (f *Forge) GetCommitViewerURL(repo string, commit string) string {
	return fmt.Sprintf("https://%s/%s/%s/-/commit/%s", f.host, f.group, repo, commit)
}
// EnsureRepositoryExists checks whether group/repo exists on the GitLab
// host and creates it through the v4 REST API if it does not.
//
// auth must carry a *transport_http.BasicAuth whose password is a GitLab
// token with API access; repository visibility follows
// f.shouldMakeRepoPublic. Returns nil when the repository already exists
// or was created successfully.
func (f *Forge) EnsureRepositoryExists(auth *forge.Authenticator, repo string) error {
	// The API bearer token is the basic-auth password.
	basicAuth, ok := auth.AuthMethod.(*transport_http.BasicAuth)
	if !ok {
		return fmt.Errorf("unsupported auth method %T, expected *transport_http.BasicAuth", auth.AuthMethod)
	}
	token := basicAuth.Password

	client := &http.Client{
		Timeout: time.Second * 10,
	}

	// Check if the repo exists.
	urlEncodedPath := url.PathEscape(fmt.Sprintf("%s/%s", f.group, repo))
	endpoint := fmt.Sprintf("https://%s/api/v4/projects/%s", f.host, urlEncodedPath)
	req, err := http.NewRequest("GET", endpoint, nil)
	if err != nil {
		return err
	}
	req.Header.Add("Authorization", "Bearer "+token)
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	// Drain and close the body so the transport can reuse the connection.
	_, _ = io.Copy(io.Discard, resp.Body)
	_ = resp.Body.Close()
	if resp.StatusCode == 200 {
		// Repo exists, we're done
		return nil
	}

	// Repo doesn't exist, create it.
	// First get the namespace id required by the create call.
	endpoint = fmt.Sprintf("https://%s/api/v4/namespaces/%s", f.host, url.PathEscape(f.group))
	req, err = http.NewRequest("GET", endpoint, nil)
	if err != nil {
		return err
	}
	req.Header.Add("Authorization", "Bearer "+token)
	resp, err = client.Do(req)
	if err != nil {
		return err
	}
	if resp.StatusCode != 200 {
		_, _ = io.Copy(io.Discard, resp.Body)
		_ = resp.Body.Close()
		return fmt.Errorf("namespace %s does not exist", f.group)
	}
	mapBody := map[string]any{}
	err = json.NewDecoder(resp.Body).Decode(&mapBody)
	_ = resp.Body.Close()
	if err != nil {
		return err
	}
	// JSON numbers decode as float64.
	namespaceId, ok := mapBody["id"].(float64)
	if !ok {
		return fmt.Errorf("namespace %s response has no numeric id", f.group)
	}

	mapBody = map[string]any{
		"name":         repo,
		"namespace_id": namespaceId,
	}
	if f.shouldMakeRepoPublic {
		mapBody["visibility"] = "public"
	} else {
		mapBody["visibility"] = "private"
	}

	endpoint = fmt.Sprintf("https://%s/api/v4/projects", f.host)
	body, err := json.Marshal(mapBody)
	if err != nil {
		return err
	}
	req, err = http.NewRequest("POST", endpoint, bytes.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Add("Content-Type", "application/json")
	req.Header.Add("Authorization", "Bearer "+token)
	resp, err = client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 201 && resp.StatusCode != 200 {
		body, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("failed to create repo %s: %s", repo, string(body))
	}

	return nil
}
// WithNamespace returns a copy of this forge scoped to the given GitLab
// group; the receiver is left unmodified.
func (f *Forge) WithNamespace(namespace string) forge.Forge {
	clone := *f
	clone.group = namespace
	return &clone
}

View File

@ -98,7 +98,7 @@ var frontendHtmlTemplate = `
name="viewport"
content="width=device-width, initial-scale=1, viewport-fit=cover"
/>
<title>{{.Title}}</title>
<title>{{-Title-}}</title>
<link rel="icon" type="image/png" href="/_ga/favicon.png" />
@ -127,6 +127,7 @@ var frontendHtmlTemplate = `
}
</script>
{{end}}
{{-Beta-}}
{{if .Prefix}}
<script>window.__peridot_prefix__ = '{{.Prefix}}'.replace('\\', '');</script>
{{end}}
@ -144,7 +145,7 @@ var frontendUnauthenticated = `
name="viewport"
content="width=device-width, initial-scale=1, viewport-fit=cover"
/>
<title>{{.Title}} - Unauthenticated</title>
<title>{{-Title-}} - Ouch</title>
<link rel="icon" type="image/png" href="/_ga/favicon.png" />
@ -222,45 +223,50 @@ func (info *FrontendInfo) frontendAuthHandler(provider OidcProvider, h http.Hand
}
}
ctx := r.Context()
// get auth cookie
authCookie, err := r.Cookie(frontendAuthCookieKey)
if err != nil {
// redirect to login
http.Redirect(w, r, info.Self+"/auth/oidc/login", http.StatusFound)
return
}
// verify the token
userInfo, err := provider.UserInfo(r.Context(), oauth2.StaticTokenSource(&oauth2.Token{
AccessToken: authCookie.Value,
TokenType: "Bearer",
}))
if err != nil {
// redirect to login
http.Redirect(w, r, info.Self+"/auth/oidc/login", http.StatusFound)
return
}
// Check if the user is in the group
var claims oidcClaims
err = userInfo.Claims(&claims)
if err != nil {
// redirect to login
http.Redirect(w, r, info.Self+"/auth/oidc/login", http.StatusFound)
return
}
groups := claims.Groups
if info.OIDCGroup != "" {
if !Contains(groups, info.OIDCGroup) {
// show unauthenticated page
info.renderUnauthorized(w, fmt.Sprintf("User is not in group %s", info.OIDCGroup))
// only redirect if not allowed unauthenticated
if !info.AllowUnauthenticated {
// redirect to login
http.Redirect(w, r, info.Self+"/auth/oidc/login", http.StatusFound)
return
}
} else {
// verify the token
userInfo, err := provider.UserInfo(r.Context(), oauth2.StaticTokenSource(&oauth2.Token{
AccessToken: authCookie.Value,
TokenType: "Bearer",
}))
if err != nil {
// redirect to login
http.Redirect(w, r, info.Self+"/auth/oidc/login", http.StatusFound)
return
}
}
// Add the user to the context
ctx := context.WithValue(r.Context(), "user", userInfo)
// Check if the user is in the group
var claims oidcClaims
err = userInfo.Claims(&claims)
if err != nil {
// redirect to login
http.Redirect(w, r, info.Self+"/auth/oidc/login", http.StatusFound)
return
}
groups := claims.Groups
if info.OIDCGroup != "" {
if !Contains(groups, info.OIDCGroup) {
// show unauthenticated page
info.renderUnauthorized(w, fmt.Sprintf("User is not in group %s", info.OIDCGroup))
return
}
}
// Add the user to the context
ctx = context.WithValue(ctx, "user", userInfo)
}
h.ServeHTTP(w, r.WithContext(ctx))
})
@ -290,8 +296,8 @@ func FrontendServer(info *FrontendInfo, embedfs *embed.FS) error {
if info.Title == "" {
info.Title = "Peridot"
}
newTemplate = strings.ReplaceAll(newTemplate, "{{.Title}}", info.Title)
newUnauthenticatedTemplate = strings.ReplaceAll(newUnauthenticatedTemplate, "{{.Title}}", info.Title)
newTemplate = strings.ReplaceAll(newTemplate, "{{-Title-}}", info.Title)
newUnauthenticatedTemplate = strings.ReplaceAll(newUnauthenticatedTemplate, "{{-Title-}}", info.Title)
info.unauthenticatedTemplate = newUnauthenticatedTemplate
@ -354,7 +360,15 @@ func FrontendServer(info *FrontendInfo, embedfs *embed.FS) error {
http.HandleFunc(prefix+"/", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/html")
tmpl, err := template.New("index.html").Parse(newTemplate)
// Set beta script (if beta basically set window.__beta__ = true)
srvTemplate := newTemplate
betaScript := ""
if r.Header.Get("x-peridot-beta") == "true" {
betaScript = "<script>window.__beta__ = true;</script>"
}
srvTemplate = strings.ReplaceAll(srvTemplate, "{{-Beta-}}", betaScript)
tmpl, err := template.New("index.html").Parse(srvTemplate)
if err != nil {
info.renderUnauthorized(w, fmt.Sprintf("Failed to parse template: %v", err))
return
@ -523,8 +537,8 @@ func FrontendServer(info *FrontendInfo, embedfs *embed.FS) error {
}
var handler http.Handler = nil
// if auth is enabled as well as AllowUnauthenticated is false, then wrap the handler with the auth handler
if !info.NoAuth && !info.AllowUnauthenticated {
// if auth is enabled as well, then wrap the handler with the auth handler
if !info.NoAuth {
handler = info.frontendAuthHandler(provider, http.DefaultServeMux)
} else {
handler = http.DefaultServeMux

8
base/go/kv/BUILD vendored Normal file
View File

@ -0,0 +1,8 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "kv",
srcs = ["kv.go"],
importpath = "go.resf.org/peridot/base/go/kv",
visibility = ["//visibility:public"],
)

17
base/go/kv/dynamodb/BUILD vendored Normal file
View File

@ -0,0 +1,17 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "dynamodb",
srcs = ["dynamodb.go"],
importpath = "go.resf.org/peridot/base/go/kv/dynamodb",
visibility = ["//visibility:public"],
deps = [
"//base/go/awsutils",
"//base/go/kv",
"//base/proto:pb",
"//vendor/github.com/aws/aws-sdk-go/aws",
"//vendor/github.com/aws/aws-sdk-go/aws/session",
"//vendor/github.com/aws/aws-sdk-go/service/dynamodb",
"@org_golang_google_protobuf//proto",
],
)

View File

@ -0,0 +1,328 @@
package dynamodb
import (
"context"
"crypto/rand"
"errors"
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/dynamodb"
"go.resf.org/peridot/base/go/awsutils"
"go.resf.org/peridot/base/go/kv"
basepb "go.resf.org/peridot/base/go/pb"
"google.golang.org/protobuf/proto"
"strings"
"time"
)
// DynamoDB is a key-value storage backend on a single DynamoDB table.
// Items are addressed by a ("Key", "Path") composite: the key's namespace
// (first path segment) is the partition key and the remainder of the path
// is the range key.
type DynamoDB struct {
	// db is the DynamoDB service client.
	db *dynamodb.DynamoDB
	// tableName is the table all operations run against.
	tableName string
}
// New creates a new DynamoDB storage backend.
//
// endpoint, when non-empty, overrides the AWS endpoint (useful for a local
// DynamoDB). The table named tableName is created with a ("Key" HASH,
// "Path" RANGE) schema if it does not already exist.
func New(endpoint string, tableName string) (*DynamoDB, error) {
	awsCfg := &aws.Config{}
	awsutils.FillOutConfig(awsCfg)

	if endpoint != "" {
		awsCfg.Endpoint = aws.String(endpoint)
	}

	sess, err := session.NewSession(awsCfg)
	if err != nil {
		return nil, err
	}

	svc := dynamodb.New(sess)

	// Create the table if it doesn't exist.
	// First check if the table exists.
	_, err = svc.DescribeTable(&dynamodb.DescribeTableInput{
		TableName: aws.String(tableName),
	})
	if err != nil {
		// NOTE(review): any DescribeTable error (not only "table not
		// found") falls through here, so a transient failure surfaces as
		// a confusing CreateTable error. Consider matching
		// ResourceNotFoundException specifically.
		_, err = svc.CreateTable(&dynamodb.CreateTableInput{
			TableName: aws.String(tableName),
			AttributeDefinitions: []*dynamodb.AttributeDefinition{
				{
					AttributeName: aws.String("Key"),
					AttributeType: aws.String("S"),
				},
				{
					AttributeName: aws.String("Path"),
					AttributeType: aws.String("S"),
				},
			},
			KeySchema: []*dynamodb.KeySchemaElement{
				{
					AttributeName: aws.String("Key"),
					KeyType:       aws.String("HASH"),
				},
				{
					AttributeName: aws.String("Path"),
					KeyType:       aws.String("RANGE"),
				},
			},
			ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
				ReadCapacityUnits:  aws.Int64(5),
				WriteCapacityUnits: aws.Int64(5),
			},
		})
		if err != nil {
			return nil, err
		}
	}

	return &DynamoDB{
		db:        svc,
		tableName: tableName,
	}, nil
}
// Get fetches the pair stored at key. The key must be of the form
// /<namespace>/<path...>; kv.ErrNoNamespace is returned otherwise, and
// kv.ErrNotFound when no item exists.
func (d *DynamoDB) Get(ctx context.Context, key string) (*kv.Pair, error) {
	parts := strings.Split(strings.TrimPrefix(key, "/"), "/")
	if len(parts) < 2 {
		return nil, kv.ErrNoNamespace
	}
	namespace := parts[0]
	path := strings.Join(parts[1:], "/")

	result, err := d.db.GetItem(&dynamodb.GetItemInput{
		TableName: aws.String(d.tableName),
		Key: map[string]*dynamodb.AttributeValue{
			"Key":  {S: aws.String(namespace)},
			"Path": {S: aws.String(path)},
		},
	})
	if err != nil {
		return nil, err
	}
	if result.Item == nil {
		return nil, kv.ErrNotFound
	}

	// NOTE(review): the returned Pair.Key is the namespace only, not the
	// full input key — confirm this is the intended contract.
	return &kv.Pair{
		Key:   *result.Item["Key"].S,
		Value: result.Item["Value"].B,
	}, nil
}
// Set stores value under key. The key must be of the form
// /<namespace>/<path...>; kv.ErrNoNamespace is returned otherwise.
// Existing items are overwritten.
func (d *DynamoDB) Set(ctx context.Context, key string, value []byte) error {
	parts := strings.Split(strings.TrimPrefix(key, "/"), "/")
	if len(parts) < 2 {
		return kv.ErrNoNamespace
	}
	namespace := parts[0]
	path := strings.Join(parts[1:], "/")

	_, err := d.db.PutItem(&dynamodb.PutItemInput{
		TableName: aws.String(d.tableName),
		Item: map[string]*dynamodb.AttributeValue{
			"Key":   {S: aws.String(namespace)},
			"Path":  {S: aws.String(path)},
			"Value": {B: value},
		},
	})
	return err
}
// Delete removes the item stored at key, if any. The key must be of the
// form /<namespace>/<path...>; kv.ErrNoNamespace is returned otherwise.
func (d *DynamoDB) Delete(ctx context.Context, key string) error {
	parts := strings.Split(strings.TrimPrefix(key, "/"), "/")
	if len(parts) < 2 {
		return kv.ErrNoNamespace
	}
	namespace := parts[0]
	path := strings.Join(parts[1:], "/")

	_, err := d.db.DeleteItem(&dynamodb.DeleteItemInput{
		TableName: aws.String(d.tableName),
		Key: map[string]*dynamodb.AttributeValue{
			"Key":  {S: aws.String(namespace)},
			"Path": {S: aws.String(path)},
		},
	})
	return err
}
// RangePrefix returns up to pageSize pairs whose path begins with the
// given prefix, within the prefix's namespace (first path segment).
// A non-empty pageToken (returned by a previous call) resumes the scan;
// tokens are persisted under the "_internals" namespace with an
// "ExpiresAt" attribute 48 hours out (presumably consumed by DynamoDB
// TTL — confirm TTL is enabled on the table). pageSize is clamped to
// [1, 100] with a default of 20.
func (d *DynamoDB) RangePrefix(ctx context.Context, prefix string, pageSize int32, pageToken string) (*kv.Query, error) {
	if pageSize <= 0 {
		pageSize = 20
	}
	if pageSize > 100 {
		pageSize = 100
	}

	// Check if there is a page token.
	var fromKey string
	var fromPath string
	if pageToken != "" {
		// Get the page token from the database.
		res, err := d.Get(ctx, fmt.Sprintf("/_internals/page_tokens/%s", pageToken))
		if err != nil {
			if errors.Is(err, kv.ErrNotFound) {
				return nil, kv.ErrPageTokenNotFound
			}
			return nil, err
		}

		// Parse the page token.
		pt := &basepb.DynamoDbPageToken{}
		err = proto.Unmarshal(res.Value, pt)
		if err != nil {
			return nil, err
		}
		fromKey = pt.LastKey
		fromPath = pt.LastPath
	}

	trimmed := strings.TrimPrefix(prefix, "/")
	parts := strings.Split(trimmed, "/")
	if len(parts) < 2 {
		return nil, kv.ErrNoNamespace
	}
	ns := parts[0]

	// Partition key must equal the namespace; the remainder of the prefix
	// is matched with BEGINS_WITH on the range key. We request one extra
	// item (pageSize+1) to detect whether another page exists.
	queryInput := &dynamodb.QueryInput{
		TableName: aws.String(d.tableName),
		KeyConditions: map[string]*dynamodb.Condition{
			"Key": {
				ComparisonOperator: aws.String("EQ"),
				AttributeValueList: []*dynamodb.AttributeValue{
					{
						S: aws.String(ns),
					},
				},
			},
			"Path": {
				ComparisonOperator: aws.String("BEGINS_WITH"),
				AttributeValueList: []*dynamodb.AttributeValue{
					{
						S: aws.String(strings.Join(parts[1:], "/")),
					},
				},
			},
		},
		Limit: aws.Int64(int64(pageSize + 1)),
	}
	if fromKey != "" && fromPath != "" {
		queryInput.ExclusiveStartKey = map[string]*dynamodb.AttributeValue{
			"Key": {
				S: aws.String(fromKey),
			},
			"Path": {
				S: aws.String(fromPath),
			},
		}
	}

	result, err := d.db.Query(queryInput)
	if err != nil {
		return nil, err
	}

	pairs := make([]*kv.Pair, 0, len(result.Items))
	for _, item := range result.Items {
		// Since we fetch pageSize+1, stop if we have enough.
		if len(pairs) >= int(pageSize) {
			break
		}
		pairs = append(pairs, &kv.Pair{
			Key:   *item["Key"].S,
			Value: item["Value"].B,
		})
	}

	var nextToken string
	var lastEvalKey string
	var lastEvalPath string
	// We always fetch pageSize+1, so if we have pageSize+1 results, we need to
	// create a page token. We can't continue from result.LastEvaluatedKey since
	// that will skip over the last result. So we should continue from the last
	// visible result.
	if len(result.Items) > int(pageSize) {
		lastEvalKey = *result.Items[len(pairs)-1]["Key"].S
		lastEvalPath = *result.Items[len(pairs)-1]["Path"].S
	} else if result.LastEvaluatedKey != nil {
		lastEvalKey = *result.LastEvaluatedKey["Key"].S
		lastEvalPath = *result.LastEvaluatedKey["Path"].S
	}

	if lastEvalKey != "" && lastEvalPath != "" {
		// Add page token to the database.
		ptKey, err := generatePageToken()
		if err != nil {
			return nil, err
		}
		ptBytes, err := proto.Marshal(&basepb.DynamoDbPageToken{
			LastKey:  lastEvalKey,
			LastPath: lastEvalPath,
		})
		if err != nil {
			return nil, err
		}

		// Create the page token in the database, and it should expire in 2 days.
		_, err = d.db.PutItem(&dynamodb.PutItemInput{
			TableName: aws.String(d.tableName),
			Item: map[string]*dynamodb.AttributeValue{
				"Key": {
					S: aws.String("_internals"),
				},
				"Path": {
					S: aws.String(fmt.Sprintf("page_tokens/%s", ptKey)),
				},
				"Value": {
					B: ptBytes,
				},
				"ExpiresAt": {
					N: aws.String(fmt.Sprintf("%d", time.Now().Add(48*time.Hour).Unix())),
				},
			},
		})
		if err != nil {
			return nil, err
		}

		nextToken = ptKey
	}

	return &kv.Query{
		Prefix:    prefix,
		Pairs:     pairs,
		NextToken: nextToken,
	}, nil
}
// generatePageToken returns a random, versioned page token of the form
// "v1." followed by 48 alphanumeric characters.
//
// Random bytes are drawn from crypto/rand and mapped onto the charset via
// rejection sampling: since 256 is not a multiple of 62, a plain modulo
// would bias the first 256%62 characters, so bytes >= the largest multiple
// of 62 are discarded and redrawn.
func generatePageToken() (string, error) {
	const possible = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
	const tokenLen = 48
	// Largest multiple of len(possible) that fits in a byte (248); bytes
	// at or above this are rejected to keep the distribution uniform.
	limit := byte(256 - 256%len(possible))

	token := make([]byte, 0, tokenLen)
	buf := make([]byte, tokenLen)
	for len(token) < tokenLen {
		if _, err := rand.Read(buf); err != nil {
			return "", err
		}
		for _, b := range buf {
			if b >= limit {
				continue
			}
			token = append(token, possible[int(b)%len(possible)])
			if len(token) == tokenLen {
				break
			}
		}
	}
	return "v1." + string(token), nil
}

33
base/go/kv/kv.go Normal file
View File

@ -0,0 +1,33 @@
package kv
import (
"context"
"errors"
)
var (
	// ErrNotFound is returned by Get when no value exists for the key.
	ErrNotFound = errors.New("key not found")
	// ErrPageTokenNotFound is returned by RangePrefix when the supplied
	// page token is unknown (e.g. expired).
	ErrPageTokenNotFound = errors.New("page token not found")
	// ErrNoNamespace is returned when a key does not have the required
	// /<namespace>/<path> form.
	ErrNoNamespace = errors.New("no namespace")
)

// Pair is a single stored key/value entry.
type Pair struct {
	Key   string
	Value []byte
}

// Query is one page of results from RangePrefix.
type Query struct {
	// Prefix is the prefix that was queried.
	Prefix string
	// Pairs holds up to pageSize matching entries.
	Pairs []*Pair
	// NextToken is an opaque token for fetching the next page; empty when
	// there are no further results.
	NextToken string
}

// KV is a namespaced key-value store with prefix-range pagination.
type KV interface {
	// Get returns the contents of a file from the storage backend.
	// Key must have a namespace prefix.
	// Example: /kernels/entries/123, where kernels is the namespace and the rest is the range key.
	Get(ctx context.Context, key string) (*Pair, error)
	// Set stores value at key (same namespaced key form as Get).
	Set(ctx context.Context, key string, value []byte) error
	// Delete removes the entry at key, if present.
	Delete(ctx context.Context, key string) error
	// RangePrefix lists entries whose key begins with prefix, paginated
	// by pageSize and an opaque pageToken.
	RangePrefix(ctx context.Context, prefix string, pageSize int32, pageToken string) (*Query, error)
}

23
base/proto/BUILD vendored Normal file
View File

@ -0,0 +1,23 @@
load("@rules_proto//proto:defs.bzl", "proto_library")
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
proto_library(
name = "basepb_proto",
srcs = ["kv_page_token.proto"],
visibility = ["//visibility:public"],
)
go_proto_library(
name = "basepb_go_proto",
importpath = "go.resf.org/peridot/base/go/pb",
proto = ":basepb_proto",
visibility = ["//visibility:public"],
)
go_library(
name = "pb",
embed = [":basepb_go_proto"],
importpath = "go.resf.org/peridot/base/go/pb",
visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,17 @@
syntax = "proto3";

package base.go;

option java_multiple_files = true;
option java_outer_classname = "KvPageTokenProto";
option java_package = "org.resf.base.go";
option go_package = "go.resf.org/peridot/base/go/pb;basepb";

// DynamoDbPageToken is the page token used for DynamoDB.
message DynamoDbPageToken {
  // last_key is the last evaluated partition key.
  string last_key = 1;

  // last_path is the last evaluated range key path.
  string last_path = 2;
}

1
base/ts/global.d.ts vendored
View File

@ -23,5 +23,6 @@ declare global {
interface Window {
__peridot_prefix__: string;
__peridot_user__: PeridotUser;
__beta__: boolean;
}
}

View File

@ -49,6 +49,9 @@ export interface ResourceTableProps<T> {
// Default filter to start with
defaultFilter?: string;
// Whether to hide the filter input
hideFilter?: boolean;
// load is usually the OpenAPI SDK function that loads the resource.
load(pageSize: number, pageToken?: string, filter?: string): Promise<any>;
@ -102,6 +105,7 @@ export function ResourceTable<T extends StandardResource>(
const [rows, setRows] = React.useState<T[] | undefined>(undefined);
const [loading, setLoading] = React.useState<boolean>(false);
const [filter, setFilter] = React.useState<string | undefined>(initFilter);
const [filterValue, setFilterValue] = React.useState<string | undefined>(initFilter);
const updateSearch = (replace = false) => {
const search = new URLSearchParams(location.search);
@ -219,7 +223,7 @@ export function ResourceTable<T extends StandardResource>(
// Load the resource using useEffect
React.useEffect(() => {
fetchResource().then();
}, [pageToken, rowsPerPage]);
}, [filter, pageToken, rowsPerPage]);
// For filter, we're going to wait for the user to stop typing for 500ms
// before we actually fetch the resource.
@ -231,9 +235,9 @@ export function ResourceTable<T extends StandardResource>(
clearTimeout(timeout.current);
}
timeout.current = setTimeout(() => {
fetchResource().then();
setFilter(filterValue);
}, 500);
}, [filter]);
}, [filterValue]);
// Create table header
const header = props.fields.map((field) => {
@ -262,20 +266,21 @@ export function ResourceTable<T extends StandardResource>(
// Create a search box for filter input
// This can be disabled if the request does not support filtering
const searchBox = (
const searchBox = !props.hideFilter && (
<Box sx={{ display: 'flex', alignItems: 'center', mb: 2, width: '100%' }}>
<TextField
sx={{ mr: 2, flexGrow: 1 }}
label="Filter"
variant="outlined"
size="small"
value={filter}
onChange={(event: React.ChangeEvent<HTMLInputElement>) => setFilter(event.target.value)}
value={filterValue}
onChange={(event: React.ChangeEvent<HTMLInputElement>) => setFilterValue(event.target.value)}
/>
<Button
variant="contained"
onClick={() => {
setFilter('');
setFilterValue('');
setPageToken(undefined);
}}
>

View File

@ -638,6 +638,7 @@ def go_dependencies():
sum = "h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=",
version = "v1.0.1",
)
go_repository(
name = "com_github_elazarl_goproxy",
importpath = "github.com/elazarl/goproxy",
@ -1885,6 +1886,7 @@ def go_dependencies():
sum = "h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=",
version = "v2.0.1+incompatible",
)
go_repository(
name = "com_github_pjbgf_sha1cd",
importpath = "github.com/pjbgf/sha1cd",

View File

@ -130,11 +130,12 @@ func spawnIbazel(target string) error {
func run(ctx *cli.Context) error {
// add default frontend flags if needed
if ctx.Bool("dev-frontend-flags") {
base.ChangeDefaultForEnvVar(base.EnvVarFrontendSelf, "http://localhost:9111")
base.ChangeDefaultForEnvVar(base.EnvVarFrontendOIDCIssuer, "http://127.0.0.1:5556/dex")
base.ChangeDefaultForEnvVar(base.EnvVarFrontendOIDCClientID, "local")
base.ChangeDefaultForEnvVar(base.EnvVarFrontendOIDCClientSecret, "local")
_ = os.Setenv(string(base.EnvVarFrontendSelf), "http://localhost:9111")
_ = os.Setenv(string(base.EnvVarFrontendOIDCIssuer), "http://127.0.0.1:5556/dex")
_ = os.Setenv(string(base.EnvVarFrontendOIDCClientID), "local")
_ = os.Setenv(string(base.EnvVarFrontendOIDCClientSecret), "local")
}
_ = os.Setenv("AWS_ENDPOINT", "http://localhost:4566")
// get current wd and make the ibazel path relative to it
wd, err := os.Getwd()

View File

@ -1,2 +0,0 @@
# yumrepofs2
An S3-backed Yum repository server

3
go.mod
View File

@ -36,6 +36,7 @@ require (
github.com/wk8/go-ordered-map/v2 v2.1.8
go.ciq.dev/pika v0.0.0-20230819201750-737c3e8f413d
go.resf.org/peridot/third_party/bazel/src/main/protobuf v0.0.0-00010101000000-000000000000
go.resf.org/peridot/tools/kernelmanager/pb v0.0.0-00010101000000-000000000000
go.resf.org/peridot/tools/mothership/admin/pb v0.0.0-00010101000000-000000000000
go.resf.org/peridot/tools/mothership/pb v0.0.0-00010101000000-000000000000
go.starlark.net v0.0.0-20230814145427-12f4cb8177e4
@ -293,3 +294,5 @@ replace go.resf.org/peridot/third_party/bazel/src/main/protobuf => ./bazel-bin/t
replace go.resf.org/peridot/tools/mothership/admin/pb => ./bazel-bin/tools/mothership/proto/admin/v1/mshipadminpb_go_proto_/go.resf.org/peridot/tools/mothership/admin/pb
replace google.golang.org/genproto/googleapis/longrunning => ./bazel-bin/third_party/googleapis/google/longrunning/longrunning_go_proto_/google.golang.org/genproto/googleapis/longrunning
replace go.resf.org/peridot/tools/kernelmanager/pb => ./bazel-bin/tools/kernelmanager/proto/v1/kernelmanagerpb_go_proto_/go.resf.org/peridot/tools/kernelmanager/pb

6
scripts/gosetup Executable file
View File

@ -0,0 +1,6 @@
#!/usr/bin/env bash
# gosetup: build all generated Go protobuf code, then refresh build metadata.
set -euo pipefail
# Build every go_proto_library target in the workspace so the generated Go
# sources exist under bazel-bin. The command substitution is intentionally
# unquoted: each target label must be passed as a separate argument.
bazel build $(bazel query 'kind(go_proto_library, //...)')
# NOTE(review): presumably regenerates BUILD files / dependency metadata —
# confirm what //devtools/nfv2 does.
bazel run //devtools/nfv2

View File

@ -2,9 +2,8 @@
set -euo pipefail
go mod tidy -e
rm -rf vendor
go mod vendor -e
go mod tidy
go mod vendor
find vendor -name "BUILD.bazel" -delete
find vendor -name "BUILD" -delete

54
third_party/peridot/proto/v1/BUILD vendored Normal file
View File

@ -0,0 +1,54 @@
load("@rules_proto//proto:defs.bzl", "proto_library")
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
proto_library(
name = "peridotpb_proto",
srcs = [
"batch.proto",
"build.proto",
"catalog.proto",
"import.proto",
"module.proto",
"package.proto",
"project.proto",
"rpm.proto",
"search.proto",
"task.proto",
],
visibility = ["//visibility:public"],
deps = [
"//third_party/peridot/proto/v1/yumrepofs:yumrepofspb_proto",
"@com_google_protobuf//:any_proto",
"@com_google_protobuf//:descriptor_proto",
"@com_google_protobuf//:timestamp_proto",
"@com_google_protobuf//:wrappers_proto",
"@go_googleapis//google/api:httpbody_proto",
"@go_googleapis//google/rpc:errdetails_proto",
"@googleapis//google/api:annotations_proto",
],
)
go_proto_library(
name = "peridotpb_go_proto",
compilers = [
"@io_bazel_rules_go//proto:go_grpc",
"//:go_gen_grpc_gateway",
],
importpath = "peridot.resf.org/peridot/pb",
proto = ":peridotpb_proto",
visibility = ["//visibility:public"],
deps = [
"//third_party/peridot/proto/v1/yumrepofs:pb",
"@go_googleapis//google/api:httpbody_go_proto",
"@go_googleapis//google/rpc:errdetails_go_proto",
"@org_golang_google_genproto//googleapis/api/annotations",
],
)
go_library(
name = "pb",
embed = [":peridotpb_go_proto"],
importpath = "peridot.resf.org/peridot/pb",
visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,35 @@
load("@rules_proto//proto:defs.bzl", "proto_library")
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
proto_library(
name = "adminpb_proto",
srcs = ["admin.proto"],
visibility = ["//visibility:public"],
deps = [
"//third_party/peridot/proto/v1:peridotpb_proto",
"@googleapis//google/api:annotations_proto",
],
)
go_proto_library(
name = "adminpb_go_proto",
compilers = [
"@io_bazel_rules_go//proto:go_grpc",
"//:go_gen_grpc_gateway",
],
importpath = "peridot.resf.org/peridot/admin/pb",
proto = ":adminpb_proto",
visibility = ["//visibility:public"],
deps = [
"//third_party/peridot/proto/v1:pb",
"@org_golang_google_genproto//googleapis/api/annotations",
],
)
go_library(
name = "pb",
embed = [":adminpb_go_proto"],
importpath = "peridot.resf.org/peridot/admin/pb",
visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,28 @@
syntax = "proto3";
package resf.peridot.admin.v1;
import "google/api/annotations.proto";
import "third_party/peridot/proto/v1/task.proto";
option go_package = "peridot.resf.org/peridot/admin/pb;adminpb";
service PeridotAdminService {
rpc AddUpdateInformation (AddUpdateInformationRequest) returns (resf.peridot.v1.AsyncTask) {
option (google.api.http) = {
post: "/v1/admin/add_update_information"
body: "*"
};
option (resf.peridot.v1.task_info) = {
response_type: "AddUpdateInformationTask"
metadata_type: "AddUpdateInformationRequest"
};
}
}
message AddUpdateInformationRequest {
string project_id = 1;
string product_name = 2;
}
message AddUpdateInformationTask {}

View File

@ -0,0 +1,11 @@
syntax = "proto3";
package resf.peridot.v1;
import "third_party/peridot/proto/v1/task.proto";
option go_package = "peridot.resf.org/peridot/pb;peridotpb";
// BatchFilter narrows batch detail queries (e.g. GetBuildBatch,
// GetImportBatch) to entries with a given task status.
message BatchFilter {
  // Only include entries whose task currently has this status.
  TaskStatus status = 1;
}

385
third_party/peridot/proto/v1/build.proto vendored Normal file
View File

@ -0,0 +1,385 @@
syntax = "proto3";
package resf.peridot.v1;
import "google/protobuf/wrappers.proto";
import "google/protobuf/timestamp.proto";
import "google/api/annotations.proto";
import "third_party/peridot/proto/v1/import.proto";
import "third_party/peridot/proto/v1/task.proto";
import "third_party/peridot/proto/v1/batch.proto";
import "third_party/peridot/proto/v1/yumrepofs/yumrepofs.proto";
option go_package = "peridot.resf.org/peridot/pb;peridotpb";
service BuildService {
// ListBuilds returns all builds filtered through given filters
rpc ListBuilds(ListBuildsRequest) returns (ListBuildsResponse) {
option (google.api.http) = {
get: "/v1/projects/{project_id=*}/builds"
};
}
// GetBuild returns a build by its id
rpc GetBuild(GetBuildRequest) returns (GetBuildResponse) {
option (google.api.http) = {
get: "/v1/projects/{project_id=*}/builds/{build_id=*}"
};
}
// ListBuildBatches returns all build batches
rpc ListBuildBatches(ListBuildBatchesRequest) returns (ListBuildBatchesResponse) {
option (google.api.http) = {
get: "/v1/projects/{project_id=*}/build_batches"
};
}
// GetBuildBatch returns a build batch by its id
rpc GetBuildBatch(GetBuildBatchRequest) returns (GetBuildBatchResponse) {
option (google.api.http) = {
get: "/v1/projects/{project_id=*}/build_batches/{build_batch_id=*}"
};
}
// SubmitBuild builds a package scoped to a project
// The project has to contain an import for the specific package
// This method is asynchronous. Peridot uses the AsyncTask abstraction.
// Check out `//third_party/peridot/proto/v1:task.proto` for more information
rpc SubmitBuild(SubmitBuildRequest) returns (AsyncTask) {
option (google.api.http) = {
post: "/v1/projects/{project_id=*}/builds"
body: "*"
};
option (resf.peridot.v1.task_info) = {
response_type: "SubmitBuildTask"
metadata_type: "PackageOperationMetadata"
};
}
// SubmitBuildBatch submits a batch of builds.
rpc SubmitBuildBatch(SubmitBuildBatchRequest) returns (SubmitBuildBatchResponse) {
option (google.api.http) = {
post: "/v1/projects/{project_id=*}/build_batches"
body: "*"
};
}
// RpmImport imports rpm files into a project (packaged into tar format)
rpc RpmImport(RpmImportRequest) returns (AsyncTask) {
option (google.api.http) = {
post: "/v1/projects/{project_id=*}/builds/rpm-import"
body: "*"
};
option (resf.peridot.v1.task_info) = {
response_type: "RpmImportTask"
metadata_type: "RpmImportOperationMetadata"
};
}
// RpmLookasideBatchImport imports rpm files into a project (stored in Lookaside)
rpc RpmLookasideBatchImport(RpmLookasideBatchImportRequest) returns (AsyncTask) {
option (google.api.http) = {
post: "/v1/projects/{project_id=*}/builds/rpm-lookaside-batch-import"
body: "*"
};
option (resf.peridot.v1.task_info) = {
response_type: "RpmLookasideBatchImportTask"
metadata_type: "RpmLookasideBatchImportOperationMetadata"
};
}
}
message Build {
// Unique identifier for the specific build
string id = 1;
// Timestamp the build was created
google.protobuf.Timestamp created_at = 2;
// Package or module name
string name = 3;
// Import revisions (usually one, but multiple for modules)
repeated ImportRevision import_revisions = 4;
// Parent task ID for the specific build
string task_id = 5;
// Task status
TaskStatus status = 6;
}
message SubmitBuildRequest {
// Project ID that we want this build to be assigned to
// All build requests need a project id, however after
// the initial import, sharing the VRE in an inter-project
// way is possible.
string project_id = 1;
// Package name we want to build
// Has to follow the OpenPatch architecture
oneof package {
google.protobuf.StringValue package_name = 2;
google.protobuf.StringValue package_id = 8;
}
// Specific import hash to build
// Optional, latest is built if null
google.protobuf.StringValue scm_hash = 3;
// Disable checks will disable testing on specified target
// NOT RECOMMENDED AND WILL BE LIMITED IN THE FUTURE
// THE RESULTING ARTIFACTS SHOULD NOT BE USED
bool disable_checks = 4;
// Only build specified branches
// Branches is only valid for modular packages
// If specified, "normal" packages won't be built
repeated string branches = 5;
// Whether to build module variant only
// This only works with packages with two variants
// Ignored for single variant packages
bool module_variant = 6;
// Side NVRs pulls in the specified NVRs only for this build
// Fails if any NVR is not available
repeated string side_nvrs = 7;
// Whether to set inactive or not
bool set_inactive = 9;
}
message SubmitBuildBatchRequest {
// Only the top-most project id is used for all build requests
string project_id = 1;
repeated SubmitBuildRequest builds = 2;
}
message SubmitBuildBatchResponse {
string build_batch_id = 1;
}
message SubmitBuildTask {
// Build ID is the unique identifier that is used for a specific build request
string build_id = 1;
// Package name that was built
string package_name = 2;
// Import revision that was built
ImportRevision import_revision = 3;
// All produced artifacts (all artifacts should be available in blob storage)
repeated TaskArtifact artifacts = 4;
// True if tests/checks was disabled
bool checks_disabled = 5;
// Whether it was a module build or not
bool modular = 6;
// Parent task ID (usually for module builds)
google.protobuf.StringValue parent_task_id = 7;
// Repo changes
resf.peridot.yumrepofs.v1.UpdateRepoTask repo_changes = 8;
// Build task ID is the unique identifier that is used for a specific build request
string build_task_id = 9;
}
message SubmitBuildBatchTask {
repeated SubmitBuildTask builds = 1;
}
message BuildFilters {
// The status filter only returns builds that
// has the given status
TaskStatus status = 1;
google.protobuf.StringValue package_name = 2;
}
message ListBuildsRequest {
string project_id = 1;
// Filters that should be applied to the list query
// No filters will return all builds globally
BuildFilters filters = 2;
int32 page = 3;
int32 limit = 4;
}
message ListBuildsResponse {
repeated Build builds = 1;
// Total packages from server
int64 total = 2;
// Limit from request
int32 size = 3;
// Current page
int32 page = 4;
}
message GetBuildRequest {
string project_id = 1;
string build_id = 2;
}
message GetBuildResponse {
Build build = 1;
}
message BuildBatch {
string id = 1;
google.protobuf.Timestamp created_at = 2;
int32 count = 3;
int32 pending = 4;
int32 running = 5;
int32 succeeded = 6;
int32 failed = 7;
int32 canceled = 8;
}
message ListBuildBatchesRequest {
string project_id = 1;
int32 page = 2;
int32 limit = 3;
}
message ListBuildBatchesResponse {
repeated BuildBatch build_batches = 1;
// Total packages from server
int64 total = 2;
// Limit from request
int32 size = 3;
// Current page
int32 page = 4;
}
message GetBuildBatchRequest {
string project_id = 1;
string build_batch_id = 2;
int32 page = 3;
int32 limit = 4;
BatchFilter filter = 5;
}
message GetBuildBatchResponse {
repeated Build builds = 1;
int32 pending = 2;
int32 running = 3;
int32 succeeded = 4;
int32 failed = 5;
int32 canceled = 6;
// Total packages from server
int64 total = 7;
// Limit from request
int32 size = 8;
// Current page
int32 page = 9;
}
message ExtraYumrepofsRepo {
string name = 1;
bool module_hotfixes = 2;
bool ignore_exclude = 3;
int32 priority = 4;
}
// These options can be used to customize the behavior of the service
// Can only be defined/set by internal services
// Is especially used when building modules and module components
message ExtraBuildOptions {
// Disable yumrepofs updates
bool disable_yumrepofs_updates = 1;
// Extra files that should be added to the build root
// Key = path to file, value = contents of file
map<string, string> build_arch_extra_files = 2;
// Reusable build ID
// Used specifically for module builds
string reusable_build_id = 3;
// Extra yumrepofs repos that should be added to the build root
repeated ExtraYumrepofsRepo extra_yumrepofs_repos = 4;
// Whether the build is part of a batch
string build_batch_id = 5;
// Modules to enable during build
repeated string modules = 6;
// Modules to be disabled during build
repeated string disabled_modules = 10;
// Packages to exclude from all repositories not marked with ignore_exclude
repeated string exclude_packages = 7;
// Whether to enable networking in rpmbuild
bool enable_networking = 8;
// Force a specific dist
string force_dist = 9;
}
message RpmImportRequest {
string project_id = 1;
// Rpms
//
// Previously uploaded RPM tarball
string rpms = 2;
// Force override
//
// Overwrite existing RPMs even if NVRA is locked
// Useful for secure boot scenarios for example
bool force_override = 3;
}
message RpmImportTask {
resf.peridot.yumrepofs.v1.UpdateRepoTask repo_changes = 1;
}
message RpmImportOperationMetadata {
string package_name = 1;
}
message RpmLookasideBatchImportRequest {
string project_id = 1;
// Rpms
//
// Previously uploaded RPM tarball
repeated string lookaside_blobs = 2;
// Force override
//
// Overwrite existing RPMs even if NVRA is locked
// Useful for secure boot scenarios for example
bool force_override = 3;
}
message RpmLookasideBatchImportTask {
resf.peridot.yumrepofs.v1.UpdateRepoTask repo_changes = 1;
}
message RpmLookasideBatchImportOperationMetadata {
repeated string package_names = 1;
}

View File

@ -0,0 +1,107 @@
syntax = "proto3";
package resf.peridot.v1;
import "google/protobuf/wrappers.proto";
import "google/protobuf/timestamp.proto";
import "google/api/annotations.proto";
import "third_party/peridot/proto/v1/task.proto";
import "third_party/peridot/proto/v1/package.proto";
import "third_party/peridot/proto/v1/module.proto";
option go_package = "peridot.resf.org/peridot/pb;peridotpb";
message CatalogSyncRepository {
string name = 1;
repeated string include_filter = 2;
repeated string multilib = 3;
repeated string module_stream = 4;
}
message CatalogSyncPackage {
string name = 1;
resf.peridot.v1.PackageType type = 2;
repeated string module_component = 3;
repeated CatalogSyncRepository repository = 4;
}
message GlobArch {
// Regex pattern to match the architecture.
// Can also be set to "*" to match all architectures.
string key = 1;
// Glob to match the package in the repository.
repeated string glob_match = 2;
}
message GlobFilter {
// Regex pattern to match.
// Can also be set to "*" to match all strings.
string repo_match = 1;
repeated GlobArch arch = 2;
}
message CatalogSync {
repeated CatalogSyncPackage package = 1;
repeated string additional_multilib = 2;
repeated string exclude_multilib_filter = 3;
repeated GlobFilter exclude_filter = 4;
repeated GlobFilter include_filter = 5;
resf.peridot.v1.ModuleConfiguration module_configuration = 6;
}
message CatalogExtraPackageOptions {
string name = 1;
repeated string with = 2;
repeated string without = 3;
}
message CatalogGroupInstallScopedPackage {
string name = 1;
repeated string depends_on = 2;
repeated string enable_module = 3;
repeated string disable_module = 4;
}
message CatalogGroupInstallOption {
// list of all packages required to be installed in the build root per project
repeated string name = 1;
// Scoped packages allow for dynamically injecting build requirements into the build root e.g. when building SCLs
repeated CatalogGroupInstallScopedPackage scoped_package = 2;
}
message CatalogGroupInstallOptions {
CatalogGroupInstallOption srpm = 1;
CatalogGroupInstallOption build = 2;
}
message CatalogExtraOptions {
repeated CatalogExtraPackageOptions package_options = 1;
}
message KindCatalogSync {
repeated string new_packages = 1;
repeated string modified_packages = 4;
repeated string new_repositories = 2;
repeated string modified_repositories = 3;
repeated string additional_nvr_globs = 5;
resf.peridot.v1.ModuleConfiguration module_configuration = 6;
}
message KindCatalogExtraOptions {
repeated string modified_packages = 1;
}
message KindCatalogGroupInstallOptions {
repeated string srpm_packages = 1;
repeated string build_packages = 2;
repeated CatalogGroupInstallScopedPackage scoped_package = 3;
}
message SyncCatalogTask {
KindCatalogSync catalog_sync = 1;
KindCatalogExtraOptions extra_options = 2;
KindCatalogGroupInstallOptions group_install_options = 4;
repeated string reprocess_build_ids = 3;
}

View File

@ -0,0 +1,300 @@
syntax = "proto3";
package resf.peridot.v1;
import "google/protobuf/wrappers.proto";
import "google/protobuf/timestamp.proto";
import "google/api/annotations.proto";
import "third_party/peridot/proto/v1/task.proto";
import "third_party/peridot/proto/v1/batch.proto";
option go_package = "peridot.resf.org/peridot/pb;peridotpb";
// ImportService provides import capabilities to projects.
// Src imports aren't necessarily "imports" per-se,
// but the term is used loosely and means both a literal import as
// well as revision detection within Peridot.
// This is done for reproducibility as well as accountability purposes.
// todo(mustafa): Add more information about IAM
service ImportService {
// ListImports lists all imports for a project.
rpc ListImports(ListImportsRequest) returns (ListImportsResponse) {
option (google.api.http) = {
get: "/v1/projects/{project_id=*}/imports"
};
}
// GetImport gets an import by ID.
rpc GetImport(GetImportRequest) returns (GetImportResponse) {
option (google.api.http) = {
get: "/v1/projects/{project_id=*}/imports/{import_id=*}"
};
}
// ListImportBatches lists all import batches for a project.
rpc ListImportBatches(ListImportBatchesRequest) returns (ListImportBatchesResponse) {
option (google.api.http) = {
get: "/v1/projects/{project_id=*}/import_batches"
};
}
// GetImportBatch gets an import batch by ID.
rpc GetImportBatch(GetImportBatchRequest) returns (GetImportBatchResponse) {
option (google.api.http) = {
get: "/v1/projects/{project_id=*}/import_batches/{import_batch_id=*}"
};
}
// ImportPackage imports a package scoped to a project
// This method is asynchronous. Peridot uses the AsyncTask abstraction.
// Check out `//third_party/peridot/proto/v1:task.proto` for more information
// TODO low-pri: Support inter-project imports
rpc ImportPackage(ImportPackageRequest) returns (AsyncTask) {
option (google.api.http) = {
post: "/v1/projects/{project_id=*}/imports"
body: "*"
};
option (resf.peridot.v1.task_info) = {
response_type: "ImportPackageTask"
metadata_type: "PackageOperationMetadata"
};
};
// ImportPackageBatch imports a batch of packages scoped to a project
rpc ImportPackageBatch(ImportPackageBatchRequest) returns (ImportPackageBatchResponse) {
option (google.api.http) = {
post: "/v1/projects/{project_id=*}/import_batches"
body: "*"
};
};
// ImportBatchRetryFailed retries failed imports in a batch.
rpc ImportBatchRetryFailed(ImportBatchRetryFailedRequest) returns (ImportBatchRetryFailedResponse) {
option (google.api.http) = {
post: "/v1/projects/{project_id=*}/import_batches/{import_batch_id=*}/retry_failed"
};
}
}
message Import {
// Unique identifier for the specific build
string id = 1;
// Timestamp the build was created
google.protobuf.Timestamp created_at = 2;
// Package or module name
string name = 3;
// Parent task ID for the specific build
string task_id = 5;
// Task status
TaskStatus status = 6;
// Revisions for the import
repeated ImportRevision revisions = 7;
}
message ImportBatch {
string id = 1;
google.protobuf.Timestamp created_at = 2;
int32 count = 3;
int32 pending = 4;
int32 running = 5;
int32 succeeded = 6;
int32 failed = 7;
int32 canceled = 8;
}
message ListImportsRequest {
string project_id = 1;
int32 page = 2;
int32 limit = 3;
}
message ListImportsResponse {
repeated Import imports = 1;
// Total packages from server
int64 total = 2;
// Limit from request
int32 size = 3;
// Current page
int32 page = 4;
}
message GetImportRequest {
string project_id = 1;
string import_id = 2;
}
message GetImportResponse {
Import import = 1;
}
message ListImportBatchesRequest {
string project_id = 1;
int32 page = 2;
int32 limit = 3;
}
message ListImportBatchesResponse {
repeated ImportBatch import_batches = 1;
// Total packages from server
int64 total = 2;
// Limit from request
int32 size = 3;
// Current page
int32 page = 4;
}
message GetImportBatchRequest {
string project_id = 1;
string import_batch_id = 2;
int32 page = 3;
int32 limit = 4;
BatchFilter filter = 5;
}
message GetImportBatchResponse {
repeated Import imports = 1;
int32 pending = 2;
int32 running = 3;
int32 succeeded = 4;
int32 failed = 5;
int32 canceled = 6;
// Total packages from server
int64 total = 7;
// Limit from request
int32 size = 8;
// Current page
int32 page = 9;
}
// VersionRelease contains versioning information about
// a specific package
message VersionRelease {
// Version is the RPM version field from the spec
google.protobuf.StringValue version = 1;
// Release is the RPM release field from the spec
// This value will have the %{?dist} macro expanded
google.protobuf.StringValue release = 2;
}
// ImportPackageRequest is the request message for ImportService.ImportPackage
message ImportPackageRequest {
// Project ID that we want this import to be assigned to
// All import requests need a project id, however after
// the initial import, sharing the VRE in an inter-project
// way is possible.
string project_id = 1;
// Package name/ID we want to import
// Has to follow the OpenPatch architecture
oneof package {
google.protobuf.StringValue package_name = 2;
google.protobuf.StringValue package_id = 4;
}
// Specific version details that we want the import to adhere to
// Can be used to import a specific version for example, or the version
// released mid-cycle for a point release
VersionRelease vre = 3;
// Whether to set import as inactive or not.
// This will make the import not appear as the latest
bool set_inactive = 5;
}
message ImportPackageBatchRequest {
// Only the top-most project id is used for all import requests
string project_id = 1;
repeated ImportPackageRequest imports = 2;
}
message ImportPackageBatchResponse {
string import_batch_id = 1;
}
message ImportBatchRetryFailedRequest {
string project_id = 1;
string import_batch_id = 2;
}
message ImportBatchRetryFailedResponse {
string import_batch_id = 1;
}
// ImportRevision is SCM metadata as well as versioning information of a specific
// import element.
message ImportRevision {
// SCM Hash for the specific revision. For example Git hash
string scm_hash = 1;
// Branch name that we imported the content from
string scm_branch_name = 2;
// Versioning details that was parsed from the spec file
VersionRelease vre = 3;
// Indicate whether this was a module import or not
bool module = 4;
// Indicate whether this was a module stream import or not
bool module_stream = 5;
// Upstream URL for the import
string scm_url = 6;
// Package version id
string package_version_id = 7;
}
// ImportPackageTask is the AsyncTask metadata that is included
// in the parent task for a specific import request
message ImportPackageTask {
// Import ID is the unique identifier that is used for a specific import request
string import_id = 1;
// Package name that was imported
string package_name = 2;
// Revisions that was able to be imported
repeated ImportRevision import_revisions = 3;
}
message ImportPackageBatchTask {
repeated ImportPackageTask imports = 1;
}
// PackageSrcGitResponse is the Temporal Activity metadata for PackageSrcGit
// Contains information about lookaside blobs that was packaged as well as
// the subtask ID
message PackageSrcGitResponse {
// A map of tarballs with their SHA256 hashes
// These blobs were uploaded to the lookaside cache
// and should be present in the metadata.
// The information is added to the metadata during
// dist-git sync
map<string, string> name_hashes = 1;
// Subtask ID
string task_id = 2;
}
message ExtraImportOptions {
// Whether the import is part of a batch
string import_batch_id = 1;
}

View File

@ -0,0 +1,35 @@
load("@rules_proto//proto:defs.bzl", "proto_library")
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
proto_library(
name = "keykeeperpb_proto",
srcs = ["keykeeper.proto"],
visibility = ["//visibility:public"],
deps = [
"//third_party/peridot/proto/v1:peridotpb_proto",
"@googleapis//google/api:annotations_proto",
],
)
go_proto_library(
name = "keykeeperpb_go_proto",
compilers = [
"@io_bazel_rules_go//proto:go_grpc",
"//:go_gen_grpc_gateway",
],
importpath = "peridot.resf.org/peridot/keykeeper/pb",
proto = ":keykeeperpb_proto",
visibility = ["//visibility:public"],
deps = [
"//third_party/peridot/proto/v1:pb",
"@org_golang_google_genproto//googleapis/api/annotations",
],
)
go_library(
name = "pb",
embed = [":keykeeperpb_go_proto"],
importpath = "peridot.resf.org/peridot/keykeeper/pb",
visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,117 @@
syntax = "proto3";
package resf.peridot.keykeeper.v1;
import "third_party/peridot/proto/v1/task.proto";
import "google/api/annotations.proto";
option go_package = "peridot.resf.org/peridot/keykeeper/pb;keykeeperpb";
service KeykeeperService {
// GenerateKey generates a new key pair and attaches the key to the given project.
// todo(mustafa): Evaluate if we need to convert this to AsyncTask.
// todo(mustafa): This call may be long running (we're talking about 30-60 seconds).
rpc GenerateKey(GenerateKeyRequest) returns (GenerateKeyResponse) {
option (google.api.http) = {
post: "/v1/generate-key"
body: "*"
};
}
// GetPublicKey returns the public key of the given key id
rpc GetPublicKey(GetPublicKeyRequest) returns (GetPublicKeyResponse) {
option (google.api.http) = {
get: "/v1/keys/{key_name=*}/public-key"
};
}
// ImportKey imports a key pair and attaches it to the given project.
// todo(mustafa): Currently unimplemented.
rpc ImportKey(ImportKeyRequest) returns (ImportKeyResponse) {
option (google.api.http) = {
post: "/v1/import-key"
body: "*"
};
}
rpc SignArtifacts(SignArtifactsRequest) returns (resf.peridot.v1.AsyncTask) {
option (google.api.http) = {
post: "/v1/sign-artifacts"
body: "*"
};
option (resf.peridot.v1.task_info) = {
response_type: "SignArtifactsTask"
metadata_type: "SignArtifactsRequest"
};
}
rpc SignText(SignTextRequest) returns (SignTextResponse) {
option (google.api.http) = {
post: "/v1/sign-text"
body: "*"
};
}
}
message GenerateKeyRequest {
// Project that the key will be attached to.
string project_id = 1;
// Name of the key to generate.
string name = 2;
// Email to associate with the generated key.
string email = 3;
}
message GenerateKeyResponse {
string name = 1;
string email = 2;
string fingerprint = 3;
}
message GetPublicKeyRequest {
string key_name = 1;
}
message GetPublicKeyResponse {
string public_key = 1;
}
// todo(mustafa): Implement
message ImportKeyRequest {}
message ImportKeyResponse {}
message SignedArtifact {
string path = 1;
string hash_sha256 = 2;
}
message SignArtifactsRequest {
// Build ID of the artifacts to sign.
// The artifacts has to be attached to the given build.
string build_id = 1;
// Key name is the key that the artifacts is signed with.
string key_name = 2;
}
message SignArtifactsTask {
repeated SignedArtifact signed_artifacts = 1;
}
message BatchSignArtifactsTask {
repeated SignArtifactsTask tasks = 1;
}
message SignTextRequest {
// Text to sign.
string text = 1;
// Key name is the key that the artifacts is signed with.
string key_name = 2;
}
message SignTextResponse {
string signature = 1;
}

View File

@ -0,0 +1,90 @@
syntax = "proto3";
package resf.peridot.v1;
import "third_party/peridot/proto/v1/import.proto";
import "third_party/peridot/proto/v1/build.proto";
import "third_party/peridot/proto/v1/yumrepofs/yumrepofs.proto";
option go_package = "peridot.resf.org/peridot/pb;peridotpb";
// Dependencies is a set of packages installed into a build root.
message Dependencies {
  // Rpm based dependencies
  // This is a list of dependencies that are installed
  repeated string rpm = 1;
}
// ModulePlatform describes the EL platform a module stream targets.
message ModulePlatform {
  // Major version for EL release.
  int32 major = 1;
  // Minor version for EL release.
  int32 minor = 2;
  // Patch version for EL release.
  int32 patch = 3;
  // Virtual streams for EL release.
  // Added to maintain compatibility with MBS (fm-orchestrator).
  repeated string provides = 4;
  // Buildroot dependencies
  Dependencies buildroot = 5;
  // Srpmroot dependencies
  Dependencies srpmroot = 6;
}
// ModuleDefaultProfile maps a stream to its default profile names.
message ModuleDefaultProfile {
  // Stream the profiles apply to.
  string stream = 1;
  // Profile names for that stream.
  repeated string name = 2;
}
// ModuleDefault declares the default stream/profile selection for a module.
message ModuleDefault {
  // Module name.
  string name = 1;
  // Default stream for the module.
  string stream = 2;
  // Per-stream default profiles.
  repeated ModuleDefaultProfile profile = 3;
  // Short-hand for streams with common profile only
  repeated string common_profile = 4;
}
// ModuleConfiguration bundles platform info and module defaults.
message ModuleConfiguration {
  ModulePlatform platform = 1;
  repeated ModuleDefault default = 2;
}
// ModuleStreamDocument holds serialized module documents keyed by stream.
message ModuleStreamDocument {
  map<string, bytes> streams = 1;
}
// ModuleStream is the full build state of one module stream.
message ModuleStream {
  // Dist tag used for the stream build.
  string dist = 1;
  // Monotonic increment of this stream build.
  int64 increment = 2;
  // Module name.
  string name = 3;
  // Stream name.
  string stream = 4;
  // Stream version.
  string version = 5;
  // Build context.
  string context = 6;
  // Import revision this stream build was produced from.
  resf.peridot.v1.ImportRevision import_revision = 7;
  ModuleConfiguration configuration = 8;
  // Mapped to arch
  map<string, ModuleStreamDocument> module_stream_documents = 9;
  // Component builds submitted for this stream.
  repeated resf.peridot.v1.SubmitBuildTask builds = 10;
}
// ModuleBuildTask is the async task result of a module build.
message ModuleBuildTask {
  repeated ModuleStream streams = 1;
  // Repo changes
  resf.peridot.yumrepofs.v1.UpdateRepoTask repo_changes = 2;
}
// ModuleScm points at a component's SCM revision.
message ModuleScm {
  string ref = 1;
}
// ModuleMetadata records the SCM origin of a module and its RPM components.
message ModuleMetadata {
  string scm_hash = 1;
  string scm_url = 2;
  // Component name mapped to its SCM reference.
  map<string, ModuleScm> rpms = 3;
}

View File

@ -0,0 +1,206 @@
syntax = "proto3";
package resf.peridot.v1;
import "google/protobuf/any.proto";
import "google/protobuf/wrappers.proto";
import "google/protobuf/timestamp.proto";
import "third_party/peridot/proto/v1/import.proto";
import "third_party/peridot/proto/v1/task.proto";
import "google/api/annotations.proto";
option go_package = "peridot.resf.org/peridot/pb;peridotpb";
// PackageService provides methods to list, search and edit packages in a project
service PackageService {
  // ListPackages returns all packages with filters applied
  rpc ListPackages(ListPackagesRequest) returns (ListPackagesResponse) {
    option (google.api.http) = {
      get: "/v1/projects/{project_id=*}/packages"
    };
  }
  // GetPackage returns a package by its id or name
  // The lookup field and its value are both given in the path.
  rpc GetPackage(GetPackageRequest) returns (GetPackageResponse) {
    option (google.api.http) = {
      get: "/v1/projects/{project_id=*}/packages/{field=*}/{value=*}"
    };
  }
}
// PackageType helps determine what method of import/build should be used
// for a specific package
enum PackageType {
  // Unknown value. Should never be used
  PACKAGE_TYPE_DEFAULT = 0;
  // Normal packages from downstream dist-git
  // The repos are imported as-is
  // This will never be used as PACKAGE_TYPE_NORMAL_FORK
  // accomplishes the same task without duplicate work
  PACKAGE_TYPE_NORMAL = 1;
  // Normal packages from upstream dist-git
  // The repos are first imported into target dist-git using srpmproc (with eventual patches)
  // and then imported as-is into Peridot
  PACKAGE_TYPE_NORMAL_FORK = 2;
  // Source packages from downstream src-git
  // The sources are packaged into tarballs and uploaded into lookaside,
  // and the repo with sources removed is then pushed into dist-git with a
  // following metadata file.
  // This package type enables an automatic src-git packaging workflow, but
  // a manual workflow may be adapted as well with manual packaging.
  // The package should then be set to PACKAGE_TYPE_NORMAL if manual packaging
  // is desired.
  PACKAGE_TYPE_NORMAL_SRC = 3;
  // todo(mustafa): Document rest
  // PACKAGE_TYPE_MODULE = 4;
  // PACKAGE_TYPE_MODULE_COMPONENT = 5;
  // NOTE(review): undocumented upstream; presumably module forked from
  // upstream dist-git — confirm before relying on this description.
  PACKAGE_TYPE_MODULE_FORK = 6;
  // NOTE(review): undocumented upstream; presumably a forked module component.
  PACKAGE_TYPE_MODULE_FORK_COMPONENT = 7;
  // A package may be both a normally forked package and a module
  // So we need to differentiate between the two
  PACKAGE_TYPE_NORMAL_FORK_MODULE = 8;
  // A package may also be a module component and a normal package
  // So we need to differentiate between the two
  PACKAGE_TYPE_NORMAL_FORK_MODULE_COMPONENT = 9;
  // A package may be both a module and a module component
  PACKAGE_TYPE_MODULE_FORK_MODULE_COMPONENT = 10;
}
// Package is an importable, buildable and publishable RPM package
// Packages are globally scoped but interaction with package related actions
// are done within project scoped methods.
// A global scope ensures that builds and imports can be freely shared between
// projects and makes it easier to freeze, clone and develop projects.
// Going from a minor release to the next should be easier
// Maintaining beta releases and backporting them after release is also a huge plus
message Package {
  // Unique identifier of type UUID v4
  string id = 1;
  // Name of package
  // Does not have to be unique (globally scoped)
  string name = 2;
  // Since names does not have to be unique (in the case of multi-version, cross-project packages)
  // a distinction must be added to each package.
  // For example when moving a point release to extended support mode, the package will be cloned
  // but will be recognized by this distinction
  // string distinction = 3;
  // todo(mustafa): May re-add later but packages can have multiple versions used in different projects now
  // todo(mustafa): so removing this for now
  reserved 3;
  // Type of package. Fork/dist/src types are all scoped by package and not project.
  // This means that a project may contain both forked and original packages
  PackageType type = 4;
  // Last time the package was imported (for the active version in project)
  google.protobuf.Timestamp last_import_at = 5;
  // Last time the package was built (for the active version in project)
  google.protobuf.Timestamp last_build_at = 6;
}
// PackageVersion is a collection of metadata on import and package versions within
// a project. Also indicates whether the version is active or not for a given project
message PackageVersion {
  // Import revision this version was produced from.
  ImportRevision import_revision = 1;
  // Subtasks of the import that produced this version.
  repeated Subtask import_tasks = 2;
  // Whether this is the active package version for the project
  bool active = 3;
  // Whether this is the version active in yumrepofs, not necessarily
  // the active version. As there is a transition workflow of
  // import -> active (in project) -> build (while older version is active in REPO) -> active (in project and repo)
  bool active_in_repo = 4;
}
// DetailedPackage includes extra details that a broad list/search of
// Packages do not include. Useful for single package queries
message DetailedPackage {
  // The package itself.
  Package package = 1;
  // The currently active version of the package in the project.
  PackageVersion active_version = 2;
}
// PackageFilters is values the requester can use to narrow
// down on list/search results for Package queries
message PackageFilters {
  // Match on package ID.
  google.protobuf.StringValue id = 1;
  // Fuzzy match on package name.
  google.protobuf.StringValue name = 2;
  // When true, modular packages will be returned
  google.protobuf.BoolValue modular = 3;
  // Only matches exact name
  google.protobuf.StringValue name_exact = 4;
  // Matches if no imports are present
  google.protobuf.BoolValue no_imports = 5;
  // Matches if no builds for latest import
  google.protobuf.BoolValue no_builds = 6;
}
// ListPackagesRequest is the request message for PackageService.ListPackages
message ListPackagesRequest {
  // The page to request
  int32 page = 1;
  // Maximum amount of results to return
  // Minimum: 1
  // Maximum: 100
  int32 limit = 2;
  // Project ID that should be queried
  string project_id = 3;
  // Filters to narrow down on results
  PackageFilters filters = 4;
}
// ListPackagesResponse is the response message for PackageService.ListPackages
message ListPackagesResponse {
  // Packages returned from server
  repeated Package packages = 1;
  // Total packages from server
  int64 total = 2;
  // Limit from request
  int32 size = 3;
  // Current page
  int32 page = 4;
}
// PackageOperationMetadata describes the task that acted on a package.
message PackageOperationMetadata {
  // Name of the package
  string package_name = 1;
  // Whether the package is modular
  bool modular = 2;
  // Calling task type
  TaskType task_type = 3;
  // Extra information about the task
  google.protobuf.Any task_metadata = 4;
}
// GetPackageRequest is the request message for PackageService.GetPackage.
message GetPackageRequest {
  // Project the package is looked up in.
  string project_id = 1;
  // Field to match on (per GetPackage docs: id or name).
  string field = 2;
  // Value the field must equal.
  string value = 3;
}
// GetPackageResponse is the response message for PackageService.GetPackage.
message GetPackageResponse {
  Package package = 1;
}

View File

@ -0,0 +1,367 @@
syntax = "proto3";
package resf.peridot.v1;
import "google/protobuf/wrappers.proto";
import "google/protobuf/timestamp.proto";
import "google/api/annotations.proto";
import "third_party/peridot/proto/v1/task.proto";
option go_package = "peridot.resf.org/peridot/pb;peridotpb";
// ProjectService manages projects, their repositories, credentials and
// related long-running operations.
service ProjectService {
  // CreateProject creates a new project.
  rpc CreateProject(CreateProjectRequest) returns (CreateProjectResponse) {
    option (google.api.http) = {
      post: "/v1/projects"
      body: "*"
    };
  }
  // UpdateProject replaces the stored project with the given one.
  rpc UpdateProject(UpdateProjectRequest) returns (UpdateProjectResponse) {
    option (google.api.http) = {
      put: "/v1/projects/{project_id=*}"
      body: "*"
    };
  }
  // ListProjects returns projects with pagination.
  rpc ListProjects(ListProjectsRequest) returns (ListProjectsResponse) {
    option (google.api.http) = {
      get: "/v1/projects"
    };
  }
  // GetProject returns a single project by ID.
  rpc GetProject(GetProjectRequest) returns (GetProjectResponse) {
    option (google.api.http) = {
      get: "/v1/projects/{id=*}"
    };
  }
  // ListRepositories returns the yumrepofs repositories of a project.
  rpc ListRepositories(ListRepositoriesRequest) returns (ListRepositoriesResponse) {
    option (google.api.http) = {
      get: "/v1/projects/{project_id=*}/repositories"
    };
  }
  // GetRepository returns a single repository by ID.
  rpc GetRepository(GetRepositoryRequest) returns (GetRepositoryResponse) {
    option (google.api.http) = {
      get: "/v1/projects/{project_id=*}/repositories/{id=*}"
    };
  }
  // GetProjectCredentials returns the stored Gitlab username (never the password).
  rpc GetProjectCredentials(GetProjectCredentialsRequest) returns (GetProjectCredentialsResponse) {
    option (google.api.http) = {
      get: "/v1/projects/{project_id=*}/credentials"
    };
  }
  // SetProjectCredentials stores Gitlab credentials for the project.
  rpc SetProjectCredentials(SetProjectCredentialsRequest) returns (SetProjectCredentialsResponse) {
    option (google.api.http) = {
      post: "/v1/projects/{project_id=*}/credentials"
      body: "*"
    };
  }
  // SyncCatalog starts an async task syncing the project against a catalog SCM.
  rpc SyncCatalog(SyncCatalogRequest) returns (resf.peridot.v1.AsyncTask) {
    option (google.api.http) = {
      post: "/v1/projects/{project_id=*}/catalogsync"
      body: "*"
    };
  }
  // CreateHashedRepositories starts an async task creating hashed
  // (snapshot) repositories for the given repositories.
  rpc CreateHashedRepositories(CreateHashedRepositoriesRequest) returns (resf.peridot.v1.AsyncTask) {
    option (google.api.http) = {
      post: "/v1/projects/{project_id=*}/repositories/hashed"
      body: "*"
    };
  }
  // LookasideFileUpload uploads a file to lookaside and returns its digest.
  rpc LookasideFileUpload(LookasideFileUploadRequest) returns (LookasideFileUploadResponse) {
    option (google.api.http) = {
      post: "/v1/lookaside"
      body: "*"
    };
  }
  // CloneSwap starts an async task cloning a source project into the target.
  rpc CloneSwap(CloneSwapRequest) returns (resf.peridot.v1.AsyncTask) {
    option (google.api.http) = {
      post: "/v1/projects/{target_project_id=*}/cloneswap"
      body: "*"
    };
  }
  // ListExternalRepositories returns the external repos attached to a project.
  rpc ListExternalRepositories(ListExternalRepositoriesRequest) returns (ListExternalRepositoriesResponse) {
    option (google.api.http) = {
      get: "/v1/projects/{project_id=*}/external_repositories"
    };
  }
  // DeleteExternalRepository removes an external repository from the project.
  rpc DeleteExternalRepository(DeleteExternalRepositoryRequest) returns (DeleteExternalRepositoryResponse) {
    option (google.api.http) = {
      delete: "/v1/projects/{project_id=*}/external_repositories/{id=*}"
    };
  }
}
// Project is a contained RPM distribution
// Packages, builds, imports and tasks are assigned to projects (multi-projects possible)
// All actions currently has to belong to a project and currently all operational
// metadata is kept within a project. Such as source scm and target scm.
// Think of projects as each their own software collection
message Project {
  // Unique identifier of format UUID v4
  string id = 1;
  // When project was created
  google.protobuf.Timestamp created_at = 2;
  // When project was last updated
  google.protobuf.Timestamp updated_at = 3;
  // Name of project, must be unique
  google.protobuf.StringValue name = 4;
  // Major version of project (e.g. 8 for EL8 and 1800 for openSUSE 18)
  google.protobuf.Int32Value major_version = 5;
  // Architectures supported by this project
  repeated string archs = 6;
  // Dist tag that this project will supply for builds
  google.protobuf.StringValue dist_tag = 7;
  // Target gitlab host
  google.protobuf.StringValue target_gitlab_host = 8;
  // Target prefix
  google.protobuf.StringValue target_prefix = 9;
  // Target branch prefix
  google.protobuf.StringValue target_branch_prefix = 10;
  // Source git host
  google.protobuf.StringValue source_git_host = 11;
  // Source prefix
  google.protobuf.StringValue source_prefix = 12;
  // Source branch prefix
  google.protobuf.StringValue source_branch_prefix = 13;
  // URL to blob storage
  google.protobuf.StringValue cdn_url = 14;
  // Whether to fetch Stream packages
  bool stream_mode = 15;
  // Target vendor
  string target_vendor = 16;
  // Target additional vendor
  google.protobuf.StringValue additional_vendor = 17;
  // Whether the builds should forcefully use import dist tag
  bool follow_import_dist = 18;
  // Branch suffix
  google.protobuf.StringValue branch_suffix = 19;
  // Whether to make the resulting import repositories public
  bool git_make_public = 20;
  // Vendor macro is what gets inserted as the vendor in the RPM
  google.protobuf.StringValue vendor_macro = 21;
  // Packager macro is what gets inserted as the packager in the RPM
  google.protobuf.StringValue packager_macro = 22;
  // specify a build pool type in additional to build pool architecture
  google.protobuf.StringValue build_pool_type = 23;
}
// A repository is a yum repository that yumrepofs maintains
// for this specific project
// Repositories hold packages. All projects have a repository named "all"
message Repository {
  // Unique identifier of format UUID v4
  string id = 1;
  // When project was created
  google.protobuf.Timestamp created_at = 2;
  // Name of the repository.
  google.protobuf.StringValue name = 3;
  // Project this repository belongs to.
  google.protobuf.StringValue project_id = 4;
  // Packages contained in this repository.
  repeated string packages = 5;
  // Whether an RPM from a package should be excluded from the repository
  // Format: {name}.{arch}
  repeated string exclude_filter = 6;
  // Whether an RPM from a package should be included in the repository
  // If list contains a NA that is in exclude_list as well, then it will
  // be excluded.
  repeated string include_list = 7;
  // Force multilib on non-prepopulated, non-devel and non-runtime package
  // Only accepts complete RPM name (e.g. valgrind)
  repeated string additional_multilib = 8;
  // Exclude multilib even if devel or runtime
  // Accepts glob (e.g. valgrind*)
  repeated string exclude_multilib_filter = 9;
  // Architectures that should be multilib enabled
  repeated string multilib = 10;
  // Exclude filter that supports globs
  repeated string glob_include_filter = 11;
}
// CreateProjectRequest is the request message for ProjectService.CreateProject.
message CreateProjectRequest {
  // Project to create
  Project project = 1;
}
// CreateProjectResponse is the response message for ProjectService.CreateProject.
message CreateProjectResponse {
  // Created project
  Project project = 1;
}
// UpdateProjectRequest is the request message for ProjectService.UpdateProject.
message UpdateProjectRequest {
  // ID of the project to update.
  string project_id = 1;
  // Project to update
  Project project = 2;
}
// UpdateProjectResponse is the response message for ProjectService.UpdateProject.
message UpdateProjectResponse {
  // Updated project
  Project project = 1;
}
// ProjectFilters narrows down project list/search results.
message ProjectFilters {
  // Match on project ID.
  google.protobuf.StringValue id = 1;
  // Match on project name.
  google.protobuf.StringValue name = 2;
  // Match any of the given project IDs.
  repeated string ids = 3;
}
// ListProjectsRequest is the request message for ProjectService.ListProjects.
message ListProjectsRequest {
  // The page to request
  int64 page = 1;
  // Maximum amount of results to return
  // Minimum: 1
  // Maximum: 100
  int32 limit = 2;
}
// ListProjectsResponse is the response message for ProjectService.ListProjects.
message ListProjectsResponse {
  // Projects returned from the server.
  repeated Project projects = 1;
  // Total number of projects on the server.
  int64 total = 2;
  // Number of projects in this page.
  int64 current = 3;
  // Current page.
  int64 page = 4;
}
// GetProjectRequest is the request message for ProjectService.GetProject.
message GetProjectRequest {
  google.protobuf.StringValue id = 1;
}
// GetProjectResponse is the response message for ProjectService.GetProject.
message GetProjectResponse {
  Project project = 1;
}
// ListRepositoriesRequest is the request message for ProjectService.ListRepositories.
message ListRepositoriesRequest {
  google.protobuf.StringValue project_id = 1;
}
// ListRepositoriesResponse is the response message for ProjectService.ListRepositories.
message ListRepositoriesResponse {
  repeated Repository repositories = 1;
}
// GetRepositoryRequest is the request message for ProjectService.GetRepository.
message GetRepositoryRequest {
  google.protobuf.StringValue project_id = 1;
  google.protobuf.StringValue id = 2;
}
// GetRepositoryResponse is the response message for ProjectService.GetRepository.
message GetRepositoryResponse {
  Repository repository = 1;
}
// GetProjectCredentialsRequest is the request message for
// ProjectService.GetProjectCredentials.
message GetProjectCredentialsRequest {
  google.protobuf.StringValue project_id = 1;
}
// GetProjectCredentialsResponse only exposes the username, never the password.
message GetProjectCredentialsResponse {
  google.protobuf.StringValue gitlab_username = 1;
}
// SetProjectCredentialsRequest stores Gitlab credentials for a project.
message SetProjectCredentialsRequest {
  google.protobuf.StringValue project_id = 1;
  google.protobuf.StringValue gitlab_username = 2;
  google.protobuf.StringValue gitlab_password = 3;
}
// SetProjectCredentialsResponse echoes the stored username.
message SetProjectCredentialsResponse {
  google.protobuf.StringValue gitlab_username = 1;
}
// SyncCatalogRequest points SyncCatalog at a catalog SCM repository/branch.
message SyncCatalogRequest {
  google.protobuf.StringValue project_id = 1;
  // URL of the catalog SCM repository.
  google.protobuf.StringValue scm_url = 2;
  // Branch of the catalog SCM repository to sync from.
  google.protobuf.StringValue branch = 3;
}
// CreateHashedRepositoriesRequest selects which repositories to snapshot.
message CreateHashedRepositoriesRequest {
  google.protobuf.StringValue project_id = 1;
  repeated string repositories = 2;
}
// CreateHashedRepositoriesTask is the async task result for CreateHashedRepositories.
message CreateHashedRepositoriesTask {
  repeated string repo_revisions = 1;
}
// CloneSwapRequest names the source and target projects for a clone swap.
message CloneSwapRequest {
  google.protobuf.StringValue target_project_id = 1;
  google.protobuf.StringValue src_project_id = 2;
}
// CloneSwapTask is the async task result for CloneSwap.
message CloneSwapTask {
  string target_project_id = 1;
  string src_project_id = 2;
  repeated string build_ids_layered = 3;
}
// LookasideFileUploadRequest carries the file contents to upload.
message LookasideFileUploadRequest {
  string file = 1;
}
// LookasideFileUploadResponse returns the digest identifying the uploaded file.
message LookasideFileUploadResponse {
  string digest = 1;
}
// ExternalRepository is a third-party yum repository attached to a project.
message ExternalRepository {
  string url = 1;
  int32 priority = 2;
  bool module_hotfixes = 3;
}
// ListExternalRepositoriesRequest is the request message for
// ProjectService.ListExternalRepositories.
message ListExternalRepositoriesRequest {
  google.protobuf.StringValue project_id = 1;
}
// ListExternalRepositoriesResponse is the response message for
// ProjectService.ListExternalRepositories.
message ListExternalRepositoriesResponse {
  repeated ExternalRepository repositories = 1;
}
// CreateExternalRepositoryRequest attaches a new external repository.
message CreateExternalRepositoryRequest {
  google.protobuf.StringValue project_id = 1;
  google.protobuf.StringValue url = 2;
  google.protobuf.Int32Value priority = 3;
  bool module_hotfixes = 4;
}
// CreateExternalRepositoryResponse returns the created external repository.
message CreateExternalRepositoryResponse {
  ExternalRepository repository = 1;
}
// DeleteExternalRepositoryRequest names the external repository to delete.
message DeleteExternalRepositoryRequest {
  google.protobuf.StringValue project_id = 1;
  google.protobuf.StringValue id = 2;
}
message DeleteExternalRepositoryResponse {}

20
third_party/peridot/proto/v1/rpm.proto vendored Normal file
View File

@ -0,0 +1,20 @@
syntax = "proto3";
package resf.peridot.v1;
option go_package = "peridot.resf.org/peridot/pb;peridotpb";
// RpmArtifactMetadata carries serialized repo metadata fragments for one
// RPM artifact plus its arch constraints.
message RpmArtifactMetadata {
  // Serialized primary metadata fragment.
  bytes primary = 1;
  // Serialized filelists metadata fragment.
  bytes filelists = 2;
  // Serialized "other" metadata fragment.
  bytes other = 3;
  // Serialized updateinfo metadata fragment.
  bytes updateinfo = 4;
  // Architectures the RPM is excluded from.
  repeated string exclude_arch = 5;
  // Architectures the RPM is exclusively built for.
  repeated string exclusive_arch = 6;
  // Architectures the RPM is built on.
  repeated string build_arch = 7;
}
// RpmRepoMetadata groups artifact metadata per architecture.
message RpmRepoMetadata {
  // Repositories mapped to architecture
  map<string, RpmArtifactMetadata> repositories = 1;
}

View File

@ -0,0 +1,37 @@
syntax = "proto3";
package resf.peridot.v1;
import "google/protobuf/any.proto";
import "google/protobuf/wrappers.proto";
import "third_party/peridot/proto/v1/package.proto";
import "third_party/peridot/proto/v1/project.proto";
import "google/api/annotations.proto";
option go_package = "peridot.resf.org/peridot/pb;peridotpb";
// SearchService provides full-text search across Peridot entities.
service SearchService {
  // Search streams back result batches for the given query.
  rpc Search (SearchRequest) returns (stream SearchResponse) {
    option (google.api.http) = {
      post: "/v1/search"
      body: "*"
    };
  }
}
// SearchRequest is the request message for SearchService.Search.
message SearchRequest {
  // Free-form search query.
  string query = 1;
  // Optional project to scope the search to.
  google.protobuf.StringValue project_id = 2;
}
// SearchResponse carries one batch of hits. Each hit is an Any wrapping a
// typed hit message such as SearchHitPackages or SearchHitProjects.
message SearchResponse {
  repeated google.protobuf.Any hits = 1;
}
// SearchHitPackages is a batch of package hits.
message SearchHitPackages {
  repeated Package packages = 1;
}
// SearchHitProjects is a batch of project hits.
message SearchHitProjects {
  repeated Project projects = 1;
}

244
third_party/peridot/proto/v1/task.proto vendored Normal file
View File

@ -0,0 +1,244 @@
syntax = "proto3";
package resf.peridot.v1;
import "google/protobuf/descriptor.proto";
import "google/protobuf/any.proto";
import "google/protobuf/wrappers.proto";
import "google/protobuf/timestamp.proto";
import "google/api/httpbody.proto";
import "google/rpc/error_details.proto";
import "google/api/annotations.proto";
option go_package = "peridot.resf.org/peridot/pb;peridotpb";
// task_info annotates RPC methods that return AsyncTask with the concrete
// response/metadata types the parent task stores (see TaskInfo).
extend google.protobuf.MethodOptions {
  TaskInfo task_info = 9991;
}
// TaskService exposes read and cancel operations for async tasks.
service TaskService {
  // ListTasks returns a list of tasks from all projects
  // List mode won't return task responses. The reason being
  // responses being able to reach huge sizes.
  // To get the response for a specific task, you can use GetTask,
  // either on the specific subtask or the parent task.
  rpc ListTasks(ListTasksRequest) returns (ListTasksResponse) {
    option (google.api.http) = {
      get: "/v1/projects/{project_id=*}/tasks"
    };
  }
  // GetTask returns a specific task with the given ID
  rpc GetTask(GetTaskRequest) returns (GetTaskResponse) {
    option (google.api.http) = {
      get: "/v1/projects/{project_id=*}/tasks/{id=*}"
    };
  }
  // StreamTaskLogs streams the logs of a specific task with the given ID
  rpc StreamTaskLogs(StreamTaskLogsRequest) returns (stream google.api.HttpBody) {
    option (google.api.http) = {
      get: "/v1/projects/{project_id=*}/tasks/{id=*}/logs"
    };
  }
  // CancelTask cancels a task with the given ID.
  // Only parent tasks can be cancelled and if they're in the PENDING or RUNNING state.
  rpc CancelTask(CancelTaskRequest) returns (CancelTaskResponse) {
    option (google.api.http) = {
      post: "/v1/projects/{project_id=*}/tasks/{id=*}/cancel"
    };
  }
}
// TaskType identifies the kind of work a (sub)task performs.
// Values cover import, build, worker lifecycle, repo and signing operations.
enum TaskType {
  // Unknown value. Should never be used.
  TASK_TYPE_UNKNOWN = 0;
  TASK_TYPE_IMPORT = 1;
  TASK_TYPE_IMPORT_SRC_GIT = 2;
  TASK_TYPE_IMPORT_SRC_GIT_TO_DIST_GIT = 3;
  TASK_TYPE_IMPORT_DOWNSTREAM = 4;
  TASK_TYPE_IMPORT_UPSTREAM = 5;
  TASK_TYPE_BUILD = 6;
  TASK_TYPE_BUILD_SRPM = 7;
  TASK_TYPE_BUILD_ARCH = 8;
  TASK_TYPE_BUILD_SRPM_UPLOAD = 9;
  TASK_TYPE_BUILD_ARCH_UPLOAD = 10;
  TASK_TYPE_WORKER_PROVISION = 11;
  TASK_TYPE_WORKER_DESTROY = 12;
  TASK_TYPE_YUMREPOFS_UPDATE = 13;
  TASK_TYPE_KEYKEEPER_SIGN_ARTIFACT = 14;
  TASK_TYPE_SYNC_CATALOG = 15;
  TASK_TYPE_RPM_IMPORT = 16;
  TASK_TYPE_CREATE_HASHED_REPOSITORIES = 17;
  TASK_TYPE_LOOKASIDE_FILE_UPLOAD = 18;
  TASK_TYPE_RPM_LOOKASIDE_BATCH_IMPORT = 19;
  TASK_TYPE_CLONE_SWAP = 20;
  TASK_TYPE_UPDATEINFO = 21;
}
// TaskStatus is the lifecycle state of a task.
enum TaskStatus {
  TASK_STATUS_UNSPECIFIED = 0;
  // Queued but not yet running.
  TASK_STATUS_PENDING = 1;
  // Currently executing.
  TASK_STATUS_RUNNING = 2;
  // Finished successfully.
  TASK_STATUS_SUCCEEDED = 3;
  // Finished with an error.
  TASK_STATUS_FAILED = 4;
  // Cancelled before completion.
  TASK_STATUS_CANCELED = 5;
}
// Subtask is a singular subunit of an AsyncTask.
// This represents a single sub-action of a wider
// system of AsyncTasks
message Subtask {
  // Architecture this task was executed in
  string arch = 1;
  // The type of task that is being executed
  TaskType type = 2;
  // Response of the task
  google.protobuf.Any response = 3;
  // Any metadata for the task
  // If failed, should contain TaskErrorDetails
  google.protobuf.Any metadata = 4;
  // Status of the task
  TaskStatus status = 5;
  // The parent task (which usually is the primary)
  google.protobuf.StringValue parent_task_id = 6;
  // Unique identifier of the task
  string id = 7;
  // ID of submitter
  google.protobuf.StringValue submitter_id = 8;
  // Display name of submitter
  google.protobuf.StringValue submitter_display_name = 9;
  // Email of submitter
  google.protobuf.StringValue submitter_email = 10;
  // Finished time of the task
  google.protobuf.Timestamp finished_at = 11;
  // Created time of the task
  google.protobuf.Timestamp created_at = 12;
}
// TaskArtifact is a file produced by a task and stored in object storage.
message TaskArtifact {
  // The task ID this artifact belongs to
  string task_id = 1;
  // Name of artifact (this refers to object name).
  // To retrieve the file name, filepath.Base should
  // always be applied to this property.
  string name = 2;
  // SHA-256 hash of the artifact
  string hash_sha256 = 3;
  // Architecture of artifact
  string arch = 4;
  // Any extra metadata this artifact has
  google.protobuf.Any metadata = 5;
}
// AsyncTask represents a collection of subunits of tasks that wholly
// works towards a specific goal.
message AsyncTask {
  // Unique identifier of the parent task.
  string task_id = 1;
  // Subtasks contains all subtasks for a given task
  // Usually the requested task itself is represented as a subtask.
  // The primary subtask is always the first in order
  repeated Subtask subtasks = 2;
  // Whether the task has finished (in any terminal state).
  bool done = 3;
}
// GetTaskRequest is the request message for TaskService.GetTask.
message GetTaskRequest {
  // ID of the task to fetch.
  string id = 1;
  // Project the task belongs to.
  google.protobuf.StringValue project_id = 2;
}
// GetTaskResponse is the response message for TaskService.GetTask.
message GetTaskResponse {
  AsyncTask task = 1;
}
// ListTasksRequest is the request message for TaskService.ListTasks.
message ListTasksRequest {
  google.protobuf.StringValue project_id = 1;
  // The page to request
  int32 page = 2;
  // Maximum amount of results to return
  // Minimum: 1
  // Maximum: 100
  int32 limit = 3;
}
// ListTasksResponse is the response message for TaskService.ListTasks.
message ListTasksResponse {
  repeated AsyncTask tasks = 1;
  // Total number of tasks on the server
  int64 total = 2;
  // Limit from request
  int32 size = 3;
  // Current page
  int32 page = 4;
}
// StreamTaskLogsRequest is the request message for TaskService.StreamTaskLogs.
message StreamTaskLogsRequest {
  string project_id = 1;
  // ID of the task whose logs to stream.
  string id = 2;
  // Whether the ID refers to the parent task rather than a subtask.
  bool parent = 3;
}
// CancelTaskRequest is the request message for TaskService.CancelTask.
message CancelTaskRequest {
  string project_id = 1;
  string id = 2;
}
message CancelTaskResponse {}
// TaskCancelledDetails describes why a task was cancelled.
message TaskCancelledDetails {
  string code = 1;
  string cause = 2;
  google.protobuf.Any extra = 3;
}
// ProvisionWorkerMetadata is metadata attached to worker provision tasks.
message ProvisionWorkerMetadata {
  string name = 1;
  string purpose = 2;
  string task_id = 3;
}
// TaskErrorDetails is stored in a failed subtask's metadata and mirrors
// the google.rpc error detail types.
message TaskErrorDetails {
  google.rpc.ErrorInfo error_info = 1;
  oneof error_type {
    google.rpc.RetryInfo retry_info = 2;
    google.rpc.DebugInfo debug_info = 3;
    google.rpc.QuotaFailure quota_failure = 4;
    google.rpc.PreconditionFailure precondition_failure = 5;
    google.rpc.BadRequest bad_request = 6;
    google.rpc.Help help = 7;
    google.rpc.LocalizedMessage localized_message = 8;
  }
}
// TaskInfo is a option tag to supply information about an async task.
// This is currently ignored silently, but all clients are encouraged
// to implement this.
message TaskInfo {
  // Response type is the data format that the parent task stores
  // about the response for the specific async/long running task.
  string response_type = 1;
  // Metadata type is the data format that the parent task stores
  // about the metadata for the specific async/long running task.
  string metadata_type = 2;
}

View File

@ -0,0 +1,35 @@
load("@rules_proto//proto:defs.bzl", "proto_library")
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
# Proto schema for the yumrepofs service (vendored).
proto_library(
    name = "yumrepofspb_proto",
    srcs = ["yumrepofs.proto"],
    visibility = ["//visibility:public"],
    deps = [
        "@go_googleapis//google/api:httpbody_proto",
        "@googleapis//google/api:annotations_proto",
    ],
)

# Go bindings generated with both the gRPC compiler and the repo's
# grpc-gateway compiler.
go_proto_library(
    name = "yumrepofspb_go_proto",
    compilers = [
        "@io_bazel_rules_go//proto:go_grpc",
        "//:go_gen_grpc_gateway",
    ],
    importpath = "peridot.resf.org/peridot/yumrepofs/pb",
    proto = ":yumrepofspb_proto",
    visibility = ["//visibility:public"],
    deps = [
        "@go_googleapis//google/api:httpbody_go_proto",
        "@org_golang_google_genproto//googleapis/api/annotations",
    ],
)

# Importable Go library that embeds the generated proto code.
go_library(
    name = "pb",
    embed = [":yumrepofspb_go_proto"],
    importpath = "peridot.resf.org/peridot/yumrepofs/pb",
    visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,126 @@
syntax = "proto3";
package resf.peridot.yumrepofs.v1;
import "google/api/annotations.proto";
import "google/api/httpbody.proto";
option go_package = "peridot.resf.org/peridot/yumrepofs/pb;yumrepofspb";
// YumrepofsService serves yum repository content (RPMs, repodata, keys)
// over HTTP routes shaped like a regular yum repo layout.
service YumrepofsService {
  // GetRpm resolves an RPM to a redirect URL in blob storage.
  rpc GetRpm(GetRpmRequest) returns (GetRpmResponse) {
    option (google.api.http) = {
      get: "/v1/projects/{project_id=*}/repo/{repo_name=*}/{arch=*}/Packages/{parent_task_id=*}/{file_name=**}"
    };
  }
  // GetBlob serves a repodata blob as raw bytes.
  rpc GetBlob(GetBlobRequest) returns (google.api.HttpBody) {
    option (google.api.http) = {
      get: "/v1/projects/{project_id=*}/repo/{repo_name=*}/{arch=*}/repodata/{blob=*}"
    };
  }
  // GetRepoMd serves the repository's repomd.xml.
  rpc GetRepoMd(GetRepoMdRequest) returns (google.api.HttpBody) {
    option (google.api.http) = {
      get: "/v1/projects/{project_id=*}/repo/{repo_name=*}/{arch=*}/repodata/repomd.xml"
    };
  }
  // GetRepoMdSignature serves the detached signature for repomd.xml.
  rpc GetRepoMdSignature(GetRepoMdRequest) returns (google.api.HttpBody) {
    option (google.api.http) = {
      get: "/v1/projects/{project_id=*}/repo/{repo_name=*}/{arch=*}/repodata/repomd.xml.asc"
    };
  }
  // GetPublicUrl returns the public base URL of this yumrepofs instance.
  rpc GetPublicUrl(GetPublicUrlRequest) returns (GetPublicUrlResponse) {
    option (google.api.http) = {
      get: "/v1/public_url"
    };
  }
  // GetPublicKey serves the repository's GPG public key.
  rpc GetPublicKey(GetPublicKeyRequest) returns (google.api.HttpBody) {
    option (google.api.http) = {
      get: "/v1/projects/{project_id=*}/repo/{repo_name=*}/{arch=*}/RPM-GPG-KEY"
    };
  }
  // GetUrlMappings returns the URL mapping table for a repository/arch.
  rpc GetUrlMappings(GetUrlMappingsRequest) returns (GetUrlMappingsResponse) {
    option (google.api.http) = {
      get: "/v1/projects/{project_id=*}/repo/{repo_name=*}/{arch=*}/url_mappings"
    };
  }
}
// GetRpmRequest identifies an RPM by project, repo, arch, producing task
// and file name (mirrors the HTTP route).
message GetRpmRequest {
  string project_id = 1;
  string repo_name = 2;
  string arch = 3;
  // Parent task that produced the RPM.
  string parent_task_id = 4;
  string file_name = 5;
}
// GetRpmResponse carries the URL the client should be redirected to.
message GetRpmResponse {
  string redirect_url = 1;
}
// GetRepoMdRequest identifies a repository's repomd.xml (also used for
// its signature).
message GetRepoMdRequest {
  string project_id = 1;
  string repo_name = 2;
  string arch = 3;
}
// GetBlobRequest identifies a single repodata blob.
message GetBlobRequest {
  string project_id = 1;
  string repo_name = 2;
  string arch = 3;
  // Blob name within repodata.
  string blob = 4;
}
message GetPublicUrlRequest {}
// GetPublicUrlResponse carries the public base URL of the service.
message GetPublicUrlResponse {
  string public_url = 1;
}
// RepositoryChange records what changed in one repository during an update.
message RepositoryChange {
  // Name of the repository
  string name = 1;
  // Added packages
  repeated string added_packages = 2;
  // Modified packages
  repeated string modified_packages = 3;
  // Removed packages
  repeated string removed_packages = 4;
  // Added modules
  repeated string added_modules = 5;
  // Modified modules
  repeated string modified_modules = 6;
  // Removed modules
  repeated string removed_modules = 7;
}
// UpdateRepoTask is the async task result of a yumrepofs update.
message UpdateRepoTask {
  // List of repo changes
  repeated RepositoryChange changes = 1;
}
// GetPublicKeyRequest identifies the repository whose GPG key to serve.
message GetPublicKeyRequest {
  string project_id = 1;
  string repo_name = 2;
  string arch = 3;
}
// GetUrlMappingsRequest identifies the repository to fetch mappings for.
message GetUrlMappingsRequest {
  string project_id = 1;
  string repo_name = 2;
  string arch = 3;
}
// GetUrlMappingsResponse maps repository paths to storage URLs.
message GetUrlMappingsResponse {
  map<string, string> url_mappings = 1;
}

27
tools/kernelmanager/BUILD vendored Normal file
View File

@ -0,0 +1,27 @@
# Copyright 2023 Peridot Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load("//devtools/taskrunner2:defs.bzl", "taskrunner2")
# Dev task runner for KernelManager: starts the devtemporal and devdex
# helper targets, then watches and re-runs the kernelmanager dev server.
taskrunner2(
    name = "kernelmanager",
    # Forward the frontend development flags to the watched target.
    dev_frontend_flags = True,
    targets = [
        "//devtools/devtemporal",
        "//devtools/devdex",
    ],
    watch_targets = [
        "//tools/kernelmanager/cmd/kernelmanager_dev",
    ],
)

View File

@ -0,0 +1,20 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
# Library for the kernel_repack_test binary (manual smoke test for the
# kernel repack pipeline).
go_library(
    name = "kernel_repack_test_lib",
    srcs = ["main.go"],
    importpath = "go.resf.org/peridot/tools/kernelmanager/cmd/kernel_repack_test",
    visibility = ["//visibility:private"],
    deps = [
        "//tools/kernelmanager/kernel_repack/kernelorg",
        "//tools/kernelmanager/kernel_repack/v1:kernel_repack",
        "//vendor/github.com/go-git/go-billy/v5/osfs",
        "//vendor/github.com/pkg/errors",
    ],
)

# Runnable binary wrapping the library above.
go_binary(
    name = "kernel_repack_test",
    embed = [":kernel_repack_test_lib"],
    visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,48 @@
package main
import (
"github.com/go-git/go-billy/v5/osfs"
"github.com/pkg/errors"
"go.resf.org/peridot/tools/kernelmanager/kernel_repack/kernelorg"
repack_v1 "go.resf.org/peridot/tools/kernelmanager/kernel_repack/v1"
"os"
"time"
)
const exportDir = "/tmp/kernel_repack_test"
func main() {
ltVersion, ltTarball, _, err := kernelorg.GetLatestLT("6.1.")
if err != nil {
panic(errors.Wrap(err, "failed to get latest LT version"))
}
// BuildID should be YYYYMMDDHHMM
buildID := time.Now().Format("200601021504")
out, err := repack_v1.LT(&repack_v1.Input{
Version: ltVersion,
BuildID: buildID,
KernelPackage: "kernel-lt",
Tarball: ltTarball,
})
if err != nil {
panic(errors.Wrap(err, "failed to repack LT kernel"))
}
// Create output directory, but first remove it if it already exists
err = os.RemoveAll(exportDir)
if err != nil {
panic(errors.Wrap(err, "failed to remove output directory"))
}
err = os.MkdirAll(exportDir, 0755)
if err != nil {
panic(errors.Wrap(err, "failed to create output directory"))
}
err = out.ToFS(osfs.New(exportDir))
if err != nil {
panic(errors.Wrap(err, "failed to write output files"))
}
}

View File

@ -0,0 +1,23 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
# Library for the kernelmanager_dev binary (local all-in-one dev server:
# UI on :9111, API mounted under /api).
go_library(
    name = "kernelmanager_dev_lib",
    srcs = ["main.go"],
    importpath = "go.resf.org/peridot/tools/kernelmanager/cmd/kernelmanager_dev",
    visibility = ["//visibility:private"],
    deps = [
        "//base/go",
        "//base/go/kv/dynamodb",
        "//tools/kernelmanager/rpc",
        "//tools/kernelmanager/ui",
        "//vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime",
        "//vendor/github.com/urfave/cli/v2:cli",
        "//vendor/go.temporal.io/sdk/client",
    ],
)

# Runnable binary wrapping the library above.
go_binary(
    name = "kernelmanager_dev",
    embed = [":kernelmanager_dev_lib"],
    visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,128 @@
// Copyright 2023 Peridot Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package main implements the dev server for KernelManager.
// This runs the services just like it would be structured in production. (The RESF way)
// This means:
// - localhost:9111 serves the UI
// - localhost:9111/api serves the API
package main
import (
"fmt"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/urfave/cli/v2"
base "go.resf.org/peridot/base/go"
"go.resf.org/peridot/base/go/kv/dynamodb"
kernelmanager_rpc "go.resf.org/peridot/tools/kernelmanager/rpc"
kernelmanager_ui "go.resf.org/peridot/tools/kernelmanager/ui"
"go.temporal.io/sdk/client"
"net/http"
"os"
)
var (
apiGrpcPort = 3734
)
// setupUi prepares the KernelManager frontend for in-process serving.
// FrontendServer is invoked with NoRun set, so it only builds the handler
// (exposed via info.MuxHandler) instead of listening on its own port;
// the dev server mounts that handler itself on :9111.
func setupUi(ctx *cli.Context) (*base.FrontendInfo, error) {
info := base.FlagsToFrontendInfo(ctx)
assets := kernelmanager_ui.InitFrontendInfo(info)
// NoRun: do not start a listener; run() serves info.MuxHandler directly.
info.NoRun = true
info.Self = "http://localhost:9111"
err := base.FrontendServer(info, assets)
if err != nil {
return nil, err
}
return info, nil
}
// setupApi starts the KernelManager gRPC server on apiGrpcPort in a
// background goroutine and returns its grpc-gateway mux so the dev
// server can mount the REST API under /api.
//
// Dependencies are wired for local development: Temporal from flags, and
// DynamoDB against a localstack endpoint on http://localhost:4566.
func setupApi(ctx *cli.Context) (*runtime.ServeMux, error) {
	oidcInterceptorDetails, err := base.FlagsToOidcInterceptorDetails(ctx)
	if err != nil {
		return nil, err
	}
	// Dev server permits unauthenticated requests.
	oidcInterceptorDetails.AllowUnauthenticated = true

	temporalClient, err := base.GetTemporalClientFromFlags(ctx, client.Options{})
	if err != nil {
		return nil, err
	}

	// Local DynamoDB (localstack) endpoint used for development.
	kv, err := dynamodb.New("http://localhost:4566", "kernelmanager")
	if err != nil {
		return nil, err
	}

	s, err := kernelmanager_rpc.NewServer(
		kv,
		temporalClient,
		oidcInterceptorDetails,
		base.WithGRPCPort(apiGrpcPort),
		base.WithNoGRPCGateway(),
		base.WithNoMetrics(),
	)
	// Fix: this error was previously ignored, which could dereference a
	// nil server below when NewServer fails.
	if err != nil {
		return nil, err
	}

	go func() {
		if err := s.Start(); err != nil {
			base.LogFatalf("failed to start kernelmanager_api: %v", err)
		}
	}()

	return s.GatewayMux(), nil
}
// run wires the dev server together: the UI handler at "/" and the API
// gateway at "/api", both served from a single listener on :9111.
func run(ctx *cli.Context) error {
info, err := setupUi(ctx)
if err != nil {
return err
}
apiMux, err := setupApi(ctx)
if err != nil {
return err
}
// Mount the gRPC gateway under /api, stripping the prefix so gateway
// routes match their declared paths.
http.HandleFunc("/api/", http.StripPrefix("/api", apiMux).ServeHTTP)
// Wrap every request with an x-peridot-beta header so downstream
// handlers treat dev traffic as beta.
handler := func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
r.Header.Set("x-peridot-beta", "true")
h.ServeHTTP(w, r)
})
}
// Start server
port := 9111
base.LogInfof("Starting server on port %d", port)
return http.ListenAndServe(fmt.Sprintf(":%d", port), handler(info.MuxHandler))
}
func main() {
app := &cli.App{
Name: "kernelmanager_dev",
Action: run,
Flags: base.WithFlags(
base.WithDatabaseFlags("kernelmanager"),
base.WithTemporalFlags("", "kernelmanager_queue"),
base.WithFrontendAuthFlags(""),
),
}
if err := app.Run(os.Args); err != nil {
base.LogFatalf("failed to start kernelmanager_dev: %v", err)
}
}

View File

@ -0,0 +1,21 @@
# Build targets for the production KernelManager API server binary.
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
go_library(
name = "kernelmanager_server_lib",
srcs = ["main.go"],
importpath = "go.resf.org/peridot/tools/kernelmanager/cmd/kernelmanager_server",
visibility = ["//visibility:private"],
deps = [
"//base/go",
"//base/go/kv/dynamodb",
"//tools/kernelmanager/rpc",
"//vendor/github.com/urfave/cli/v2:cli",
"//vendor/go.temporal.io/sdk/client",
],
)
go_binary(
name = "kernelmanager_server",
embed = [":kernelmanager_server_lib"],
visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,65 @@
package main
import (
"github.com/urfave/cli/v2"
base "go.resf.org/peridot/base/go"
"go.resf.org/peridot/base/go/kv/dynamodb"
kernelmanager_rpc "go.resf.org/peridot/tools/kernelmanager/rpc"
"go.temporal.io/sdk/client"
"os"
)
// run builds the production KernelManager gRPC/gateway server from flags
// (OIDC, Temporal, DynamoDB) and blocks until the server exits.
func run(ctx *cli.Context) error {
oidcInterceptorDetails, err := base.FlagsToOidcInterceptorDetails(ctx)
if err != nil {
return err
}
// Unauthenticated requests are allowed through the interceptor.
// NOTE(review): presumably individual RPCs enforce auth where needed —
// confirm against the rpc server implementation.
oidcInterceptorDetails.AllowUnauthenticated = true
temporalClient, err := base.GetTemporalClientFromFlags(ctx, client.Options{})
if err != nil {
return err
}
// NOTE(review): dynamodb.New is called with (endpoint, table) in the dev
// server and worker, but with only the table name here — confirm the
// function is variadic or that the endpoint argument is optional.
kv, err := dynamodb.New(ctx.String("dynamodb-table"))
if err != nil {
return err
}
s, err := kernelmanager_rpc.NewServer(
kv,
temporalClient,
oidcInterceptorDetails,
base.FlagsToGRPCServerOptions(ctx)...,
)
if err != nil {
return err
}
return s.Start()
}
// main wires up the kernelmanager_server CLI with database, Temporal,
// gRPC (:6677), gRPC-gateway (:6678), OIDC and DynamoDB flags, then runs
// the server.
func main() {
	app := &cli.App{
		Name:   "kernelmanager_server",
		Action: run,
		Flags: base.WithFlags(
			base.WithDatabaseFlags("kernelmanager"),
			base.WithTemporalFlags("", "kernelmanager_queue"),
			base.WithGrpcFlags(6677),
			base.WithGatewayFlags(6678),
			base.WithOidcFlags("", "releng"),
			[]cli.Flag{
				&cli.StringFlag{
					Name:    "dynamodb-table",
					Usage:   "DynamoDB table name",
					EnvVars: []string{"DYNAMODB_TABLE"},
					Value:   "kernelmanager",
				},
			},
		),
	}
	if err := app.Run(os.Args); err != nil {
		// Fix: this message previously said "mship_api" — a copy-paste
		// leftover from another service's main().
		base.LogFatalf("failed to start kernelmanager_server: %v", err)
	}
}

View File

@ -0,0 +1,19 @@
# Build targets for the standalone KernelManager UI server binary.
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
go_library(
name = "kernelmanager_ui_lib",
srcs = ["main.go"],
importpath = "go.resf.org/peridot/tools/kernelmanager/cmd/kernelmanager_ui",
visibility = ["//visibility:private"],
deps = [
"//base/go",
"//tools/kernelmanager/ui",
"//vendor/github.com/urfave/cli/v2:cli",
],
)
go_binary(
name = "kernelmanager_ui",
embed = [":kernelmanager_ui_lib"],
visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,41 @@
// Copyright 2023 Peridot Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"github.com/urfave/cli/v2"
base "go.resf.org/peridot/base/go"
kernelmanager_ui "go.resf.org/peridot/tools/kernelmanager/ui"
"os"
)
// run serves the KernelManager frontend as a standalone server, using
// flag-derived frontend configuration. Blocks until the server exits.
func run(ctx *cli.Context) error {
info := base.FlagsToFrontendInfo(ctx)
assets := kernelmanager_ui.InitFrontendInfo(info)
return base.FrontendServer(info, assets)
}
// main configures and runs the standalone KernelManager UI server,
// defaulting to port 9112.
func main() {
	app := &cli.App{
		Name:   "kernelmanager_ui",
		Flags:  base.WithFrontendFlags(9112),
		Action: run,
	}

	err := app.Run(os.Args)
	if err != nil {
		base.LogFatalf("failed to start kernelmanager_ui: %v", err)
	}
}

View File

@ -0,0 +1,24 @@
# Build targets for the KernelManager Temporal worker binary.
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
go_library(
name = "kernelmanager_worker_lib",
srcs = ["main.go"],
importpath = "go.resf.org/peridot/tools/kernelmanager/cmd/kernelmanager_worker",
visibility = ["//visibility:private"],
deps = [
"//base/go",
"//base/go/forge/gitlab",
"//base/go/kv/dynamodb",
"//base/go/storage/detector",
"//tools/kernelmanager/worker",
"//vendor/github.com/urfave/cli/v2:cli",
"//vendor/go.temporal.io/sdk/client",
"//vendor/go.temporal.io/sdk/worker",
],
)
go_binary(
name = "kernelmanager_worker",
embed = [":kernelmanager_worker_lib"],
visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,117 @@
// Copyright 2023 Peridot Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
_ "embed"
"github.com/urfave/cli/v2"
base "go.resf.org/peridot/base/go"
"go.resf.org/peridot/base/go/forge/gitlab"
"go.resf.org/peridot/base/go/kv/dynamodb"
storage_detector "go.resf.org/peridot/base/go/storage/detector"
kernelmanager_worker "go.resf.org/peridot/tools/kernelmanager/worker"
"go.temporal.io/sdk/client"
"go.temporal.io/sdk/worker"
"os"
)
// run assembles the Temporal worker: Temporal client, DynamoDB-backed KV
// store, a GitLab forge client and a flag-detected storage backend, then
// registers the kernel-update workflow and activities and blocks until
// interrupted.
func run(ctx *cli.Context) error {
temporalClient, err := base.GetTemporalClientFromFlags(ctx, client.Options{})
if err != nil {
return err
}
kv, err := dynamodb.New(ctx.String("dynamodb-endpoint"), ctx.String("dynamodb-table"))
if err != nil {
return err
}
// Forge client used for pushing repack results; the identity below is
// the commit author the worker uses.
gitlabForge := gitlab.New(
ctx.String("gitlab-host"),
"",
ctx.String("gitlab-username"),
ctx.String("gitlab-password"),
"RESF KernelManager",
"releng+kernelmanager@rockylinux.org",
true,
)
// Storage backend (e.g. S3/filesystem) is detected from flags.
st, err := storage_detector.FromFlags(ctx)
if err != nil {
return err
}
w := worker.New(temporalClient, ctx.String("temporal-task-queue"), worker.Options{})
workerServer := kernelmanager_worker.New(
kv,
gitlabForge,
st,
)
// Register workflows
w.RegisterWorkflow(kernelmanager_worker.TriggerKernelUpdateWorkflow)
// Register activities
w.RegisterActivity(workerServer)
// Start worker
return w.Run(worker.InterruptCh())
}
// main declares the kernelmanager_worker CLI flags (Temporal, storage,
// DynamoDB and GitLab credentials) and runs the worker.
func main() {
flags := base.WithFlags(
base.WithTemporalFlags("", "kernelmanager_queue"),
base.WithStorageFlags(),
[]cli.Flag{
// Optional custom DynamoDB endpoint (e.g. localstack); empty means
// the client's default endpoint is used.
&cli.StringFlag{
Name: "dynamodb-endpoint",
Usage: "DynamoDB endpoint",
EnvVars: []string{"DYNAMODB_ENDPOINT"},
},
&cli.StringFlag{
Name: "dynamodb-table",
Usage: "DynamoDB table name",
EnvVars: []string{"DYNAMODB_TABLE"},
Value: "kernelmanager",
},
&cli.StringFlag{
Name: "gitlab-host",
Usage: "GitLab host",
EnvVars: []string{"GITLAB_HOST"},
Value: "git.rockylinux.org",
},
&cli.StringFlag{
Name: "gitlab-username",
Usage: "GitLab username",
EnvVars: []string{"GITLAB_USERNAME"},
},
&cli.StringFlag{
Name: "gitlab-password",
Usage: "GitLab password",
EnvVars: []string{"GITLAB_PASSWORD"},
},
},
)
app := &cli.App{
Name: "kernelmanager_worker",
Action: run,
Flags: flags,
}
if err := app.Run(os.Args); err != nil {
base.LogFatalf("failed to run kernelmanager_worker: %v", err)
}
}

View File

@ -0,0 +1,9 @@
# Library holding the shared kernel repack output types (see repack.go).
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "kernel_repack",
srcs = ["repack.go"],
importpath = "go.resf.org/peridot/tools/kernelmanager/kernel_repack",
visibility = ["//visibility:public"],
deps = ["//vendor/github.com/go-git/go-billy/v5:go-billy"],
)

View File

@ -0,0 +1,19 @@
# Library for downloading and PGP-verifying kernel.org releases.
# The .asc files are the embedded maintainer public keys used to verify
# detached tarball signatures.
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "kernelorg",
srcs = [
"kernelorg.go",
"releaseatom.go",
],
embedsrcs = [
"gregkh.asc",
"torvalds.asc",
],
importpath = "go.resf.org/peridot/tools/kernelmanager/kernel_repack/kernelorg",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/xi2/xz",
"//vendor/golang.org/x/crypto/openpgp",
],
)

View File

@ -0,0 +1,51 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQINBE58tdUBEADY5iQsoL4k8l06dNt+uP2lH8IPi14M51/tOHsW1ZNc8Iok0stH
+uA8w0LpN97UgNhsvXFEkIK2JjLalasUTiUoIeeTshD9t+ekFBx5a9SbLCFlBrDS
TwfieK2xalzomoL22N5ztj1XbdLWh6NRM6kKMeYvgAGo8p884WJk4pPIJK6G0wEw
e9/TG6ilRSLOtxyaF9yZ+FC1eOA1S47Ld2K25Y5GsQF5agwi7nES+9tVVBZp97kB
8IOvELeiSiY0xFXi60yfwIlK6x9dfcxsx5nCyrp2qdqQiPiMD0EJMiuA6wymoi5W
XtmfCpweTB8TvW8Y8uqrwYApzmDleBDTIDP0vCY1o9eftJcWWMkRKC9c7Ziy4nT6
TzmVkNXgqC8/BuOQbpU7I/1VCMoa6e+2a8jrgy5to4dGgu6xQ6jTxWbvgDeB6Hct
WGqf8f9s5lSpH8D8OZLDOXKolqnBd5YrJr0Qmpq4cCcIqwNCMbURtsTpbW/EdWl+
AKwnStXXLI5O6Hg+m4c3O8ZwbzcnAOgTJePm2Xoi71t9SbAZZx1/W7p6/57UGrXR
Q4WfiwpOPD0siF33yO2L7G7Gmm4zh8ieX8aS8guqfWFhuSsDta77F2FB9ozD9WN0
Z5tJowiy3Z1VkxvZjZH8IbcB05yBBBV47BJxrPnSuDT+w45yNTqZ6m4VYwARAQAB
tCZHcmVnIEtyb2FoLUhhcnRtYW4gPGdyZWdraEBrZXJuZWwub3JnPokCTgQTAQgA
OBYhBGR/KGVIlOO9RXGZvjjbvchgkmk+BQJaHvNAAhsDBQsJCAcCBhUICQoLAgQW
AgMBAh4BAheAAAoJEDjbvchgkmk+TLEQAJ1Ux/6n//f2jEVBdWb13qYFBBxKJMNe
TU9yPMedQAAhrt68IU1Bt8+/nmZLm1iXWOvPQ01921i3HBxANnbTqEYYYWnQJJyR
OiyTuwY7HWlguQXlkxLa1mahVuFee6DHO+O8IGU8IM+PHdEL08e629sIluu3WGmN
XXJ307j47UBu3QFA67YQ7YBmChl7AHBcSpKSplgN82tbAYtrm5ywYHM5uMFhmbw/
DJpzLdFsnzRT9E7PKhH+q1MyPojGT4Oytj3D1QZrhp8yZ+Zp8TQnleXeBczLfpQP
duzurqVomZpWwIZLHCgBJRWmz7/M0kTDIndQle9LVcJtJqasrRmgL3NsKrYYBw+j
HnBe2hp8aq6W3DVaUmkSdshran9ZCaLCpxt62NAgUkI/eg1sSljo1aeXmF33ymYI
pxavW5CGUYKlqYRLUT7en6t/mFiYCwPD22KOdLSfsvVG+pr4UNsfSZdIF+W9/FLW
7HJVZGMIldsrGFv4lOtqiXdbRafMtylYw/mU+xhu9+NslRRrbi1TlWS/BH7ULYu9
zKahApf1DFRcrx0PyvtlFleoDZa88uIbmcUO8GzZXEhejTv9vNnbmjgvYsRywFcJ
PkJ/TObfasvvSU9GZn6aU36Y7GYSUGjD1anLiUpr0FKkruymqBdXHaXGJ44GZ8Hh
d5ZMTavwEX7BuQINBE58tdUBEACyIiG/54lsujOhnwTLf9hAVqS7hPhWFXlEBqRo
Vw9q1ITqPVKaMQQ/2OZnOTmibFS938kmEJXVRTmk+z7tCdTeRVyAXJILW093oPkx
g/ViHycumaVowSn+iuH68E8EaSwSqmYXAP+/Cs8RR3kR41eHBFWVTaEbVai2Lk5W
3ZZB+htmMYAoqMQH6r7Bo7INhEu15esc97mgR9QxV+0ti1x5Ax55aFjk2g0Xv+Id
lZxR3++sbCQoLdrHiafdZCPG4bkewlKzuZpETIZ6f8/b+r3NqnDL/BDCuthYgWqh
DWioOemoPCHLo2q9WMFxu6GITrmIlsahqSVMIOfofx3mMOUPLntFMBzwlGUdeKc0
AisWoETIm4rnzpUulEWwnARuOHXRQLdr6qqKSKNOvZqMX6rejhooOfXGL+z0jHx1
9uXVdNTbPtkdXqO6opLZFftwkkrP3VAK3E8cqL7mnSAFGmQCd16rEtxRs2bD2DGD
TPX2majnxepphpgmUSTDBElrlo3PGZ1QL+Jbsg9fL5bK/yfaztLdvLODycqiRqMx
FywS7bGglYocQRzGO0Cv6QtI0Z4i6Xp4gwx2PO+UyIht565rUfOJ51vVjThurbmi
HAeoQ/w5kq+9q0aDRKQmv3k+14gjsphh0uYFkfveEdfida0GW3dU/IEqv50EJbiS
CO6M+wARAQABiQIfBBgBAgAJBQJOfLXVAhsMAAoJEDjbvchgkmk+MFMQAL7qdYsq
5R3HIdkR38aZhUQWDTsLZqRMSQcvilMw2ekYzE0xxW8N2K5JFwNXDEKGxdr08ZzW
bOQdiN7trKfwA9THhcQ/zHubXm2XIyUwR/AwXyjEcBcF932x+F+zHCw+l2DCBM+1
aNHt7E8wlamTNuzZj2a4Vh49OS0CJYi3lLsl+eL2MC+uoZbz7jT4RLCLmm1RmGIV
VneGWn4XCNFdyxmPyicAWauKapc1TxCzJMXLwhoYtfRg3hd9WfPaZboFdNtO5CIx
W/tde6F+BXs5btQRqXAvbNdmXfa4ctysy6sAqMQIG2S7wsGhnxCw0opDyYKQTr6Y
dYEwKV9df+2tv5HzmGBFPobZ6k5uJOepE65ZUV/ggoMUNmmdd3HWhbR08wDp52CT
RI10tApRMsSF156C58I3Li3E01W95DR+ywKEf8dSZm0daJ4SDgf0cBSXFE9/5pSR
bV1GnBgkkgqQrS9B3sFNGK9Li+TcedYu6TRuGnCCTbXis9TzMmmGSxE1WP3rjWdQ
P88mjSrO2P0gm2uauMjpg3P7JEilEHz+8kPColOrVJnlgGn6lLjSPeOjNEUbCp8Z
nOILll8r1FsHa6fgAGH83JGVOIBiEVVikrxugkyg7NB7WeVtXAN/pa3Lq+h6Sss5
Oe7hjkaljharkBETLNKF3RynOcOebKM4gpZU
=4cHa
-----END PGP PUBLIC KEY BLOCK-----

View File

@ -0,0 +1,130 @@
package kernelorg
import (
"bytes"
"compress/gzip"
_ "embed"
"fmt"
"github.com/xi2/xz"
"golang.org/x/crypto/openpgp"
"io"
"net/http"
)
//go:embed gregkh.asc
var gregKHPublicKey []byte
//go:embed torvalds.asc
var torvaldsPublicKey []byte
// getKeyring assembles an OpenPGP keyring from the embedded public keys
// (Greg Kroah-Hartman and Linus Torvalds) used to sign kernel.org
// release tarballs.
func getKeyring() (openpgp.EntityList, error) {
	var ring openpgp.EntityList
	for _, armored := range [][]byte{gregKHPublicKey, torvaldsPublicKey} {
		entities, err := openpgp.ReadArmoredKeyRing(bytes.NewReader(armored))
		if err != nil {
			return nil, err
		}
		ring = append(ring, entities...)
	}
	return ring, nil
}
// verifyTarball checks tarball's detached armored OpenPGP signature
// against the embedded maintainer keyring and returns the signing entity.
// The tarball is decompressed first (gzip when gz is true, xz otherwise)
// because the signature covers the uncompressed tar stream.
func verifyTarball(tarball []byte, gz bool, signature []byte) (*openpgp.Entity, error) {
// unpack tarball
var unpackedTarball []byte
if gz {
gzipRead, err := gzip.NewReader(bytes.NewReader(tarball))
if err != nil {
return nil, err
}
unpackedTarball, err = io.ReadAll(gzipRead)
if err != nil {
return nil, err
}
} else {
// 0 = no dictionary size limit for the xz reader.
xzRead, err := xz.NewReader(bytes.NewReader(tarball), 0)
if err != nil {
return nil, err
}
unpackedTarball, err = io.ReadAll(xzRead)
if err != nil {
return nil, err
}
}
keyRing, err := getKeyring()
if err != nil {
return nil, err
}
entity, err := openpgp.CheckArmoredDetachedSignature(keyRing, bytes.NewReader(unpackedTarball), bytes.NewReader(signature))
if err != nil {
return nil, err
}
return entity, nil
}
// download fetches the contents at url and returns the response body.
// Non-2xx responses are rejected so callers never mistake an error page
// (e.g. a 404 body) for a valid artifact — important for the mainline
// path, which has no signature check to catch corrupt downloads.
func download(url string) ([]byte, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// Fix: the status code was previously ignored.
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return nil, fmt.Errorf("fetching %s: unexpected status %s", url, resp.Status)
	}
	return io.ReadAll(resp.Body)
}
// downloadLT resolves the newest longterm release matching majorVersion,
// downloads its .tar.xz and detached signature from cdn.kernel.org, and
// verifies the signature before returning the version, tarball bytes and
// signing entity.
func downloadLT(majorVersion string) (string, []byte, *openpgp.Entity, error) {
latestVersion, err := GetLTVersion(majorVersion)
if err != nil {
return "", nil, nil, err
}
// e.g. "6.1.55" -> "6" for the /pub/linux/kernel/v6.x/ directory.
// NOTE(review): taking a single character breaks for two-digit majors
// (e.g. "10.x") — confirm before kernel 10 ships.
firstDigit := latestVersion[0:1]
downloadURL := fmt.Sprintf("https://cdn.kernel.org/pub/linux/kernel/v%s.x/linux-%s.tar.xz", firstDigit, latestVersion)
tarball, err := download(downloadURL)
if err != nil {
return "", nil, nil, err
}
signatureURL := fmt.Sprintf("https://cdn.kernel.org/pub/linux/kernel/v%s.x/linux-%s.tar.sign", firstDigit, latestVersion)
signature, err := download(signatureURL)
if err != nil {
return "", nil, nil, err
}
// false: the cdn tarball is xz-compressed, not gzip.
entity, err := verifyTarball(tarball, false, signature)
if err != nil {
return "", nil, nil, err
}
return latestVersion, tarball, entity, nil
}
// GetLatestML downloads the newest mainline kernel tarball from
// git.kernel.org. Mainline snapshots/RCs carry no detached signature, so
// the returned *openpgp.Entity is always nil and integrity rests on TLS.
func GetLatestML() (string, []byte, *openpgp.Entity, error) {
latestVersion, err := GetLatestMLVersion()
if err != nil {
return "", nil, nil, err
}
downloadURL := fmt.Sprintf("https://git.kernel.org/torvalds/t/linux-%s.tar.gz", latestVersion)
tarball, err := download(downloadURL)
if err != nil {
return "", nil, nil, err
}
// ML RC doesn't contain signature, so we're relying on TLS
return latestVersion, tarball, nil, nil
}
// GetLatestLT returns the newest longterm kernel whose version matches
// the given prefix (e.g. "6.1."), along with its signature-verified
// tarball and the signing entity.
func GetLatestLT(prefix string) (string, []byte, *openpgp.Entity, error) {
return downloadLT(prefix)
}

View File

@ -0,0 +1,90 @@
package kernelorg
import (
"encoding/xml"
"errors"
"net/http"
"strings"
)
// releaseAtomItem is one <item> in the kernel.org release feed; only the
// title is used (e.g. "6.1.55: longterm" or "6.6-rc4: mainline").
type releaseAtomItem struct {
Title string `xml:"title"`
}
// releaseAtomChannel is the feed's <channel> element.
type releaseAtomChannel struct {
Items []*releaseAtomItem `xml:"item"`
}
// releaseAtom models the kernel.org kdist RSS feed document.
type releaseAtom struct {
// RSS feed
// https://www.kernel.org/feeds/kdist.xml
XMLName xml.Name `xml:"rss"`
Channel *releaseAtomChannel `xml:"channel"`
}
// atomURL is the kernel.org release feed listing current releases per line.
const atomURL = "https://www.kernel.org/feeds/kdist.xml"
var (
// ErrNoRelease indicates the feed decoded but contained no items.
ErrNoRelease = errors.New("no release")
// ErrNotFound indicates no feed item matched the requested release type.
ErrNotFound = errors.New("not found")
)
func GetLatestMLVersion() (string, error) {
f, err := http.Get(atomURL)
if err != nil {
return "", err
}
defer f.Body.Close()
var atom releaseAtom
err = xml.NewDecoder(f.Body).Decode(&atom)
if err != nil {
return "", err
}
if atom.Channel == nil {
return "", ErrNoRelease
}
if len(atom.Channel.Items) == 0 {
return "", ErrNoRelease
}
for _, item := range atom.Channel.Items {
if strings.HasSuffix(item.Title, ": mainline") {
return strings.TrimSuffix(item.Title, ": mainline"), nil
}
}
return "", ErrNotFound
}
func GetLTVersion(majorVersion string) (string, error) {
f, err := http.Get(atomURL)
if err != nil {
return "", err
}
defer f.Body.Close()
var atom releaseAtom
err = xml.NewDecoder(f.Body).Decode(&atom)
if err != nil {
return "", err
}
if atom.Channel == nil {
return "", ErrNoRelease
}
if len(atom.Channel.Items) == 0 {
return "", ErrNoRelease
}
for _, item := range atom.Channel.Items {
if strings.HasPrefix(item.Title, majorVersion) && strings.HasSuffix(item.Title, ": longterm") {
return strings.TrimSuffix(item.Title, ": longterm"), nil
}
}
return "", ErrNotFound
}

View File

@ -0,0 +1,30 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQENBE55CJIBCACkn+aOLmsaq1ejUcXCAOXkO3w7eiLqjR/ziTL2KZ30p7bxP8cT
UXvfM7fwE7EnqCCkji25x2xsoKXB8AlUswIEYUFCOupj2BOsVmJ/rKZW7fCvKTOK
+BguKjebDxNbgmif39bfSnHDWrW832f5HrYmZn7a/VySDQFdul8Gl/R6gs6PHJbg
jjt+K7Px6cQVMVNvY/VBWdvA1zckO/4h6gf3kWWZN+Wlq8wv/pxft8QzNFgweH9o
5bj4tnQ+wMCLCLiDsgEuVawoOAkg3dRMugIUoiKoBKw7b21q9Vjp4jezRvciC6Ys
4kGUSFG1ZjIn3MpY3f3xZ3yuYwrxQ8JcA7KTABEBAAG0JExpbnVzIFRvcnZhbGRz
IDx0b3J2YWxkc0BrZXJuZWwub3JnPokBTgQTAQgAOBYhBKuvEcZaKXCxMKvjxHm+
PkMAQRiGBQJaHxkTAhsDBQsJCAcCBhUICQoLAgQWAgMBAh4BAheAAAoJEHm+PkMA
QRiGzMcH/ieyxrsHR0ng3pi+qy1/sLiTT4WEBN53+1FsGWdP6/DCD3sprFdWDkkB
Dfh9vPCVzPqX7siZMJxw3+wOfjNnGBRiGj7mTE/1XeXJHDwFRyBEVa/bY8ExLKbv
Bf+xpiWOg2Myj5RYaOUBFbOEtfTPob0FtvfZvK3PXkjODTHhDH7QJT2zNPivHG+E
R5VyF1yJEpl10rDTM91NhEeV0n4wpfZkgL8a3JSzo9H2AJX3y35+Dk9wtNge440Z
SVWAnjwxhBLX2R0LUszRhU925c0vP2l20eFncBmAT0NKpn7v9a670WHv45PluG+S
KKktf6b5/BtfqpC3eV58I6FEtSVpM1u5AQ0ETnkIkgEIAN+ybgD0IlgKRPJ3eksa
fd+KORseBWwxUy3GH0yAg/4jZCsfHZ7jpbRKzxNTKW1kE6ClSqehUsuXT5Vc1eh6
079erN3y+JNxl6zZPC9v+5GNyc28qSfNejt4wmwa/y86T7oQfgo77o8Gu/aO/xzO
jw7jSDDR3u9p/hFVtsqzptxZzvs3hVaiLS+0mar9qYZheaCUqOXOKVo38Vg5gkOh
MEwKvZs9x3fINU/t8ckxOHq6KiLap5Bq87XP0ZJsCaMBwdLYhOFxAiEVtlzwyo3D
vMplIahqqNELb71YDhpMq/Hu+42oR3pqASCPLfO/0GUSdAGXJVhv7L7ng02ETSBm
VOUAEQEAAYkBHwQYAQIACQUCTnkIkgIbDAAKCRB5vj5DAEEYhuobB/9Fi1GVG5qn
Pq14S0WKYEW3N891L37LaXmDh977r/j2dyZOoYIiV4rx6a6urhq9UbcgNw/ke01T
NM4y7EhW/lFnxJQXSMjdsXGcb9HwUevDk2FMV1h9gkHLlqRUlTpjVdQwTB9wMd4b
WhZsxybTnGh6o8dCwBEaGNsHsSBYO81OXrTE/fcZEgKCeKW2xdKRiazu6Mu5WLU6
gBy2nOc6oL2zKJZjACfllQzBx5+6z2N4Sj0JBOobz4RR2JLElMEckMbdqbIS+c+n
02ItMmCORgakf74k+TEbaZx3ZTVHnhvqQqanZz1i4I5IwHJxkUsYLddgYrylZH+M
wNDlB5u3I138
=RrrU
-----END PGP PUBLIC KEY BLOCK-----

View File

@ -0,0 +1,74 @@
package kernel_repack
import (
"github.com/go-git/go-billy/v5"
"os"
)
// File is a named blob to be written into the repack output tree.
type File struct {
Name string
Data []byte
}
// Output is the result of a kernel repack: the RPM spec file, the source
// tarball with its SHA-256 digest, a metadata file, and any auxiliary
// files destined for the SOURCES directory.
type Output struct {
Spec *File
Tarball []byte
TarballSha256 string
Metadata *File
OtherFiles []*File
}
// ToFS writes the repack output into fs as an RPM source layout:
// SPECS/<spec name>, SOURCES/<name> for each auxiliary file, and the
// metadata file at the filesystem root.
//
// Note: o.Tarball is not written here; callers handle the tarball bytes
// separately.
func (o *Output) ToFS(fs billy.Filesystem) error {
	// Create the RPM layout directories first.
	for _, dir := range []string{"SPECS", "SOURCES"} {
		if err := fs.MkdirAll(dir, 0755); err != nil {
			return err
		}
	}

	// Auxiliary SOURCES files.
	for _, file := range o.OtherFiles {
		if err := writeFile(fs, "SOURCES/"+file.Name, file.Data); err != nil {
			return err
		}
	}

	// Spec file.
	if err := writeFile(fs, "SPECS/"+o.Spec.Name, o.Spec.Data); err != nil {
		return err
	}

	// Metadata file at the root of the tree.
	return writeFile(fs, o.Metadata.Name, o.Metadata.Data)
}

// writeFile creates (or truncates) name on fs with mode 0644 and writes
// data to it. The previous inline version deferred Close and discarded
// its error; a failed Close can mean the data was not fully written, so
// the error is now propagated.
func writeFile(fs billy.Filesystem, name string, data []byte) error {
	f, err := fs.OpenFile(name, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0644)
	if err != nil {
		return err
	}
	if _, err := f.Write(data); err != nil {
		f.Close() // best-effort: the write error takes precedence
		return err
	}
	return f.Close()
}

View File

@ -0,0 +1,27 @@
# v1 repack implementation; embeds the spec templates, kernel configs and
# packaging helper scripts it writes into the output tree.
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "kernel_repack",
srcs = ["v1.go"],
embedsrcs = [
"data/config-x86_64",
"data/cpupower.config",
"data/cpupower.service",
"data/filter-modules.sh",
"data/filter-x86_64.sh",
"data/lt.spec",
"data/ml.spec",
"data/mod-extra.list",
"data/config-aarch64",
"data/filter-aarch64.sh",
"data/kvm_stat.logrotate",
"data/mod-denylist.sh",
"data/mod-sign.sh",
"data/rockykpatch1.x509",
"data/x509.genkey",
"data/rockydup1.x509",
],
importpath = "go.resf.org/peridot/tools/kernelmanager/kernel_repack/v1",
visibility = ["//visibility:public"],
deps = ["//tools/kernelmanager/kernel_repack"],
)

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,3 @@
# See 'cpupower help' and cpupower(1) for more info
CPUPOWER_START_OPTS="frequency-set -g performance"
CPUPOWER_STOP_OPTS="frequency-set -g ondemand"

View File

@ -0,0 +1,13 @@
[Unit]
Description=Configure CPU power related settings
After=syslog.target
[Service]
Type=oneshot
RemainAfterExit=yes
EnvironmentFile=/etc/sysconfig/cpupower
ExecStart=/usr/bin/cpupower $CPUPOWER_START_OPTS
ExecStop=/usr/bin/cpupower $CPUPOWER_STOP_OPTS
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,18 @@
#! /bin/bash
# This is the aarch64 override file for the core/drivers package split. The
# module directories listed here and in the generic list in filter-modules.sh
# will be moved to the resulting kernel-modules package for this arch.
# Anything not listed in those files will be in the kernel-core package.
#
# Please review the default list in filter-modules.sh before making
# modifications to the overrides below. If something should be removed across
# all arches, remove it in the default instead of per-arch.
driverdirs="atm auxdisplay bcma bluetooth firewire fmc infiniband isdn leds media memstick message mmc mtd mwave nfc ntb pcmcia platform power ssb staging tty uio uwb w1"
ethdrvs="3com adaptec arc alteon atheros broadcom cadence calxeda chelsio cisco dec dlink emulex icplus marvell micrel myricom neterion nvidia oki-semi packetengines qlogic rdc renesas sfc silan sis smsc stmicro sun tehuti ti via wiznet xircom"
drmdrvs="amd arm bridge ast exynos hisilicon i2c imx mgag200 meson msm nouveau panel radeon rockchip tegra sun4i tinydrm vc4"
singlemods="ntb_netdev iscsi_ibft iscsi_boot_sysfs megaraid pmcraid qedi qla1280 9pnet_rdma rpcrdma nvmet-rdma nvme-rdma hid-picolcd hid-prodikeys hwa-hc hwpoison-inject target_core_user sbp_target cxgbit iw_cxgb3 iw_cxgb4 cxgb3i cxgb3i cxgb3i_ddp cxgb4i chcr chtls"

View File

@ -0,0 +1,169 @@
#! /bin/bash
#
# Called as filter-modules.sh list-of-modules Arch
# This script filters the modules into the kernel-core and kernel-modules
# subpackages. We list out subsystems/subdirs to prune from the installed
# module directory. What is left is put into the kernel-core package. What is
# pruned is contained in the kernel-modules package.
#
# This file contains the default subsys/subdirs to prune from all architectures.
# If an architecture needs to differ, we source a per-arch filter-<arch>.sh file
# that contains the set of override lists to be used instead. If a module or
# subsys should be in kernel-modules on all arches, please change the defaults
# listed here.
# Overrides is individual modules which need to remain in kernel-core due to deps.
overrides="cec"
# Set the default dirs/modules to filter out
driverdirs="atm auxdisplay bcma bluetooth firewire fmc iio infiniband isdn leds media memstick mfd mmc mtd nfc ntb pcmcia platform power ssb staging tty uio uwb w1"
chardrvs="mwave pcmcia"
netdrvs="appletalk can dsa hamradio ieee802154 irda ppp slip usb wireless"
ethdrvs="3com adaptec alteon amd aquantia atheros broadcom cadence calxeda chelsio cisco dec dlink emulex icplus marvell neterion nvidia oki-semi packetengines qlogic rdc renesas sfc silan sis smsc stmicro sun tehuti ti wiznet xircom"
inputdrvs="gameport tablet touchscreen"
scsidrvs="aacraid aic7xxx aic94xx be2iscsi bfa bnx2i bnx2fc csiostor cxgbi esas2r fcoe fnic hisi_sas isci libsas lpfc megaraid mpt2sas mpt3sas mvsas pm8001 qla2xxx qla4xxx sym53c8xx_2 ufs qedf"
usbdrvs="atm image misc serial wusbcore"
fsdrvs="affs befs cifs coda cramfs ecryptfs hfs hfsplus jfs minix ncpfs nilfs2 ocfs2 reiserfs romfs squashfs sysv ubifs ufs"
netprots="6lowpan appletalk atm ax25 batman-adv bluetooth can dccp dsa ieee802154 irda l2tp mac80211 mac802154 mpls netrom nfc rds rfkill rose sctp smc wireless"
drmdrvs="amd ast gma500 i2c i915 mgag200 nouveau radeon via "
singlemods="ntb_netdev iscsi_ibft iscsi_boot_sysfs megaraid pmcraid qedi qla1280 9pnet_rdma rpcrdma nvmet-rdma nvme-rdma hid-picolcd hid-prodikeys hwa-hc hwpoison-inject hid-sensor-hub target_core_user sbp_target cxgbit iw_cxgb3 iw_cxgb4 cxgb3i cxgb3i cxgb3i_ddp cxgb4i chcr chtls parport_serial ism regmap-sdw regmap-sdw-mbq arizona-micsupp hid-asus"
# Grab the arch-specific filter list overrides
source ./filter-$2.sh
# filter_dir FILELIST DIR — remove every module under DIR from FILELIST and
# append those lines to k-d.list (the future kernel-modules manifest).
# grep -v exits non-zero when its output is empty; that is treated as
# "nothing to remove" and FILELIST is left untouched.
filter_dir() {
filelist=$1
dir=$2
grep -v -e "${dir}/" ${filelist} > ${filelist}.tmp
if [ $? -ne 0 ]
then
echo "Couldn't remove ${dir}. Skipping."
else
grep -e "${dir}/" ${filelist} >> k-d.list
mv ${filelist}.tmp $filelist
fi
return 0
}
# filter_ko FILELIST MOD — move a single module (MOD.ko) from FILELIST into
# k-d.list, mirroring filter_dir but for individual .ko files.
filter_ko() {
filelist=$1
mod=$2
grep -v -e "${mod}.ko" ${filelist} > ${filelist}.tmp
if [ $? -ne 0 ]
then
echo "Couldn't remove ${mod}.ko Skipping."
else
grep -e "${mod}.ko" ${filelist} >> k-d.list
mv ${filelist}.tmp $filelist
fi
return 0
}
# Filter the drivers/ subsystems
for subsys in ${driverdirs}
do
filter_dir $1 drivers/${subsys}
done
# Filter the networking drivers
for netdrv in ${netdrvs}
do
filter_dir $1 drivers/net/${netdrv}
done
# Filter the char drivers
for char in ${chardrvs}
do
filter_dir $1 drivers/char/${char}
done
# Filter the ethernet drivers
for eth in ${ethdrvs}
do
filter_dir $1 drivers/net/ethernet/${eth}
done
# SCSI
for scsi in ${scsidrvs}
do
filter_dir $1 drivers/scsi/${scsi}
done
# Input
for input in ${inputdrvs}
do
filter_dir $1 drivers/input/${input}
done
# USB
for usb in ${usbdrvs}
do
filter_dir $1 drivers/usb/${usb}
done
# Filesystems
for fs in ${fsdrvs}
do
filter_dir $1 fs/${fs}
done
# Network protocols
for prot in ${netprots}
do
filter_dir $1 kernel/net/${prot}
done
# DRM
for drm in ${drmdrvs}
do
filter_dir $1 drivers/gpu/drm/${drm}
done
# Just kill sound.
filter_dir $1 kernel/sound
filter_dir $1 kernel/drivers/soundwire
# Now go through and filter any single .ko files that might have deps on the
# things we filtered above
for mod in ${singlemods}
do
filter_ko $1 ${mod}
done
# Now process the override list to bring those modules back into core
for mod in ${overrides}
do
grep -v -e "/${mod}.ko" k-d.list > k-d.list.tmp
if [ $? -ne 0 ]
then
echo "Couldn't save ${mod}.ko Skipping."
else
grep -e "/${mod}.ko" k-d.list >> $filelist
mv k-d.list.tmp k-d.list
fi
done
# Go through our generated drivers list and remove the .ko files. We'll
# restore them later.
for mod in `cat k-d.list`
do
rm -rf $mod
done

View File

@ -0,0 +1,12 @@
#! /bin/bash
# This is the x86_64 override file for the core/drivers package split. The
# module directories listed here and in the generic list in filter-modules.sh
# will be moved to the resulting kernel-modules package for this arch.
# Anything not listed in those files will be in the kernel-core package.
#
# Please review the default list in filter-modules.sh before making
# modifications to the overrides below. If something should be removed across
# all arches, remove it in the default instead of per-arch.
# Defaults work so no need to override

View File

@ -0,0 +1,11 @@
/var/log/kvm_stat.csv {
size 10M
missingok
compress
maxage 30
rotate 5
nodateext
postrotate
/usr/bin/systemctl try-restart kvm_stat.service
endscript
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,164 @@
#! /bin/bash
# shellcheck disable=SC2164
RpmDir=$1
ModDir=$2
Dir="$1/$2"
# Note the list filename must have the format mod-[PACKAGE].list, for example,
# mod-internal.list or mod-extra.list. The PACKAGE is used to create a
# override directory for the modules.
List=$3
Dest="$4"
# blacklist MOD — write /etc/modprobe.d/MOD-blacklist.conf into the RPM
# buildroot so the module ships blacklisted by default. The heredoc body
# below is emitted verbatim into the generated conf file.
blacklist()
{
cat > "$RpmDir/etc/modprobe.d/$1-blacklist.conf" <<-__EOF__
# This kernel module can be automatically loaded by non-root users. To
# enhance system security, the module is blacklisted by default to ensure
# system administrators make the module available for use as needed.
# See https://access.redhat.com/articles/3760101 for more details.
#
# Remove the blacklist by adding a comment # at the start of the line.
blacklist $1
__EOF__
}
# check_blacklist KO — locate module KO in the buildroot; if modinfo shows
# a "net-" alias (which allows the module to be auto-loaded), blacklist it
# via blacklist() using the bare module name (path and .ko suffix stripped).
check_blacklist()
{
mod=$(find "$RpmDir/$ModDir" -name "$1")
[ ! "$mod" ] && return 0
if modinfo "$mod" | grep -q '^alias:\s\+net-'; then
mod="${1##*/}"
mod="${mod%.ko*}"
echo "$mod has an alias that allows auto-loading. Blacklisting."
blacklist "$mod"
fi
}
# find_depends MODPATH — inspect MODPATH's "depends:" list; any dependency
# that appears in $ListName (the mod-* list) is recorded in req.list as
# required by core, unless MODPATH itself is also in the list.
find_depends()
{
dep=$1
depends=$(modinfo "$dep" | sed -n -e "/^depends/ s/^depends:[ \t]*//p")
# This function runs as a background job (see foreachp), so "exit" only
# terminates that job — an early return for modules with no dependencies.
[ -z "$depends" ] && exit
# depends: is comma-separated; expand to a space-separated word list.
for mod in ${depends//,/ }
do
match=$(grep "^$mod.ko" "$ListName")
[ -z "$match" ] && continue
# check if the module we are looking at is in mod-* too.
# if so we do not need to mark the dep as required.
mod2=${dep##*/} # same as $(basename $dep), but faster
match2=$(grep "^$mod2" "$ListName")
if [ -n "$match2" ]
then
#echo $mod2 >> notreq.list
continue
fi
echo "$mod".ko >> req.list
done
}
# foreachp CMD — run "CMD line" as a background job for each line of stdin,
# keeping at most $(nproc) jobs in flight, then wait for all to finish.
# NOTE(review): "wait -n" requires bash >= 4.3 — confirm the build
# environment's bash version.
foreachp()
{
P=$(nproc)
bgcount=0
while read -r mod; do
$1 "$mod" &
bgcount=$((bgcount + 1))
if [ $bgcount -eq "$P" ]; then
# At the concurrency cap: reap one finished job before launching more.
wait -n
bgcount=$((bgcount - 1))
fi
done
# Reap all remaining jobs.
wait
}
# Destination was specified on the command line
test -n "$4" && echo "$0: Override Destination $Dest has been specified."
pushd "$Dir"
OverrideDir=$(basename "$List")
OverrideDir=${OverrideDir%.*}
OverrideDir=${OverrideDir#*-}
mkdir -p "$OverrideDir"
rm -rf modnames
find . -name "*.ko" -type f > modnames
# Look through all of the modules, and throw any that have a dependency in
# our list into the list as well.
rm -rf dep.list dep2.list
rm -rf req.list req2.list
touch dep.list req.list
cp "$List" .
# This variable needs to be exported because it is used in sub-script
# executed by xargs
ListName=$(basename "$List")
export ListName
foreachp find_depends < modnames
sort -u req.list > req2.list
sort -u "$ListName" > modules2.list
join -v 1 modules2.list req2.list > modules3.list
while IFS= read -r mod
do
# get the path for the module
modpath=$(grep /"$mod" modnames)
[ -z "$modpath" ] && continue
echo "$modpath" >> dep.list
done < modules3.list
sort -u dep.list > dep2.list
if [ -n "$Dest" ]; then
# now move the modules into the $Dest directory
while IFS= read -r mod
do
newpath=$(dirname "$mod" | sed -e "s/kernel\\//$Dest\//")
mkdir -p "$newpath"
mv "$mod" "$newpath"
echo "$mod" | sed -e "s/kernel\\//$Dest\//" | sed -e "s|^.|${ModDir}|g" >> "$RpmDir"/"$ListName"
done < dep2.list
fi
popd
# If we're signing modules, we can't leave the .mod files for the .ko files
# we've moved in .tmp_versions/. Remove them so the Kbuild 'modules_sign'
# target doesn't try to sign a non-existent file. This is kinda ugly, but
# so are the modules-* packages.
while IFS= read -r mod
do
modfile=$(basename "$mod" | sed -e 's/.ko/.mod/')
rm -f .tmp_versions/"$modfile"
done < "$Dir"/dep2.list
if [ -z "$Dest" ]; then
sed -e "s|^.|${ModDir}|g" "$Dir"/dep2.list > "$RpmDir/$ListName"
echo "./$RpmDir/$ListName created."
[ -d "$RpmDir/etc/modprobe.d/" ] || mkdir -p "$RpmDir/etc/modprobe.d/"
foreachp check_blacklist < "$List"
fi
# Many BIOS-es export a PNP-id which causes the floppy driver to autoload
# even though most modern systems don't have a 3.5" floppy driver anymore
# this replaces the old die_floppy_die.patch which removed the PNP-id from
# the module
floppylist=("$RpmDir"/"$ModDir"/kernel/drivers/block/floppy.ko*)
if [[ -n ${floppylist[0]} && -f ${floppylist[0]} ]]; then
blacklist "floppy"
fi
# avoid an empty kernel-extra package
echo "$ModDir/$OverrideDir" >> "$RpmDir/$ListName"
pushd "$Dir"
rm -f modnames dep.list dep2.list req.list req2.list
rm -f "$ListName" modules2.list modules3.list
popd

View File

@ -0,0 +1,192 @@
6pack.ko
a3d.ko
act200l-sir.ko
actisys-sir.ko
adi.ko
aer_inject.ko
af_802154.ko
affs.ko
ali-ircc.ko
analog.ko
appletalk.ko
atm.ko
avma1_cs.ko
avm_cs.ko
avmfritz.ko
ax25.ko
b1.ko
bas_gigaset.ko
batman-adv.ko
baycom_par.ko
baycom_ser_fdx.ko
baycom_ser_hdx.ko
befs.ko
bpqether.ko
br2684.ko
capi.ko
c_can.ko
c_can_platform.ko
clip.ko
cobra.ko
coda.ko
cuse.ko
db9.ko
dccp_diag.ko
dccp_ipv4.ko
dccp_ipv6.ko
dccp.ko
dccp_probe.ko
diva_idi.ko
divas.ko
ds1wm.ko
ds2482.ko
ds2490.ko
dss1_divert.ko
elsa_cs.ko
ems_pci.ko
ems_usb.ko
esd_usb2.ko
esi-sir.ko
gamecon.ko
gf2k.ko
gigaset.ko
girbil-sir.ko
grip.ko
grip_mp.ko
guillemot.ko
hdlcdrv.ko
hfc4s8s_l1.ko
hfcmulti.ko
hfcpci.ko
hisax.ko
hwa-rc.ko
hysdn.ko
i2400m.ko
i2400m-sdio.ko
i2400m-usb.ko
ieee802154.ko
iforce.ko
interact.ko
ipddp.ko
ipx.ko
isdn.ko
joydump.ko
kingsun-sir.ko
ks959-sir.ko
ksdazzle-sir.ko
kvaser_pci.ko
l2tp_core.ko
l2tp_debugfs.ko
l2tp_eth.ko
l2tp_ip.ko
l2tp_ip6.ko
l2tp_netlink.ko
l2tp_ppp.ko
lec.ko
ma600-sir.ko
magellan.ko
mcp2120-sir.ko
mISDN_core.ko
mISDN_dsp.ko
mkiss.ko
mptbase.ko
mptctl.ko
mptfc.ko
nci.ko
ncpfs.ko
netjet.ko
netrom.ko
nfc.ko
nilfs2.ko
ocfs2_dlmfs.ko
ocfs2_dlm.ko
ocfs2.ko
ocfs2_nodemanager.ko
ocfs2_stackglue.ko
ocfs2_stack_o2cb.ko
ocfs2_stack_user.ko
old_belkin-sir.ko
orinoco_cs.ko
orinoco.ko
orinoco_nortel.ko
orinoco_pci.ko
orinoco_plx.ko
orinoco_usb.ko
plx_pci.ko
pn_pep.ko
pppoatm.ko
rds.ko
rds_rdma.ko
rds_tcp.ko
rose.ko
sch_atm.ko
sch_cbq.ko
sch_choke.ko
sch_drr.ko
sch_dsmark.ko
sch_gred.ko
sch_mqprio.ko
sch_multiq.ko
sch_netem.ko
sch_qfq.ko
sch_red.ko
sch_sfb.ko
sch_teql.ko
sctp.ko
sctp_diag.ko
sctp_probe.ko
sidewinder.ko
sja1000.ko
sja1000_platform.ko
slcan.ko
slip.ko
softing_cs.ko
softing.ko
spaceball.ko
spaceorb.ko
stinger.ko
sysv.ko
tcp_bic.ko
tcp_highspeed.ko
tcp_htcp.ko
tcp_hybla.ko
tcp_illinois.ko
tcp_lp.ko
tcp_scalable.ko
tcp_vegas.ko
tcp_veno.ko
tcp_westwood.ko
tcp_yeah.ko
tekram-sir.ko
tmdc.ko
toim3232-sir.ko
trancevibrator.ko
turbografx.ko
twidjoy.ko
ubifs.ko
ufs.ko
umc.ko
usbip-core.ko
usbip-host.ko
uwb.ko
vcan.ko
vhci-hcd.ko
w1_bq27000.ko
w1_ds2408.ko
w1_ds2423.ko
w1_ds2431.ko
w1_ds2433.ko
w1_ds2760.ko
w1_ds2780.ko
w1_ds2781.ko
w1_ds28e04.ko
w1_smem.ko
w1_therm.ko
w6692.ko
walkera0701.ko
wanrouter.ko
warrior.ko
whci.ko
wire.ko
yam.ko
zhenhua.ko

View File

@ -0,0 +1,37 @@
#! /bin/bash
# Sign every kernel module (*.ko) under a directory with the given keypair.
#
# The modules_sign target checks for corresponding .o files for every .ko that
# is signed. This doesn't work for package builds which re-use the same build
# directory for every variant, and the .config may change between variants.
# So instead of using this script to just sign lib/modules/$KernelVer/extra,
# sign all .ko in the buildroot.
# This essentially duplicates the 'modules_sign' Kbuild target and runs the
# same commands for those modules.
#
# Usage: sign-modules <private key> <public key> <module directory>
# Exits non-zero when nothing was signed or the spot-check fails.
MODSECKEY=$1
MODPUBKEY=$2
moddir=$3

modules=$(find "$moddir" -type f -name '*.ko')

# Fail with a clear message instead of letting the tail(1) spot-check below
# die on an empty file name: no modules almost certainly means a broken build.
if [ -z "$modules" ]; then
    echo "No .ko files found under $moddir; nothing to sign!"
    exit 1
fi

NPROC=$(nproc)
[ -z "$NPROC" ] && NPROC=1

# NB: this loop runs 2000+ iterations. Try to be fast.
echo "$modules" | xargs -r -n16 -P "$NPROC" sh -c "
for mod; do
    ./scripts/sign-file sha256 $MODSECKEY $MODPUBKEY \$mod
    rm -f \$mod.sig \$mod.dig
done
" DUMMYARG0 # xargs appends ARG1 ARG2..., which go into $mod in for loop.

# Spot-check one random module: a signed module ends with the 28-byte magic
# string "~Module signature appended~\n".
RANDOMMOD=$(echo "$modules" | sort -R | head -n 1)
if [ "~Module signature appended~" != "$(tail -c 28 "$RANDOMMOD")" ]; then
    echo "*****************************"
    echo "*** Modules are unsigned! ***"
    echo "*****************************"
    exit 1
fi

exit 0

View File

@ -0,0 +1,17 @@
# OpenSSL request configuration for generating the self-signed X.509
# certificate used to sign kernel modules.
[ req ]
# 3072-bit key.
default_bits = 3072
distinguished_name = req_distinguished_name
# Non-interactive: all DN fields come from this file.
prompt = no
x509_extensions = myexts
[ req_distinguished_name ]
O = Rocky
CN = Rocky kernel signing key
emailAddress = security@rockylinux.org
[ myexts ]
# Not a CA; the key may only be used for digital signatures.
basicConstraints=critical,CA:FALSE
keyUsage=digitalSignature
subjectKeyIdentifier=hash
authorityKeyIdentifier=keyid

View File

@ -0,0 +1,141 @@
package repack_v1
import (
"bytes"
"crypto/sha256"
"embed"
"encoding/hex"
"fmt"
"go.resf.org/peridot/tools/kernelmanager/kernel_repack"
"io"
"strings"
"text/template"
)
//go:embed data/*
var Data embed.FS
// ChangelogEntry is a single changelog entry made available to the kernel
// spec template.
type ChangelogEntry struct {
	// Date of the entry. Presumably in RPM changelog date format -- confirm
	// against the embedded spec templates.
	Date string
	// Name of the packager for this entry.
	Name string
	// Kernel version this entry belongs to.
	Version string
	// Build ID suffix for this entry.
	BuildID string
	// Free-form changelog text.
	Text string
}

// Input carries everything needed to render a repacked kernel package:
// it is both the template context for the spec file and the source tarball.
type Input struct {
	// Upstream kernel version (used in the tarball file name).
	Version string
	// Build ID for the resulting package.
	BuildID string
	// Target package name; also names the generated spec and .metadata files.
	KernelPackage string
	// Changelog entries rendered into the spec.
	Changelog []*ChangelogEntry
	// Raw kernel source tarball (xz or gzip compressed).
	Tarball []byte
}
// kernel renders the embedded packaging files for the given kernel type
// ("lt" or "ml") into a kernel_repack.Output.
//
// It reads data/<kernelType>.spec as a text/template, executes it with in,
// collects every other file under data/ verbatim, and generates the
// lookaside .metadata file from the sha256 of in.Tarball.
//
// Returns an error if the tarball is empty, the spec for kernelType is not
// embedded, or template parsing/rendering fails.
func kernel(kernelType string, in *Input) (*kernel_repack.Output, error) {
	// Guard the magic-byte sniff below against an empty/truncated tarball.
	if len(in.Tarball) < 2 {
		return nil, fmt.Errorf("tarball for %s is empty or truncated", in.KernelPackage)
	}

	// readData reads one embedded file. Kept as a helper so each handle is
	// closed as soon as it is read, instead of accumulating defers across
	// the directory loop below.
	readData := func(name string) ([]byte, error) {
		f, err := Data.Open("data/" + name)
		if err != nil {
			return nil, err
		}
		defer f.Close()
		return io.ReadAll(f)
	}

	var spec *kernel_repack.File
	var files []*kernel_repack.File

	dir, err := Data.ReadDir("data")
	if err != nil {
		return nil, err
	}
	for _, file := range dir {
		if strings.HasSuffix(file.Name(), ".spec") {
			// Only the spec matching the requested kernel type is used;
			// specs for other types are skipped entirely.
			if file.Name() != kernelType+".spec" {
				continue
			}
			specBytes, err := readData(file.Name())
			if err != nil {
				return nil, err
			}
			spec = &kernel_repack.File{
				Name: fmt.Sprintf("%s.spec", in.KernelPackage),
				Data: specBytes,
			}
			continue
		}

		// Every non-spec file ships as-is alongside the spec.
		data, err := readData(file.Name())
		if err != nil {
			return nil, err
		}
		files = append(files, &kernel_repack.File{
			Name: file.Name(),
			Data: data,
		})
	}

	// Previously a missing spec caused a nil dereference below; fail
	// explicitly instead.
	if spec == nil {
		return nil, fmt.Errorf("no embedded spec found for kernel type %q", kernelType)
	}

	// Get sha256sum of tarball for the .metadata lookaside entry.
	hash := sha256.New()
	if _, err := hash.Write(in.Tarball); err != nil {
		return nil, err
	}
	sum := hex.EncodeToString(hash.Sum(nil))

	// Detect compression from the tarball magic bytes (gzip = 0x1f 0x8b);
	// default to xz.
	suffix := "xz"
	if in.Tarball[0] == 0x1f && in.Tarball[1] == 0x8b {
		suffix = "gz"
	}
	metadata := fmt.Sprintf("%s SOURCES/linux-%s.tar.%s", sum, in.Version, suffix)
	// Create .[kernelpackage].metadata
	metadataName := fmt.Sprintf(".%s.metadata", in.KernelPackage)

	// Render the spec template with the input values.
	var buf bytes.Buffer
	txtTemplate, err := template.New("spec").Parse(string(spec.Data))
	if err != nil {
		return nil, err
	}
	if err := txtTemplate.Execute(&buf, in); err != nil {
		return nil, err
	}
	spec.Data = buf.Bytes()

	return &kernel_repack.Output{
		Spec:          spec,
		Tarball:       in.Tarball,
		TarballSha256: sum,
		Metadata: &kernel_repack.File{
			Name: metadataName,
			Data: []byte(metadata),
		},
		OtherFiles: files,
	}, nil
}
// LT creates a new kernel package for the LT (longterm) kernel by rendering
// the embedded "lt" spec template.
// Returns spec and SOURCE files.
func LT(in *Input) (*kernel_repack.Output, error) {
	return kernel("lt", in)
}
// ML creates a new kernel package for the ML (mainline) kernel by rendering
// the embedded "ml" spec template.
// Returns spec and SOURCE files.
func ML(in *Input) (*kernel_repack.Output, error) {
	return kernel("ml", in)
}

52
tools/kernelmanager/proto/v1/BUILD vendored Normal file
View File

@ -0,0 +1,52 @@
load("@rules_proto//proto:defs.bzl", "proto_library")
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
load("//tools/build_rules/oapi_gen:defs.bzl", "oapi_gen_ts")
# Proto library for the KernelManager v1 API surface.
proto_library(
    name = "kernelmanagerpb_proto",
    srcs = [
        "changelog.proto",
        "kernel.proto",
        "kernel_state.proto",
        "kernelmanager.proto",
        "update.proto",
    ],
    visibility = ["//visibility:public"],
    deps = [
        "@com_google_protobuf//:field_mask_proto",
        "@com_google_protobuf//:timestamp_proto",
        # NOTE(review): annotations_proto is pulled from both @go_googleapis
        # and @googleapis -- confirm both repositories are really required.
        "@go_googleapis//google/api:annotations_proto",
        "@go_googleapis//google/longrunning:longrunning_proto",
        "@googleapis//google/api:annotations_proto",
    ],
)

# TypeScript client bindings generated from the same protos.
oapi_gen_ts(
    name = "kernelmanagerpb_ts_proto",
    proto = ":kernelmanagerpb_proto",
    visibility = ["//visibility:public"],
)

# Go bindings, compiled with both the gRPC and grpc-gateway generators.
go_proto_library(
    name = "kernelmanagerpb_go_proto",
    compilers = [
        "@io_bazel_rules_go//proto:go_grpc",
        "//:go_gen_grpc_gateway",
    ],
    importpath = "go.resf.org/peridot/tools/kernelmanager/pb",
    proto = ":kernelmanagerpb_proto",
    visibility = ["//visibility:public"],
    deps = [
        "//third_party/googleapis/google/longrunning:longrunning_go_proto",
        "@go_googleapis//google/api:annotations_go_proto",
        "@org_golang_google_genproto//googleapis/api/annotations",
    ],
)

# Wrapper library so Go code can depend on //tools/kernelmanager/proto/v1:pb.
go_library(
    name = "pb",
    embed = [":kernelmanagerpb_go_proto"],
    importpath = "go.resf.org/peridot/tools/kernelmanager/pb",
    visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,35 @@
syntax = "proto3";

package peridot.tools.kernelmanager.v1;

// Note: the previously present import of tools/kernelmanager/proto/v1/kernel.proto
// was unused (nothing here references Kernel) and has been removed.
import "google/protobuf/timestamp.proto";

option java_multiple_files = true;
option java_outer_classname = "ChangelogProto";
option java_package = "org.resf.peridot.tools.kernelmanager.v1";
option go_package = "go.resf.org/peridot/tools/kernelmanager/pb;kernelmanagerpb";

// Changelog entry in kernel spec
message ChangelogEntry {
  // Changelog entry text
  string text = 1;

  // Changelog entry author
  string author = 2;

  // Changelog entry date
  google.protobuf.Timestamp create_time = 3;

  // Version of the kernel this changelog entry belongs to
  string version = 4;

  // Build ID of the kernel this changelog entry belongs to
  string build_id = 5;
}

// Changelog for a kernel
message Changelog {
  // Changelog entries
  repeated ChangelogEntry entries = 1;
}

View File

@ -0,0 +1,99 @@
syntax = "proto3";

package peridot.tools.kernelmanager.v1;

// Note: the previously present import of google/api/annotations.proto was
// unused (this file declares no HTTP bindings) and has been removed.

option java_multiple_files = true;
option java_outer_classname = "KernelProto";
option java_package = "org.resf.peridot.tools.kernelmanager.v1";
option go_package = "go.resf.org/peridot/tools/kernelmanager/pb;kernelmanagerpb";

// RepackOptions contains options for repackaging the kernel.
// This is only repackaging from Kernel.org.
message RepackOptions {
  // The variant of the kernel to be repackaged.
  enum KernelOrgVariant {
    // Unknown variant.
    KERNEL_ORG_VARIANT_UNSPECIFIED = 0;

    // Mainline variant.
    MAINLINE = 1;

    // Longterm variant.
    LONGTERM = 2;
  }

  // The variant of the kernel to be repackaged.
  KernelOrgVariant kernel_org_variant = 1;

  // The version of the kernel to be repackaged.
  // Only valid for LONGTERM variant.
  string kernel_org_version = 2;

  // How often to repack the kernel.
  enum Frequency {
    // Unknown frequency.
    FREQUENCY_UNSPECIFIED = 0;

    // Repack every 24 hours.
    DAILY = 1;

    // Repack every 7 days.
    WEEKLY = 2;

    // Repack every 30 days.
    MONTHLY = 3;
  }

  // How often to repack the kernel.
  Frequency frequency = 3;

  // Repack version
  enum Version {
    // Unknown version.
    VERSION_UNSPECIFIED = 0;

    // V1 version.
    V1 = 1;
  }

  // Repack version
  Version version = 4;
}

// PeridotProject contains information about the Peridot project
// This is where the builds are sent to (or watched for).
message PeridotProject {
  // ID of the project.
  // Package name MUST match the Kernel package name.
  string id = 1;
}

// Config contains the configuration on how to facilitate an update
// for a kernel.
message Config {
  // Whether the kernel should be repackaged from Kernel.org
  RepackOptions repack_options = 1;

  // Peridot project this kernel is being built for.
  PeridotProject peridot_project = 2;

  // Whether the kernel needs Secure Boot signing.
  bool secure_boot = 3;

  // Namespace in SCM where the kernel is stored.
  string scm_namespace = 4;

  // SCM branches to push the kernel to.
  repeated string scm_branches = 5;
}

// Kernel is the representation of a kernel.
message Kernel {
  // The name of the kernel.
  string name = 1;

  // The kernel config.
  Config config = 2;

  // The package name in Peridot
  string pkg = 3;
}

View File

@ -0,0 +1,42 @@
syntax = "proto3";

package peridot.tools.kernelmanager.v1;

// Note: the previously present import of google/api/annotations.proto was
// unused (this file declares no HTTP bindings) and has been removed.
import "google/api/field_behavior.proto";
import "google/protobuf/timestamp.proto";
import "tools/kernelmanager/proto/v1/update.proto";

option java_multiple_files = true;
option java_outer_classname = "KernelStateProto";
option java_package = "org.resf.peridot.tools.kernelmanager.v1";
option go_package = "go.resf.org/peridot/tools/kernelmanager/pb;kernelmanagerpb";

// KernelState represents the state of a kernel.
message KernelState {
  // The last time the kernel was updated.
  google.protobuf.Timestamp last_update_time = 1;

  // Kernel state
  enum State {
    // Unspecified state
    STATE_UNSPECIFIED = 0;

    // Kernel has no active updates
    IDLE = 1;

    // Kernel is being updated
    UPDATING = 2;

    // Kernel is pending Secure Boot artifacts
    PENDING_SECURE_BOOT = 3;

    // Kernel is uploading Secure Boot artifacts to Peridot
    UPLOADING_SECURE_BOOT = 4;
  }

  // Kernel current active state
  State state = 2 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Kernel current active update
  Update update = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
}

View File

@ -0,0 +1,173 @@
syntax = "proto3";

package peridot.tools.kernelmanager.v1;

// Note: the previously present import of google/protobuf/field_mask.proto
// was unused (update masks are intentionally not supported, see the
// api-linter exclusions) and has been removed.
import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
import "google/protobuf/timestamp.proto";
import "google/longrunning/operations.proto";
import "tools/kernelmanager/proto/v1/kernel.proto";
import "tools/kernelmanager/proto/v1/update.proto";

option java_multiple_files = true;
option java_outer_classname = "KernelManagerProto";
option java_package = "org.resf.peridot.tools.kernelmanager.v1";
option go_package = "go.resf.org/peridot/tools/kernelmanager/pb;kernelmanagerpb";

// KernelManager is the service that manages kernels.
// It helps with the repackaging of upstream kernels, as well as facilitating
// the SB signing process for both Core and SIG kernels.
service KernelManager {
  // ListKernels returns a list of all kernels.
  rpc ListKernels(ListKernelsRequest) returns (ListKernelsResponse) {
    option (google.api.http) = {
      get: "/v1/kernels"
    };
  }

  // GetKernel returns a kernel by name.
  rpc GetKernel(GetKernelRequest) returns (Kernel) {
    option (google.api.http) = {
      get: "/v1/{name=kernels/*}"
    };
    option (google.api.method_signature) = "name";
  }

  // CreateKernel creates a new kernel.
  rpc CreateKernel(CreateKernelRequest) returns (Kernel) {
    option (google.api.http) = {
      post: "/v1/kernels"
      body: "kernel"
    };
  }

  // UpdateKernel updates an existing kernel.
  // Note: there is no update_mask; the kernel is replaced wholesale.
  rpc UpdateKernel(UpdateKernelRequest) returns (Kernel) {
    option (google.api.http) = {
      patch: "/v1/{kernel.name=kernels/*}"
      body: "kernel"
    };
    // Fixed: previously listed "kernel,update_mask", but UpdateKernelRequest
    // has no update_mask field.
    option (google.api.method_signature) = "kernel";
  }

  // TriggerKernelUpdate triggers an update of a kernel.
  // For any repack kernels that are on a schedule, this will trigger an immediate
  // update of the kernel. And the update window will be reset.
  rpc TriggerKernelUpdate(TriggerKernelUpdateRequest) returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/{name=kernels/*}:triggerUpdate"
    };
    option (google.longrunning.operation_info) = {
      response_type: "TriggerKernelUpdateResponse"
      metadata_type: "KernelManagerMetadata"
    };
  }

  // ListUpdates returns a list of all kernel updates.
  rpc ListUpdates(ListUpdatesRequest) returns (ListUpdatesResponse) {
    option (google.api.http) = {
      get: "/v1/updates"
    };
  }

  // GetUpdate returns a kernel update by name.
  rpc GetUpdate(GetUpdateRequest) returns (Update) {
    option (google.api.http) = {
      get: "/v1/{name=updates/*}"
    };
    option (google.api.method_signature) = "name";
  }
}

// ListKernelsRequest is the request message for ListKernels.
message ListKernelsRequest {
  // The maximum number of kernels to return.
  // If not specified, the server will pick an appropriate default.
  int32 page_size = 1;

  // A page token, received from a previous `ListKernels` call.
  // Provide this to retrieve the subsequent page.
  // When paginating, all other parameters provided to `ListKernels` must match
  // the call that provided the page token.
  string page_token = 2;
}

// ListKernelsResponse is the response message for ListKernels.
message ListKernelsResponse {
  // The list of kernels.
  repeated Kernel kernels = 1;

  // A token that can be sent as `page_token` to retrieve the next page.
  // If this field is omitted, there are no subsequent pages.
  string next_page_token = 2;
}

// GetKernelRequest is the request message for GetKernel.
message GetKernelRequest {
  // The name of the kernel to retrieve.
  string name = 1 [(google.api.field_behavior) = REQUIRED];
}

// CreateKernelRequest is the request message for CreateKernel.
message CreateKernelRequest {
  // The kernel to create.
  Kernel kernel = 1 [(google.api.field_behavior) = REQUIRED];
}

// UpdateKernelRequest is the request message for UpdateKernel.
message UpdateKernelRequest {
  // The kernel to update.
  Kernel kernel = 1 [(google.api.field_behavior) = REQUIRED];
}

// TriggerKernelUpdateRequest is the request message for TriggerKernelUpdate.
message TriggerKernelUpdateRequest {
  // The name of the kernel to update.
  string name = 1 [(google.api.field_behavior) = REQUIRED];
}

// TriggerKernelUpdateResponse is the response message for TriggerKernelUpdate.
message TriggerKernelUpdateResponse {
  // Updated kernel.
  Update update = 1;
}

// KernelManagerMetadata is the metadata message for TriggerKernelUpdate.
message KernelManagerMetadata {
  // The time at which the workflow started
  google.protobuf.Timestamp start_time = 1;

  // The time at which the workflow finished
  google.protobuf.Timestamp end_time = 2;
}

// ListUpdatesRequest is the request message for ListUpdates.
message ListUpdatesRequest {
  // The maximum number of updates to return.
  // If not specified, the server will pick an appropriate default.
  int32 page_size = 1;

  // A page token, received from a previous `ListUpdates` call.
  // Provide this to retrieve the subsequent page.
  // When paginating, all other parameters provided to `ListUpdates` must match
  // the call that provided the page token.
  string page_token = 2;
}

// ListUpdatesResponse is the response message for ListUpdates.
message ListUpdatesResponse {
  // The list of updates.
  repeated Update updates = 1;

  // A token that can be sent as `page_token` to retrieve the next page.
  // If this field is omitted, there are no subsequent pages.
  string next_page_token = 2;
}

// GetUpdateRequest is the request message for GetUpdate.
message GetUpdateRequest {
  // The name of the update to retrieve.
  string name = 1 [(google.api.field_behavior) = REQUIRED];
}

View File

@ -0,0 +1,35 @@
syntax = "proto3";

package peridot.tools.kernelmanager.v1;

import "google/protobuf/timestamp.proto";
import "tools/kernelmanager/proto/v1/kernel.proto";

option java_multiple_files = true;
// Fixed: was "KernelProto", which collides with kernel.proto's outer class
// name in the same java_package and breaks Java codegen.
option java_outer_classname = "UpdateProto";
option java_package = "org.resf.peridot.tools.kernelmanager.v1";
option go_package = "go.resf.org/peridot/tools/kernelmanager/pb;kernelmanagerpb";

// Kernel update message
message Update {
  // Full kernel
  Kernel kernel = 1;

  // Secure Boot upload time
  google.protobuf.Timestamp secure_boot_upload_time = 2;

  // Kernel.org tarball sha256 checksum (only applicable for repacked kernels)
  string kernel_org_tarball_sha256 = 3;

  // Kernel.org tarball PGP identity (only applicable for repacked kernels)
  string kernel_org_tarball_pgp_identity = 4;

  // Kernel.org version (only applicable for repacked kernels)
  string kernel_org_version = 5;

  // Started time
  google.protobuf.Timestamp started_time = 6;

  // Finished time
  google.protobuf.Timestamp finished_time = 7;
}

View File

@ -0,0 +1,4 @@
# kernelmanager
kernelmanager helps RESF manage uploads of SB variants of kernels as well as maintain ML/LT
**IMPORTANT: Only supports triggering imports manually as of now with NO builds, that is coming soon**

32
tools/kernelmanager/rpc/BUILD vendored Normal file
View File

@ -0,0 +1,32 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
# gRPC/grpc-gateway service implementation for the KernelManager API.
go_library(
    name = "rpc",
    srcs = [
        "kernel.go",
        "operation.go",
        "rpc.go",
    ],
    importpath = "go.resf.org/peridot/tools/kernelmanager/rpc",
    visibility = ["//visibility:public"],
    deps = [
        "//base/go",
        "//base/go/kv",
        "//third_party/googleapis/google/longrunning:longrunning_go_proto",
        "//tools/kernelmanager/proto/v1:pb",
        "//tools/mothership/proto/v1:pb",
        "//vendor/go.temporal.io/api/enums/v1:enums",
        "//vendor/go.temporal.io/api/serviceerror",
        "//vendor/go.temporal.io/api/workflowservice/v1:workflowservice",
        "//vendor/go.temporal.io/sdk/client",
        "@go_googleapis//google/rpc:code_go_proto",
        "@go_googleapis//google/rpc:status_go_proto",
        "@org_golang_google_grpc//:go_default_library",
        "@org_golang_google_grpc//codes",
        "@org_golang_google_grpc//reflection",
        "@org_golang_google_grpc//status",
        "@org_golang_google_protobuf//proto",
        "@org_golang_google_protobuf//types/known/anypb",
        "@org_golang_google_protobuf//types/known/timestamppb",
    ],
)

View File

@ -0,0 +1,146 @@
package kernelmanager_rpc
import (
"context"
"errors"
"fmt"
base "go.resf.org/peridot/base/go"
"go.resf.org/peridot/base/go/kv"
kernelmanagerpb "go.resf.org/peridot/tools/kernelmanager/pb"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
)
// getPrefixEnd returns the exclusive upper bound for a range scan over all
// keys that share the given prefix: the smallest key lexicographically
// greater than every key starting with `key`.
func getPrefixEnd(key []byte) []byte {
	end := append([]byte(nil), key...)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] == 0xff {
			// This byte cannot be incremented; carry into the previous one.
			continue
		}
		end[i]++
		return end[:i+1]
	}
	// Every byte was 0xff, so no larger prefix exists (e.g. 0xffff);
	// default to WithFromKey policy.
	return []byte{0}
}
// ListKernels returns a paginated list of all kernels stored under the
// /kernels/entries/ prefix in the KV store.
func (s *Server) ListKernels(ctx context.Context, req *kernelmanagerpb.ListKernelsRequest) (*kernelmanagerpb.ListKernelsResponse, error) {
	// Clamp the page size to [1, 100], defaulting to 20 when unset/invalid.
	if req.PageSize < 1 {
		req.PageSize = 20
	}
	if req.PageSize > 100 {
		req.PageSize = 100
	}

	// The prefix is constant; the previous fmt.Sprintf with no arguments
	// was unnecessary (staticcheck S1039).
	const prefix = "/kernels/entries/"
	query, err := s.kv.RangePrefix(ctx, prefix, req.PageSize, req.PageToken)
	if err != nil {
		base.LogErrorf("failed to get kernels: %v", err)
		return nil, status.Error(codes.Internal, "failed to get kernels")
	}

	kernels := make([]*kernelmanagerpb.Kernel, 0, len(query.Pairs))
	for _, pair := range query.Pairs {
		kernel := &kernelmanagerpb.Kernel{}
		if err := proto.Unmarshal(pair.Value, kernel); err != nil {
			base.LogErrorf("failed to unmarshal kernel: %v", err)
			return nil, status.Error(codes.Internal, "failed to unmarshal kernel")
		}
		kernels = append(kernels, kernel)
	}

	return &kernelmanagerpb.ListKernelsResponse{
		Kernels:       kernels,
		NextPageToken: query.NextToken,
	}, nil
}
// GetKernel returns the kernel stored under the given resource name.
// Returns InvalidArgument when no name is given and NotFound when the
// kernel does not exist.
func (s *Server) GetKernel(ctx context.Context, req *kernelmanagerpb.GetKernelRequest) (*kernelmanagerpb.Kernel, error) {
	if req.Name == "" {
		return nil, status.Error(codes.InvalidArgument, "name must be provided")
	}

	kernelBytes, err := s.kv.Get(ctx, fmt.Sprintf("/kernels/entries/%s", req.Name))
	if err != nil {
		// Surface a missing key as NotFound instead of a generic Internal
		// error, consistent with UpdateKernel's handling.
		if errors.Is(err, kv.ErrNotFound) {
			return nil, status.Error(codes.NotFound, "kernel not found")
		}
		base.LogErrorf("failed to get kernel: %v", err)
		return nil, status.Error(codes.Internal, "failed to get kernel")
	}

	kernel := &kernelmanagerpb.Kernel{}
	if err := proto.Unmarshal(kernelBytes.Value, kernel); err != nil {
		base.LogErrorf("failed to unmarshal kernel: %v", err)
		return nil, status.Error(codes.Internal, "failed to unmarshal kernel")
	}

	return kernel, nil
}
// CreateKernel stores a new kernel under a generated resource name of the
// form "<user-supplied name>/<generated id>".
// Returns InvalidArgument when the kernel is missing, has an empty name, or
// the name is already taken.
func (s *Server) CreateKernel(ctx context.Context, req *kernelmanagerpb.CreateKernelRequest) (*kernelmanagerpb.Kernel, error) {
	if req.Kernel == nil {
		return nil, status.Error(codes.InvalidArgument, "kernel must be provided")
	}
	// An empty name would produce a malformed "/kernels/entries//..." key.
	if req.Kernel.Name == "" {
		return nil, status.Error(codes.InvalidArgument, "kernel name must be provided")
	}

	// Verify first that the custom name is not already taken.
	// NOTE(review): this check-then-set is not atomic; two concurrent
	// creates with the same name could both pass. Confirm whether kv
	// offers a transactional put-if-absent.
	prefix := fmt.Sprintf("/kernels/entries/%s/", req.Kernel.Name)
	query, err := s.kv.RangePrefix(ctx, prefix, 1, "")
	if err != nil {
		base.LogErrorf("failed to get kernels: %v", err)
		return nil, status.Error(codes.Internal, "failed to get kernels")
	}
	if len(query.Pairs) > 0 {
		return nil, status.Error(codes.InvalidArgument, "kernel name already taken")
	}

	// Append a generated ID so the stored name is unique.
	name := fmt.Sprintf("%s/%s", req.Kernel.Name, base.NameGen("kernels"))
	req.Kernel.Name = name

	kernelBytes, err := proto.Marshal(req.Kernel)
	if err != nil {
		base.LogErrorf("failed to marshal kernel: %v", err)
		return nil, status.Error(codes.Internal, "failed to marshal kernel")
	}
	err = s.kv.Set(ctx, fmt.Sprintf("/kernels/entries/%s", name), kernelBytes)
	if err != nil {
		base.LogErrorf("failed to set kernel: %v", err)
		return nil, status.Error(codes.Internal, "failed to set kernel")
	}

	return req.Kernel, nil
}
// UpdateKernel replaces an existing kernel entry wholesale with the request
// body (there is no field mask). The kernel is looked up by req.Kernel.Name;
// NotFound is returned when it does not exist.
// NOTE(review): the existence check and the write are not atomic -- a
// concurrent delete between them would recreate the entry.
func (s *Server) UpdateKernel(ctx context.Context, req *kernelmanagerpb.UpdateKernelRequest) (*kernelmanagerpb.Kernel, error) {
	if req.Kernel == nil {
		return nil, status.Error(codes.InvalidArgument, "kernel must be provided")
	}

	// Check existing kernel
	_, err := s.kv.Get(ctx, fmt.Sprintf("/kernels/entries/%s", req.Kernel.Name))
	if err != nil {
		if errors.Is(err, kv.ErrNotFound) {
			return nil, status.Error(codes.NotFound, "kernel not found")
		}
		base.LogErrorf("failed to get kernel: %v", err)
		return nil, status.Error(codes.Internal, "failed to get kernel")
	}

	kernelBytes, err := proto.Marshal(req.Kernel)
	if err != nil {
		base.LogErrorf("failed to marshal kernel: %v", err)
		return nil, status.Error(codes.Internal, "failed to marshal kernel")
	}
	err = s.kv.Set(ctx, fmt.Sprintf("/kernels/entries/%s", req.Kernel.Name), kernelBytes)
	if err != nil {
		base.LogErrorf("failed to set kernel: %v", err)
		return nil, status.Error(codes.Internal, "failed to set kernel")
	}

	return req.Kernel, nil
}

View File

@ -0,0 +1,144 @@
// Copyright 2023 Peridot Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kernelmanager_rpc
import (
"context"
base "go.resf.org/peridot/base/go"
kernelmanagerpb "go.resf.org/peridot/tools/kernelmanager/pb"
mothershippb "go.resf.org/peridot/tools/mothership/pb"
v11 "go.temporal.io/api/enums/v1"
"go.temporal.io/api/serviceerror"
"go.temporal.io/api/workflowservice/v1"
"google.golang.org/genproto/googleapis/longrunning"
rpccode "google.golang.org/genproto/googleapis/rpc/code"
rpcstatus "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/anypb"
"google.golang.org/protobuf/types/known/timestamppb"
)
// describeWorkflowToOperation converts a Temporal DescribeWorkflowExecution
// response into a google.longrunning.Operation: the workflow ID becomes the
// operation name, start/close times become metadata, and a terminal
// workflow status is translated into the operation's result or error.
func (s *Server) describeWorkflowToOperation(ctx context.Context, res *workflowservice.DescribeWorkflowExecutionResponse) (*longrunning.Operation, error) {
	if res.WorkflowExecutionInfo == nil {
		return nil, status.Error(codes.NotFound, "workflow not found")
	}
	if res.WorkflowExecutionInfo.Execution == nil {
		return nil, status.Error(codes.NotFound, "workflow not found")
	}

	op := &longrunning.Operation{
		Name: res.WorkflowExecutionInfo.Execution.WorkflowId,
	}

	// If the workflow is not running, we can mark the operation as done
	if res.WorkflowExecutionInfo.Status != v11.WORKFLOW_EXECUTION_STATUS_RUNNING {
		op.Done = true
	}

	// Add metadata
	// NOTE(review): the "rpm" prefix on these locals looks copied from the
	// mothership service; consider renaming to plain "metadata".
	rpmMetadata := &kernelmanagerpb.KernelManagerMetadata{
		StartTime: nil,
		EndTime:   nil,
	}
	st := res.WorkflowExecutionInfo.GetStartTime()
	if st != nil {
		rpmMetadata.StartTime = timestamppb.New(*st)
	}
	et := res.WorkflowExecutionInfo.GetCloseTime()
	if et != nil {
		rpmMetadata.EndTime = timestamppb.New(*et)
	}
	rpmMetadataAny, err := anypb.New(rpmMetadata)
	if err != nil {
		// Metadata is best-effort; return the operation without it.
		return op, nil
	}
	op.Metadata = rpmMetadataAny

	// If completed, add result
	// If failed, add error
	if res.WorkflowExecutionInfo.Status == v11.WORKFLOW_EXECUTION_STATUS_COMPLETED {
		// Complete, we need to get the result using GetWorkflow
		// NOTE(review): the result is decoded as mothership's
		// ProcessRPMResponse, but kernelmanager.proto declares the
		// TriggerKernelUpdate response_type as TriggerKernelUpdateResponse
		// -- confirm which message the kernel update workflow returns.
		run := s.temporal.GetWorkflow(ctx, op.Name, "")
		var res mothershippb.ProcessRPMResponse
		if err := run.Get(ctx, &res); err != nil {
			return nil, err
		}
		resAny, err := anypb.New(&res)
		if err != nil {
			return nil, err
		}
		op.Result = &longrunning.Operation_Response{Response: resAny}
	} else if res.WorkflowExecutionInfo.Status == v11.WORKFLOW_EXECUTION_STATUS_FAILED {
		// Failed, we need to get the error using GetWorkflow
		run := s.temporal.GetWorkflow(ctx, op.Name, "")
		err := run.Get(ctx, nil)
		// No error so return with a generic error
		if err == nil {
			op.Result = &longrunning.Operation_Error{
				Error: &rpcstatus.Status{
					Code:    int32(rpccode.Code_INTERNAL),
					Message: "workflow failed",
				},
			}
			return op, nil
		}
		// Error, so return with the error
		op.Result = &longrunning.Operation_Error{
			Error: &rpcstatus.Status{
				Code:    int32(rpccode.Code_FAILED_PRECONDITION),
				Message: err.Error(),
			},
		}
	} else if res.WorkflowExecutionInfo.Status == v11.WORKFLOW_EXECUTION_STATUS_CANCELED {
		// Error, so return with the error
		op.Result = &longrunning.Operation_Error{
			Error: &rpcstatus.Status{
				Code:    int32(rpccode.Code_CANCELLED),
				Message: "workflow canceled",
			},
		}
	}

	return op, nil
}
// getOperation resolves a long-running operation by describing the Temporal
// workflow whose ID equals the operation name.
func (s *Server) getOperation(ctx context.Context, name string) (*longrunning.Operation, error) {
	res, err := s.temporal.DescribeWorkflowExecution(ctx, name, "")
	if err == nil {
		return s.describeWorkflowToOperation(ctx, res)
	}
	if _, ok := err.(*serviceerror.NotFound); ok {
		return nil, status.Error(codes.NotFound, "workflow not found")
	}
	// Log error, but user doesn't need to know about it
	base.LogErrorf("failed to describe workflow: %v", err)
	return &longrunning.Operation{
		Name: name,
	}, nil
}
// GetOperation implements google.longrunning.Operations.GetOperation by
// treating the operation name as a Temporal workflow ID.
func (s *Server) GetOperation(ctx context.Context, req *longrunning.GetOperationRequest) (*longrunning.Operation, error) {
	// Get from Temporal. We don't care about long term storage, so we don't
	// need to store the operation in the database.
	return s.getOperation(ctx, req.Name)
}

View File

@ -0,0 +1,57 @@
package kernelmanager_rpc
import (
base "go.resf.org/peridot/base/go"
"go.resf.org/peridot/base/go/kv"
kernelmanagerpb "go.resf.org/peridot/tools/kernelmanager/pb"
"go.temporal.io/sdk/client"
"google.golang.org/genproto/googleapis/longrunning"
"google.golang.org/grpc"
"google.golang.org/grpc/reflection"
)
// Server implements the KernelManager gRPC API and the long-running
// Operations service, backed by a KV store and a Temporal client.
type Server struct {
	base.GRPCServer
	longrunning.UnimplementedOperationsServer
	kernelmanagerpb.UnimplementedKernelManagerServer

	// kv stores kernel entries (keys under /kernels/entries/).
	kv kv.KV
	// temporal resolves operations from workflow executions.
	temporal client.Client
}
// NewServer constructs a KernelManager Server wired with OIDC
// authentication, the given KV backend, and a Temporal client.
func NewServer(kv kv.KV, temporalClient client.Client, oidcInterceptorDetails *base.OidcInterceptorDetails, opts ...base.GRPCServerOption) (*Server, error) {
	interceptor, err := base.OidcGrpcInterceptor(oidcInterceptorDetails)
	if err != nil {
		return nil, err
	}

	// Every unary call goes through the OIDC interceptor.
	srv, err := base.NewGRPCServer(append(opts, base.WithUnaryInterceptors(interceptor))...)
	if err != nil {
		return nil, err
	}

	return &Server{
		GRPCServer: *srv,
		kv:         kv,
		temporal:   temporalClient,
	}, nil
}
// Start registers the Operations and KernelManager gRPC services (plus
// server reflection), wires their grpc-gateway REST handlers, and then
// delegates to the embedded GRPCServer's Start.
func (s *Server) Start() error {
	s.RegisterService(func(server *grpc.Server) {
		// Reflection makes the server discoverable by grpcurl and similar tools.
		reflection.Register(server)
		longrunning.RegisterOperationsServer(server, s)
		kernelmanagerpb.RegisterKernelManagerServer(server, s)
	})
	// Expose the same services over HTTP/JSON via grpc-gateway.
	if err := s.GatewayEndpoints(
		longrunning.RegisterOperationsHandler,
		kernelmanagerpb.RegisterKernelManagerHandler,
	); err != nil {
		return err
	}

	return s.GRPCServer.Start()
}

View File

@ -0,0 +1,76 @@
/**
* Copyright 2023 Peridot Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import React from 'react';
import { Navigate, Route, Routes } from 'react-router-dom';
import AppBar from '@mui/material/AppBar';
import Box from '@mui/material/Box';
import Toolbar from '@mui/material/Toolbar';
import Typography from '@mui/material/Typography';
import Button from '@mui/material/Button';
import { Theme } from '@mui/material/styles';
import { Kernels } from './Kernels';
/**
 * App is the root component of the KernelManager UI: a fixed top bar with
 * branding and auth controls, plus the routes (currently only the kernel
 * list at /kernels).
 *
 * Relies on server-injected globals: window.__beta__ (beta label flag) and
 * window.__peridot_user__ (current user, when logged in).
 *
 * NOTE(review): Button is used with variant="primary"; stock MUI Button
 * variants are text | outlined | contained, so this presumably comes from a
 * custom theme variant declared in base/ts/mui -- confirm.
 */
export const App = () => {
  return (
    <Box sx={{ display: 'flex' }}>
      <AppBar
        elevation={5}
        position="fixed"
        sx={{ zIndex: (theme: Theme) => theme.zIndex.drawer + 1 }}
      >
        <Toolbar variant="dense">
          <Typography variant="h6" component="div" sx={{ flexGrow: 1 }}>
            RESF KernelManager{window.__beta__ ? ' (beta)' : ''}
          </Typography>
          {/* Right-aligned auth area: email + logout when signed in,
              otherwise a login link. */}
          <Box sx={{ flexGrow: 1, textAlign: 'right' }}>
            {window.__peridot_user__ ? (
              <>
                <Button variant="primary">
                  {window.__peridot_user__.email}
                </Button>
                <Button
                  className="native-link"
                  href="/auth/oidc/logout"
                  variant="primary"
                >
                  Logout
                </Button>
              </>
            ) : (
              <Button
                className="native-link"
                href="/auth/oidc/login"
                variant="primary"
              >
                Login
              </Button>
            )}
          </Box>
        </Toolbar>
      </AppBar>
      <Box component="main" sx={{ p: 3, flexGrow: 1 }}>
        {/* Spacer matching the fixed AppBar's dense toolbar height. */}
        <Toolbar variant="dense" />
        <Routes>
          <Route index element={<Navigate to="/kernels" replace />} />
          <Route path="/kernels">
            <Route index element={<Kernels />} />
          </Route>
        </Routes>
      </Box>
    </Box>
  );
};

38
tools/kernelmanager/ui/BUILD vendored Normal file
View File

@ -0,0 +1,38 @@
# Copyright 2023 Peridot Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("//tools/build_rules/ui_bundle:defs.bzl", "ui_bundle")
# Bundles the KernelManager SPA (the TypeScript/React sources in this
# directory) into static web assets that the Go library below embeds.
ui_bundle(
    name = "bundle",
    deps = [
        "//:node_modules/@mui/icons-material",
        "//:node_modules/@mui/material",
        "//base/ts/mui",
        "//tools/kernelmanager/proto/v1:kernelmanagerpb_ts_proto",
    ],
)

# Go wrapper that embeds the compiled bundle (via go:embed in ui.go)
# and exposes it to the server through base.FrontendInfo.
go_library(
    name = "ui",
    srcs = ["ui.go"],
    # keep
    embedsrcs = [
        ":bundle",  # keep
    ],
    importpath = "go.resf.org/peridot/tools/kernelmanager/ui",
    visibility = ["//visibility:public"],
    deps = ["//base/go"],
)

View File

@ -0,0 +1,25 @@
import * as React from 'react';
import { ResourceTable } from 'base/ts/mui/ResourceTable';
import {
V1ListKernelsResponse,
V1Kernel,
} from 'bazel-bin/tools/kernelmanager/proto/v1/kernelmanagerpb_ts_proto_gen';
import { reqap } from 'base/ts/reqap';
import { kernelManagerApi } from 'tools/kernelmanager/ui/api';
/**
 * Kernels renders the paginated table of kernels tracked by
 * KernelManager. The filter input is hidden; the loader ignores the
 * filter argument since the List endpoint does not use it here.
 */
export const Kernels = () => {
  // Page loader: wraps the API promise with reqap for the table.
  const loadKernels = (pageSize: number, pageToken?: string, filter?: string) =>
    reqap(
      kernelManagerApi.listKernels({
        pageSize,
        pageToken,
      }),
    );

  // Unwrap the list response into table rows.
  const toRows = (response: V1ListKernelsResponse) => response.kernels || [];

  return (
    <ResourceTable<V1Kernel>
      hideFilter
      load={loadKernels}
      transform={toRows}
      fields={[{ key: 'name', label: 'Kernel Name' }]}
    />
  );
};

View File

@ -0,0 +1,23 @@
/**
* Copyright 2023 Peridot Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as kernelManager from 'bazel-bin/tools/kernelmanager/proto/v1/kernelmanagerpb_ts_proto_gen';
// Shared, preconfigured client for the KernelManager API. All requests
// are routed through the "/api" prefix on the serving host.
const configuration = new kernelManager.Configuration({ basePath: '/api' });

export const kernelManagerApi = new kernelManager.KernelManagerApi(
  configuration,
);

View File

@ -0,0 +1,35 @@
/**
* Copyright 2023 Peridot Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import React from 'react';
import { createRoot } from 'react-dom/client';
import { BrowserRouter } from 'react-router-dom';
import CssBaseline from '@mui/material/CssBaseline';
import ThemeProvider from '@mui/material/styles/ThemeProvider';
import { peridotDarkTheme } from 'base/ts/mui/theme';
import { App } from './App';
// Application entry point: mount the SPA into #app (falling back to
// document.body), honoring the server-provided path prefix and applying
// the shared Peridot dark theme.
const mountPoint = document.getElementById('app') || document.body;

createRoot(mountPoint).render(
  <BrowserRouter basename={window.__peridot_prefix__ || ''}>
    <ThemeProvider theme={peridotDarkTheme}>
      <CssBaseline />
      <App />
    </ThemeProvider>
  </BrowserRouter>
);

View File

@ -0,0 +1,33 @@
// Copyright 2023 Peridot Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kernelmanager_ui
import (
"embed"
base "go.resf.org/peridot/base/go"
)
// assets embeds every file in this package directory, including the
// compiled frontend bundle produced by the ui_bundle Bazel rule.
//
//go:embed *
var assets embed.FS
// InitFrontendInfo fills in the FrontendInfo used to serve the
// KernelManager UI and returns the embedded asset filesystem.
// A nil info is replaced with a fresh value before being populated.
// The UI is served without requiring authentication.
func InitFrontendInfo(info *base.FrontendInfo) *embed.FS {
	target := info
	if target == nil {
		target = &base.FrontendInfo{}
	}
	target.Title = "RESF KernelManager"
	target.AllowUnauthenticated = true
	return &assets
}

35
tools/kernelmanager/worker/BUILD vendored Normal file
View File

@ -0,0 +1,35 @@
# Copyright 2023 Peridot Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

load("@io_bazel_rules_go//go:def.bzl", "go_library")

# Temporal worker library for KernelManager: activities that repack
# kernel.org tarballs (kernel.go), the Worker wiring (worker.go) and
# the workflow definitions (workflows.go).
go_library(
    name = "worker",
    srcs = [
        "kernel.go",
        "worker.go",
        "workflows.go",
    ],
    importpath = "go.resf.org/peridot/tools/kernelmanager/worker",
    visibility = ["//visibility:public"],
    deps = [
        "//base/go",
        "//base/go/forge",
        "//base/go/kv",
        "//base/go/storage",
        "//tools/kernelmanager/kernel_repack",
        "//tools/kernelmanager/kernel_repack/kernelorg",
        "//tools/kernelmanager/kernel_repack/v1:kernel_repack",
        "//tools/kernelmanager/proto/v1:pb",
        "//vendor/github.com/go-git/go-billy/v5/memfs",
        "//vendor/github.com/go-git/go-git/v5:go-git",
        "//vendor/github.com/go-git/go-git/v5/config",
        "//vendor/github.com/go-git/go-git/v5/plumbing",
        "//vendor/github.com/go-git/go-git/v5/plumbing/object",
        "//vendor/github.com/go-git/go-git/v5/storage/memory",
        "//vendor/github.com/pkg/errors",
        "//vendor/go.temporal.io/sdk/workflow",
        "//vendor/golang.org/x/crypto/openpgp",
        "@org_golang_google_grpc//codes",
        "@org_golang_google_grpc//status",
        "@org_golang_google_protobuf//proto",
        "@org_golang_google_protobuf//types/known/timestamppb",
    ],
)

View File

@ -0,0 +1,253 @@
package kernelmanager_worker
import (
"context"
"fmt"
"github.com/go-git/go-billy/v5/memfs"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/config"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/object"
"github.com/go-git/go-git/v5/storage/memory"
"github.com/pkg/errors"
base "go.resf.org/peridot/base/go"
"go.resf.org/peridot/tools/kernelmanager/kernel_repack"
"go.resf.org/peridot/tools/kernelmanager/kernel_repack/kernelorg"
repack_v1 "go.resf.org/peridot/tools/kernelmanager/kernel_repack/v1"
kernelmanagerpb "go.resf.org/peridot/tools/kernelmanager/pb"
"golang.org/x/crypto/openpgp"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/timestamppb"
"strings"
"time"
)
// GetKernel looks up the kernel entry stored under
// /kernels/entries/<name> in the key-value store and decodes it from
// its protobuf wire form.
//
// Internal failures are logged in full but surfaced to callers only as
// generic codes.Internal status errors.
func (w *Worker) GetKernel(ctx context.Context, name string) (*kernelmanagerpb.Kernel, error) {
	entry, err := w.kv.Get(ctx, "/kernels/entries/"+name)
	if err != nil {
		base.LogErrorf("failed to get kernel: %v", err)
		return nil, status.Error(codes.Internal, "failed to get kernel")
	}

	var kernel kernelmanagerpb.Kernel
	if err := proto.Unmarshal(entry.Value, &kernel); err != nil {
		base.LogErrorf("failed to unmarshal kernel: %v", err)
		return nil, status.Error(codes.Internal, "failed to unmarshal kernel")
	}

	return &kernel, nil
}
// KernelRepack repacks the latest upstream kernel.org release for the
// given kernel into RPM packaging sources and pushes the result to the
// kernel's SCM repository.
//
// Pipeline:
//  1. Ensure the packaging repository exists on the configured forge.
//  2. Init an in-memory repo, add the forge remote and fetch the
//     configured branches (branches missing remotely are created
//     locally later).
//  3. Download and repack the latest mainline or longterm tarball.
//  4. For each configured branch: check it out, clear SOURCES/, write
//     the repack output and commit.
//  5. Push all branches and return an Update describing the result.
//
// The returned Update embeds the kernel and records the upstream
// version, the tarball SHA256 and, for longterm releases, the PGP
// signing key identity.
func (w *Worker) KernelRepack(ctx context.Context, kernel *kernelmanagerpb.Kernel) (*kernelmanagerpb.Update, error) {
	// Everything below dereferences Config and Config.RepackOptions, so
	// reject partially populated kernel entries up front.
	if kernel.Config == nil || kernel.Config.RepackOptions == nil {
		return nil, status.Error(codes.InvalidArgument, "kernel config or repack options missing")
	}

	gitForge := w.forge.WithNamespace(kernel.Config.ScmNamespace)
	gitRemote := gitForge.GetRemote(kernel.Pkg)
	gitAuth, err := gitForge.GetAuthenticator()
	if err != nil {
		return nil, err
	}

	// Make sure the target repository exists before we try to push.
	err = gitForge.EnsureRepositoryExists(gitAuth, kernel.Pkg)
	if err != nil {
		return nil, err
	}

	// "Clone" into memory: init an empty repo backed by an in-memory
	// filesystem, then fetch. If the repo doesn't exist remotely we end
	// up with init only and create branches locally.
	storer := memory.NewStorage()
	fs := memfs.New()
	repo, err := git.Init(storer, fs)
	if err != nil {
		return nil, errors.Wrap(err, "failed to init git repo")
	}
	wt, err := repo.Worktree()
	if err != nil {
		return nil, errors.Wrap(err, "failed to get worktree")
	}

	// One mirroring refspec per configured branch.
	var refspecs []config.RefSpec
	for _, branch := range kernel.Config.ScmBranches {
		refspecs = append(refspecs, config.RefSpec(fmt.Sprintf("refs/heads/%s:refs/heads/%[1]s", branch)))
	}

	// Create a new remote pointing at the forge.
	_, err = repo.CreateRemote(&config.RemoteConfig{
		Name:  "origin",
		URLs:  []string{gitRemote},
		Fetch: refspecs,
	})
	if err != nil {
		return nil, errors.Wrap(err, "failed to create remote")
	}

	// The fetch error is deliberately discarded: a freshly created
	// (empty) repository has no refs and the fetch fails, yet the
	// repack should still proceed and create the branches locally.
	// NOTE(review): this also hides auth/network failures until the
	// final push — confirm that is acceptable.
	_ = repo.Fetch(&git.FetchOptions{
		Auth:       gitAuth.AuthMethod,
		RemoteName: "origin",
		RefSpecs:   refspecs,
	})

	var output *kernel_repack.Output
	var entity *openpgp.Entity
	var version string

	// BuildID should be YYYYMMDDHHMM.
	buildID := time.Now().Format("200601021504")

	// changelog builds the single "Rebase to <version>" changelog entry
	// stamped with today's date and the build ID.
	// NOTE(review): the author name is hard-coded; consider deriving it
	// from gitAuth.AuthorName instead.
	changelog := func(version string) []*repack_v1.ChangelogEntry {
		msg := fmt.Sprintf("Rebase to %s", version)
		return []*repack_v1.ChangelogEntry{
			{
				Date:    time.Now().Format("Mon Jan 02 2006"),
				Name:    "Mustafa Gezen",
				Version: version,
				BuildID: buildID,
				Text:    msg,
			},
		}
	}

	switch kernel.Config.RepackOptions.KernelOrgVariant {
	case kernelmanagerpb.RepackOptions_MAINLINE:
		// The mainline signer entity is discarded; only longterm
		// releases record a PGP identity below.
		mlVersion, mlTarball, _, err := kernelorg.GetLatestML()
		if err != nil {
			return nil, err
		}
		version = mlVersion
		out, err := repack_v1.ML(&repack_v1.Input{
			Version:       mlVersion,
			BuildID:       buildID,
			KernelPackage: kernel.Pkg,
			Tarball:       mlTarball,
			Changelog:     changelog(mlVersion),
		})
		if err != nil {
			return nil, err
		}
		output = out
	case kernelmanagerpb.RepackOptions_LONGTERM:
		// GetLatestLT matches on a "X.Y." prefix, so normalize the
		// configured version to end with a dot.
		repackVersion := kernel.Config.RepackOptions.KernelOrgVersion
		if !strings.HasSuffix(repackVersion, ".") {
			repackVersion = repackVersion + "."
		}
		ltVersion, ltTarball, ltEntity, err := kernelorg.GetLatestLT(repackVersion)
		if err != nil {
			return nil, err
		}
		version = ltVersion
		out, err := repack_v1.LT(&repack_v1.Input{
			Version:       ltVersion,
			BuildID:       buildID,
			KernelPackage: kernel.Pkg,
			Tarball:       ltTarball,
			Changelog:     changelog(ltVersion),
		})
		if err != nil {
			return nil, err
		}
		output = out
		entity = ltEntity
	default:
		// Previously an unknown/unset variant fell through with
		// output == nil and panicked at output.ToFS below; fail fast
		// with a clear error instead.
		return nil, status.Errorf(codes.InvalidArgument, "unsupported kernel.org variant: %v", kernel.Config.RepackOptions.KernelOrgVariant)
	}

	// TODO: upload the upstream tarball to object storage (w.storage is
	// not used here yet).

	// Check out each branch, delete all files in SOURCES, then extract
	// the repack output into the worktree and commit it.
	for _, branch := range kernel.Config.ScmBranches {
		refName := plumbing.NewBranchReferenceName(branch)
		err := wt.Checkout(&git.CheckoutOptions{
			Branch: refName,
		})
		if err != nil {
			// The branch does not exist yet (new or empty repo): point
			// HEAD at it so the commit below creates it.
			h := plumbing.NewSymbolicReference(plumbing.HEAD, refName)
			if err := repo.Storer.CheckAndSetReference(h, nil); err != nil {
				return nil, errors.Wrap(err, "failed to checkout branch")
			}
		}

		// Delete all files in SOURCES so files removed upstream do not
		// linger between repacks.
		files, err := fs.ReadDir("SOURCES")
		if err != nil {
			return nil, errors.Wrap(err, "failed to read SOURCES directory")
		}
		for _, file := range files {
			err = fs.Remove(fmt.Sprintf("SOURCES/%s", file.Name()))
			if err != nil {
				return nil, errors.Wrap(err, "failed to remove file")
			}
		}

		// Extract the repack output to the worktree filesystem.
		err = output.ToFS(fs)
		if err != nil {
			return nil, errors.Wrap(err, "failed to write output files")
		}

		// Commit changes. AllowEmptyCommits keeps branches in lockstep
		// even when a branch already matches the repack output.
		_, err = wt.Add(".")
		if err != nil {
			return nil, errors.Wrap(err, "failed to add files to git")
		}
		msg := fmt.Sprintf("Repacking %s from kernel.org - %s - %s", kernel.Pkg, version, buildID)
		_, err = wt.Commit(msg, &git.CommitOptions{
			AllowEmptyCommits: true,
			Author: &object.Signature{
				Name:  gitAuth.AuthorName,
				Email: gitAuth.AuthorEmail,
				When:  time.Now(),
			},
		})
		if err != nil {
			return nil, errors.Wrap(err, "failed to commit changes")
		}
	}

	// Push all configured branches in one go.
	err = repo.Push(&git.PushOptions{
		Auth:       gitAuth.AuthMethod,
		RemoteName: "origin",
		RefSpecs:   refspecs,
	})
	if err != nil {
		return nil, errors.Wrap(err, "failed to push changes")
	}

	// Record the result of this repack run.
	update := &kernelmanagerpb.Update{
		Kernel:                      kernel,
		KernelOrgTarballSha256:      output.TarballSha256,
		KernelOrgTarballPgpIdentity: "",
		KernelOrgVersion:            version,
		FinishedTime:                timestamppb.New(time.Now()),
	}
	// Longterm tarballs are PGP-verified; record the signing key.
	if entity != nil {
		update.KernelOrgTarballPgpIdentity = entity.PrimaryKey.KeyIdShortString()
	}

	return update, nil
}

View File

@ -0,0 +1,35 @@
// Copyright 2023 Peridot Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kernelmanager_worker
import (
"go.resf.org/peridot/base/go/forge"
"go.resf.org/peridot/base/go/kv"
"go.resf.org/peridot/base/go/storage"
)
// Worker bundles the external dependencies used by the KernelManager
// activities.
type Worker struct {
	kv      kv.KV           // key-value store holding kernel/update records
	forge   forge.Forge     // git forge (repo creation, remotes, auth)
	storage storage.Storage // object storage for build artifacts
}
// New returns a Worker wired up with the given key-value store, git
// forge and object storage implementations.
func New(kvStore kv.KV, gitForge forge.Forge, st storage.Storage) *Worker {
	w := &Worker{}
	w.kv = kvStore
	w.forge = gitForge
	w.storage = st
	return w
}

Some files were not shown because too many files have changed in this diff Show More