fix: remove remaining fatal logs and turn them into errors

Mustafa Gezen 2021-09-10 22:31:59 +02:00
parent 985748b05d
commit 373e96f0c9
Signed by: mustafa
GPG Key ID: DCDF010D946438C1
12 changed files with 102 additions and 69 deletions

View File

@@ -21,7 +21,7 @@
 package blob

 type Storage interface {
-    Write(path string, content []byte)
-    Read(path string) []byte
-    Exists(path string) bool
+    Write(path string, content []byte) error
+    Read(path string) ([]byte, error)
+    Exists(path string) (bool, error)
 }
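
Every Storage method now returns an error, so callers decide how to handle failures instead of the backend terminating the process. A minimal caller-side sketch of the new contract (copyBlob and its error messages are hypothetical, not part of this commit):

func copyBlob(src, dst Storage, path string) error {
    // A lookup failure is now visible to the caller instead of being fatal.
    ok, err := src.Exists(path)
    if err != nil {
        return fmt.Errorf("could not check %s: %v", path, err)
    }
    if !ok {
        return fmt.Errorf("%s not present in source storage", path)
    }
    content, err := src.Read(path)
    if err != nil {
        return err
    }
    // Write reports the failure; whether it is fatal is the caller's decision.
    return dst.Write(path, content)
}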

View File

@@ -21,8 +21,8 @@
 package file

 import (
+    "fmt"
     "io/ioutil"
-    "log"
     "os"
     "path/filepath"
 )
@@ -37,38 +37,47 @@ func New(path string) *File {
     }
 }

-func (f *File) Write(path string, content []byte) {
+func (f *File) Write(path string, content []byte) error {
     w, err := os.OpenFile(filepath.Join(f.path, path), os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0644)
     if err != nil {
-        log.Fatalf("could not open file: %v", err)
+        return fmt.Errorf("could not open file: %v", err)
     }

     _, err = w.Write(content)
     if err != nil {
-        log.Fatalf("could not write file to file: %v", err)
+        return fmt.Errorf("could not write file to file: %v", err)
     }

     // Close, just like writing a file.
     if err := w.Close(); err != nil {
-        log.Fatalf("could not close file writer to source: %v", err)
+        return fmt.Errorf("could not close file writer to source: %v", err)
     }
+
+    return nil
 }

-func (f *File) Read(path string) []byte {
+func (f *File) Read(path string) ([]byte, error) {
     r, err := os.OpenFile(filepath.Join(f.path, path), os.O_RDONLY, 0644)
     if err != nil {
-        return nil
+        return nil, err
     }

     body, err := ioutil.ReadAll(r)
     if err != nil {
-        return nil
+        return nil, err
     }

-    return body
+    return body, nil
 }

-func (f *File) Exists(path string) bool {
+func (f *File) Exists(path string) (bool, error) {
     _, err := os.Stat(filepath.Join(f.path, path))
-    return !os.IsNotExist(err)
+    // A nil error means the file exists; IsNotExist means it does not;
+    // anything else is a real stat failure the caller should see.
+    if err == nil {
+        return true, nil
+    }
+    if os.IsNotExist(err) {
+        return false, nil
+    }
+    return false, err
 }
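
The fmt.Errorf calls introduced here format the cause with %v, which flattens it to plain text. On Go 1.13 and later, the %w verb would keep the chain inspectable; a sketch of the open-file branch under that assumption, not what the commit uses:

if err != nil {
    // %w wraps err so callers can still match it with errors.Is / errors.As.
    return fmt.Errorf("could not open file: %w", err)
}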

View File

@@ -23,62 +23,64 @@ package gcs
 import (
     "cloud.google.com/go/storage"
     "context"
+    "fmt"
     "io/ioutil"
-    "log"
 )

 type GCS struct {
     bucket *storage.BucketHandle
 }

-func New(name string) *GCS {
+func New(name string) (*GCS, error) {
     ctx := context.Background()
     client, err := storage.NewClient(ctx)
     if err != nil {
-        log.Fatalf("could not create gcloud client: %v", err)
+        return nil, fmt.Errorf("could not create gcloud client: %v", err)
     }

     return &GCS{
         bucket: client.Bucket(name),
-    }
+    }, nil
 }

-func (g *GCS) Write(path string, content []byte) {
+func (g *GCS) Write(path string, content []byte) error {
     ctx := context.Background()
     obj := g.bucket.Object(path)
     w := obj.NewWriter(ctx)

     _, err := w.Write(content)
     if err != nil {
-        log.Fatalf("could not write file to gcs: %v", err)
+        return fmt.Errorf("could not write file to gcs: %v", err)
     }

     // Close, just like writing a file.
     if err := w.Close(); err != nil {
-        log.Fatalf("could not close gcs writer to source: %v", err)
+        return fmt.Errorf("could not close gcs writer to source: %v", err)
     }
+
+    return nil
 }

-func (g *GCS) Read(path string) []byte {
+func (g *GCS) Read(path string) ([]byte, error) {
     ctx := context.Background()
     obj := g.bucket.Object(path)

     r, err := obj.NewReader(ctx)
     if err != nil {
-        return nil
+        return nil, err
     }

     body, err := ioutil.ReadAll(r)
     if err != nil {
-        return nil
+        return nil, err
     }

-    return body
+    return body, nil
 }

-func (g *GCS) Exists(path string) bool {
+func (g *GCS) Exists(path string) (bool, error) {
     ctx := context.Background()
     obj := g.bucket.Object(path)

     _, err := obj.NewReader(ctx)
-    return err == nil
+    return err == nil, nil
 }
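
Exists still maps every NewReader error to (false, nil), so a transient RPC failure is indistinguishable from a missing object. A stricter sketch that fetches object metadata and uses the cloud.google.com/go/storage sentinel error (a hypothetical follow-up, not part of this commit):

func (g *GCS) existsStrict(path string) (bool, error) {
    // Attrs fetches metadata only; ErrObjectNotExist marks a genuinely missing object.
    _, err := g.bucket.Object(path).Attrs(context.Background())
    if err == storage.ErrObjectNotExist {
        return false, nil
    }
    if err != nil {
        return false, err
    }
    return true, nil
}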

View File

@@ -29,7 +29,6 @@ import (
     "github.com/aws/aws-sdk-go/service/s3/s3manager"
     "github.com/spf13/viper"
     "io/ioutil"
-    "log"
 )

 type S3 struct {
@@ -65,7 +64,7 @@ func New(name string) *S3 {
     }
 }

-func (s *S3) Write(path string, content []byte) {
+func (s *S3) Write(path string, content []byte) error {
     buf := bytes.NewBuffer(content)

     _, err := s.uploader.Upload(&s3manager.UploadInput{
@@ -74,31 +73,33 @@ func (s *S3) Write(path string, content []byte) {
         Body: buf,
     })
     if err != nil {
-        log.Fatalf("failed to upload file to s3, %v", err)
+        return err
     }
+
+    return nil
 }

-func (s *S3) Read(path string) []byte {
+func (s *S3) Read(path string) ([]byte, error) {
     obj, err := s.uploader.S3.GetObject(&s3.GetObjectInput{
         Bucket: aws.String(s.bucket),
         Key:    aws.String(path),
     })
     if err != nil {
-        return nil
+        return nil, err
     }

     body, err := ioutil.ReadAll(obj.Body)
     if err != nil {
-        return nil
+        return nil, err
     }

-    return body
+    return body, nil
 }

-func (s *S3) Exists(path string) bool {
+func (s *S3) Exists(path string) (bool, error) {
     _, err := s.uploader.S3.GetObject(&s3.GetObjectInput{
         Bucket: aws.String(s.bucket),
         Key:    aws.String(path),
     })
-    return err == nil
+    return err == nil, nil
 }
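
The S3 existence check issues a GetObject, which begins fetching the object body and leaves it unread. A lighter sketch using HeadObject from aws-sdk-go (existsHead is a hypothetical helper, not part of this commit):

func (s *S3) existsHead(path string) (bool, error) {
    // HeadObject returns metadata only, so no body is transferred or left open.
    _, err := s.uploader.S3.HeadObject(&s3.HeadObjectInput{
        Bucket: aws.String(s.bucket),
        Key:    aws.String(path),
    })
    return err == nil, nil
}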

View File

@@ -31,8 +31,6 @@ type FsCreatorFunc func(branch string) (billy.Filesystem, error)

 type ProcessData struct {
     RpmLocation       string
     UpstreamPrefix    string
-    SshKeyLocation    string
-    SshUser           string
     Version           int
     GitCommitterName  string
     GitCommitterEmail string

View File

@@ -62,7 +62,11 @@ func add(cfg *srpmprocpb.Cfg, pd *data.ProcessData, md *data.ModeData, patchTree
         break
     case *srpmprocpb.Add_Lookaside:
         filePath = checkAddPrefix(eitherString(filepath.Base(addType.Lookaside), add.Name))
-        replacingBytes = pd.BlobStorage.Read(addType.Lookaside)
+        var err error
+        replacingBytes, err = pd.BlobStorage.Read(addType.Lookaside)
+        if err != nil {
+            return err
+        }

         hashFunction := data.CompareHash(replacingBytes, addType.Lookaside)
         if hashFunction == nil {

View File

@@ -21,9 +21,6 @@
 package directives

 import (
-    "encoding/json"
-    "log"
-    "os"
     "path/filepath"
     "strings"
@@ -41,8 +38,8 @@ func checkAddPrefix(file string) string {
     return filepath.Join("SOURCES", file)
 }

-func Apply(cfg *srpmprocpb.Cfg, pd *data.ProcessData, md *data.ModeData, patchTree *git.Worktree, pushTree *git.Worktree) {
-    var errs []string
+func Apply(cfg *srpmprocpb.Cfg, pd *data.ProcessData, md *data.ModeData, patchTree *git.Worktree, pushTree *git.Worktree) []error {
+    var errs []error

     directives := []func(*srpmprocpb.Cfg, *data.ProcessData, *data.ModeData, *git.Worktree, *git.Worktree) error{
         replace,
@@ -56,16 +53,13 @@ func Apply(cfg *srpmprocpb.Cfg, pd *data.ProcessData, md *data.ModeData, patchTr
     for _, directive := range directives {
         err := directive(cfg, pd, md, patchTree, pushTree)
         if err != nil {
-            errs = append(errs, err.Error())
+            errs = append(errs, err)
         }
     }

     if len(errs) > 0 {
-        err := json.NewEncoder(os.Stdout).Encode(errs)
-        if err != nil {
-            log.Fatal(errs)
-        }
-        os.Exit(1)
+        return errs
     }
+
+    return nil
 }
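
Apply now hands the collected []error back to its caller. Note that encoding/json renders most error values as empty objects, so a caller that wants the old JSON report can encode the messages instead; a hypothetical sketch in the style of the cfgPatches caller:

errs := directives.Apply(&cfg, pd, md, patchTree, pushTree)
if len(errs) > 0 {
    msgs := make([]string, 0, len(errs))
    for _, e := range errs {
        msgs = append(msgs, e.Error())
    }
    // Mirrors the removed stdout report, minus the os.Exit(1).
    _ = json.NewEncoder(os.Stdout).Encode(msgs)
    return fmt.Errorf("%d directives failed", len(msgs))
}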

View File

@@ -73,13 +73,16 @@ func replace(cfg *srpmprocpb.Cfg, pd *data.ProcessData, _ *data.ModeData, patchT
         }
         break
     case *srpmprocpb.Replace_WithLookaside:
-        bts := pd.BlobStorage.Read(replacing.WithLookaside)
+        bts, err := pd.BlobStorage.Read(replacing.WithLookaside)
+        if err != nil {
+            return err
+        }
         hasher := data.CompareHash(bts, replacing.WithLookaside)
         if hasher == nil {
             return errors.New("LOOKASIDE_FILE_AND_HASH_NOT_MATCHING")
         }
-        _, err := f.Write(bts)
+        _, err = f.Write(bts)
         if err != nil {
             return errors.New(fmt.Sprintf("COULD_NOT_WRITE_LOOKASIDE:%s", filePath))
         }
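
The surviving errors.New(fmt.Sprintf(...)) pattern is equivalent to a single fmt.Errorf call (staticcheck flags it as S1028); a possible cleanup, not part of this commit:

return fmt.Errorf("COULD_NOT_WRITE_LOOKASIDE:%s", filePath)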

View File

@@ -235,7 +235,10 @@ func (g *GitMode) WriteSource(pd *data.ProcessData, md *data.ModeData) error {
         body = md.BlobCache[hash]
         log.Printf("retrieving %s from cache", hash)
     } else {
-        fromBlobStorage := pd.BlobStorage.Read(hash)
+        fromBlobStorage, err := pd.BlobStorage.Read(hash)
+        if err != nil {
+            return err
+        }
         if fromBlobStorage != nil && !pd.NoStorageDownload {
             body = fromBlobStorage
             log.Printf("downloading %s from blob storage", hash)
@@ -274,7 +277,7 @@ func (g *GitMode) WriteSource(pd *data.ProcessData, md *data.ModeData) error {
     hasher := data.CompareHash(body, hash)
     if hasher == nil {
-        log.Fatal("checksum in metadata does not match dist-git file")
+        return fmt.Errorf("checksum in metadata does not match dist-git file")
     }

     md.SourcesToIgnore = append(md.SourcesToIgnore, &data.IgnoredSource{

View File

@@ -77,7 +77,7 @@ func Fetch(cdnUrl string, dir string) error {
     hasher := data.CompareHash(body, hash)
     if hasher == nil {
-        log.Fatal("checksum in metadata does not match dist-git file")
+        return fmt.Errorf("checksum in metadata does not match dist-git file")
     }

     err = os.MkdirAll(filepath.Dir(path), 0755)

View File

@@ -21,10 +21,12 @@
 package srpmproc

 import (
+    "encoding/json"
     "fmt"
     "github.com/rocky-linux/srpmproc/pkg/misc"
     "io/ioutil"
     "log"
+    "os"
     "path/filepath"
     "strings"
     "time"
@@ -74,7 +76,15 @@ func cfgPatches(pd *data.ProcessData, md *data.ModeData, patchTree *git.Worktree
             return fmt.Errorf("could not unmarshal cfg file: %v", err)
         }

-        directives.Apply(&cfg, pd, md, patchTree, pushTree)
+        errs := directives.Apply(&cfg, pd, md, patchTree, pushTree)
+        if errs != nil {
+            err := json.NewEncoder(os.Stdout).Encode(errs)
+            if err != nil {
+                return err
+            }
+            return fmt.Errorf("directives could not be applied")
+        }
     }
 }
@@ -251,7 +261,7 @@ func getTipStream(pd *data.ProcessData, module string, pushBranch string, origPu
         for _, ref := range list {
             log.Println(pushBranch, ref.Name())
         }
-        log.Fatal("could not find tip hash")
+        return "", fmt.Errorf("could not find tip hash")
     }

     return strings.TrimSpace(tipHash), nil
@@ -324,7 +334,7 @@ func patchModuleYaml(pd *data.ProcessData, md *data.ModeData) error {
     } else if strings.HasPrefix(rpm.Ref, "rhel-") {
         pushBranch = defaultBranch
     } else {
-        log.Fatal("could not recognize modulemd ref")
+        return fmt.Errorf("could not recognize modulemd ref")
     }

     tipHash, err = getTipStream(pd, name, pushBranch, md.PushBranch, 0)

View File

@@ -144,13 +144,17 @@ func NewProcessData(req *ProcessDataRequest) (*data.ProcessData, error) {
     var blobStorage blob.Storage
     if strings.HasPrefix(req.StorageAddr, "gs://") {
-        blobStorage = gcs.New(strings.Replace(req.StorageAddr, "gs://", "", 1))
+        var err error
+        blobStorage, err = gcs.New(strings.Replace(req.StorageAddr, "gs://", "", 1))
+        if err != nil {
+            return nil, err
+        }
     } else if strings.HasPrefix(req.StorageAddr, "s3://") {
         blobStorage = s3.New(strings.Replace(req.StorageAddr, "s3://", "", 1))
     } else if strings.HasPrefix(req.StorageAddr, "file://") {
         blobStorage = file.New(strings.Replace(req.StorageAddr, "file://", "", 1))
     } else {
-        log.Fatalf("invalid blob storage")
+        return nil, fmt.Errorf("invalid blob storage")
     }

     sourceRpmLocation := ""
@@ -165,7 +169,7 @@ func NewProcessData(req *ProcessDataRequest) (*data.ProcessData, error) {
     if lastKeyLocation == "" {
         usr, err := user.Current()
         if err != nil {
-            log.Fatalf("could not get user: %v", err)
+            return nil, fmt.Errorf("could not get user: %v", err)
         }
         lastKeyLocation = filepath.Join(usr.HomeDir, ".ssh/id_rsa")
     }
@@ -183,7 +187,7 @@ func NewProcessData(req *ProcessDataRequest) (*data.ProcessData, error) {
         authenticator, err = ssh.NewPublicKeysFromFile(req.SshUser, lastKeyLocation, "")
     }
     if err != nil {
-        log.Fatalf("could not get git authenticator: %v", err)
+        return nil, fmt.Errorf("could not get git authenticator: %v", err)
     }

     fsCreator := func(branch string) (billy.Filesystem, error) {
@@ -204,7 +208,7 @@ func NewProcessData(req *ProcessDataRequest) (*data.ProcessData, error) {
         tmpDir := filepath.Join(req.TmpFsMode, branch)
         err = fs.MkdirAll(tmpDir, 0755)
         if err != nil {
-            log.Fatalf("could not create tmpfs dir: %v", err)
+            return nil, fmt.Errorf("could not create tmpfs dir: %v", err)
         }
         nFs, err := fs.Chroot(tmpDir)
         if err != nil {
@@ -226,8 +230,6 @@ func NewProcessData(req *ProcessDataRequest) (*data.ProcessData, error) {
         Importer:          importer,
         RpmLocation:       sourceRpmLocation,
         UpstreamPrefix:    req.UpstreamPrefix,
-        SshKeyLocation:    lastKeyLocation,
-        SshUser:           req.SshUser,
         Version:           req.Version,
         BlobStorage:       blobStorage,
         GitCommitterName:  req.GitCommitterName,
@@ -321,7 +323,7 @@ func ProcessRPM(pd *data.ProcessData) (*srpmprocpb.ProcessResponse, error) {
     for _, commit := range pd.ManualCommits {
         branchCommit := strings.Split(commit, ":")
         if len(branchCommit) != 2 {
-            log.Fatalln("invalid manual commit list")
+            return nil, fmt.Errorf("invalid manual commit list")
         }

         head := fmt.Sprintf("refs/tags/imports/%s/%s-%s", branchCommit[0], md.Name, branchCommit[1])
@@ -497,8 +499,15 @@ func ProcessRPM(pd *data.ProcessData) (*srpmprocpb.ProcessResponse, error) {
         if data.StrContains(alreadyUploadedBlobs, checksum) {
             continue
         }
-        if !pd.BlobStorage.Exists(checksum) && !pd.NoStorageUpload {
-            pd.BlobStorage.Write(checksum, sourceFileBts)
+        exists, err := pd.BlobStorage.Exists(checksum)
+        if err != nil {
+            return nil, err
+        }
+        if !exists && !pd.NoStorageUpload {
+            err := pd.BlobStorage.Write(checksum, sourceFileBts)
+            if err != nil {
+                return nil, err
+            }
             log.Printf("wrote %s to blob storage", checksum)
         }
         alreadyUploadedBlobs = append(alreadyUploadedBlobs, checksum)
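
Returning the bare err from Exists here drops the checksum being processed. Wrapping it would keep that context in the failure message; a one-line variant under that assumption, not part of this commit:

return nil, fmt.Errorf("could not check blob storage for %s: %v", checksum, err)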