This commit is contained in:
Mustafa Gezen 2023-08-31 10:17:01 +02:00
parent 6d46a34c82
commit 96cca525d1
Signed by: mustafa
GPG Key ID: DCDF010D946438C1
40 changed files with 6647 additions and 1 deletions

View File

@ -220,6 +220,12 @@ def go_dependencies():
sum = "h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=",
version = "v4.0.0",
)
go_repository(
name = "com_github_bluekeyes_go_gitdiff",
importpath = "github.com/bluekeyes/go-gitdiff",
sum = "h1:AXrIoy/VEA9Baz2lhwMlpdzDJ/sKof6C9yTt1oqw4hQ=",
version = "v0.5.0",
)
go_repository(
name = "com_github_bmatcuk_doublestar_v4",
@ -1214,6 +1220,13 @@ def go_dependencies():
sum = "h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=",
version = "v0.5.1",
)
go_repository(
name = "com_github_hashicorp_hcl",
importpath = "github.com/hashicorp/hcl",
sum = "h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=",
version = "v1.0.0",
)
go_repository(
name = "com_github_hashicorp_hcl_v2",
importpath = "github.com/hashicorp/hcl/v2",
@ -1621,6 +1634,13 @@ def go_dependencies():
sum = "h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=",
version = "v1.2.0",
)
go_repository(
name = "com_github_mitchellh_go_homedir",
importpath = "github.com/mitchellh/go-homedir",
sum = "h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=",
version = "v1.1.0",
)
go_repository(
name = "com_github_mitchellh_go_wordwrap",
importpath = "github.com/mitchellh/go-wordwrap",
@ -1962,6 +1982,12 @@ def go_dependencies():
sum = "h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=",
version = "v3.0.1",
)
go_repository(
name = "com_github_rocky_linux_srpmproc",
importpath = "github.com/rocky-linux/srpmproc",
sum = "h1:rVCtpFyrYI7kAj5XOCZEB4ZcBMfh/E2Vt01AxwJsfpE=",
version = "v0.5.0",
)
go_repository(
name = "com_github_rogpeppe_fastuuid",
@ -2099,6 +2125,12 @@ def go_dependencies():
sum = "h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=",
version = "v0.0.0-20180118202830-f09979ecbc72",
)
go_repository(
name = "com_github_spf13_afero",
importpath = "github.com/spf13/afero",
sum = "h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=",
version = "v1.1.2",
)
go_repository(
name = "com_github_spf13_cast",
@ -2113,6 +2145,12 @@ def go_dependencies():
sum = "h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=",
version = "v1.7.0",
)
go_repository(
name = "com_github_spf13_jwalterweatherman",
importpath = "github.com/spf13/jwalterweatherman",
sum = "h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=",
version = "v1.0.0",
)
go_repository(
name = "com_github_spf13_pflag",
@ -2120,6 +2158,13 @@ def go_dependencies():
sum = "h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=",
version = "v1.0.5",
)
go_repository(
name = "com_github_spf13_viper",
importpath = "github.com/spf13/viper",
sum = "h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM=",
version = "v1.7.0",
)
go_repository(
name = "com_github_stefanberger_go_pkcs11uri",
importpath = "github.com/stefanberger/go-pkcs11uri",
@ -2146,6 +2191,13 @@ def go_dependencies():
sum = "h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=",
version = "v1.8.4",
)
go_repository(
name = "com_github_subosito_gotenv",
importpath = "github.com/subosito/gotenv",
sum = "h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=",
version = "v1.2.0",
)
go_repository(
name = "com_github_syndtr_gocapability",
importpath = "github.com/syndtr/gocapability",
@ -3197,6 +3249,12 @@ def go_dependencies():
sum = "h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=",
version = "v0.9.1",
)
go_repository(
name = "in_gopkg_ini_v1",
importpath = "gopkg.in/ini.v1",
sum = "h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=",
version = "v1.51.0",
)
go_repository(
name = "in_gopkg_square_go_jose_v2",

6
go.mod
View File

@ -26,6 +26,7 @@ require (
github.com/jarcoal/httpmock v1.3.1
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.16.0
github.com/rocky-linux/srpmproc v0.5.0
github.com/sassoftware/go-rpmutils v0.2.0
github.com/stretchr/testify v1.8.4
github.com/temporalio/temporalite v0.3.1-0.20230117200252-2df426ad3426
@ -43,6 +44,7 @@ require (
golang.org/x/crypto v0.12.0
golang.org/x/mod v0.12.0
golang.org/x/oauth2 v0.11.0
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d
google.golang.org/grpc v1.57.0
google.golang.org/protobuf v1.31.0
@ -82,6 +84,7 @@ require (
github.com/benbjohnson/clock v1.3.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/bluekeyes/go-gitdiff v0.5.0 // indirect
github.com/bmatcuk/doublestar/v4 v4.6.0 // indirect
github.com/bufbuild/protocompile v0.6.0 // indirect
github.com/buger/jsonparser v1.1.1 // indirect
@ -258,7 +261,6 @@ require (
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/api v0.138.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
@ -289,3 +291,5 @@ replace go.resf.org/peridot/tools/mothership/pb => ./bazel-bin/tools/mothership/
replace go.resf.org/peridot/third_party/bazel/src/main/protobuf => ./bazel-bin/third_party/bazel/src/main/protobuf/blaze_query_go_proto_/go.resf.org/peridot/third_party/bazel/src/main/protobuf
replace go.resf.org/peridot/tools/mothership/admin/pb => ./bazel-bin/tools/mothership/proto/admin/v1/mshipadminpb_go_proto_/go.resf.org/peridot/tools/mothership/admin/pb
replace google.golang.org/genproto/googleapis/longrunning => ./bazel-bin/third_party/googleapis/google/longrunning/longrunning_go_proto_/google.golang.org/genproto/googleapis/longrunning

4
go.sum
View File

@ -452,6 +452,8 @@ github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCS
github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/bluekeyes/go-gitdiff v0.5.0 h1:AXrIoy/VEA9Baz2lhwMlpdzDJ/sKof6C9yTt1oqw4hQ=
github.com/bluekeyes/go-gitdiff v0.5.0/go.mod h1:QpfYYO1E0fTVHVZAZKiRjtSGY9823iCdvGXBcEzHGbM=
github.com/bmatcuk/doublestar/v4 v4.0.2/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/bmatcuk/doublestar/v4 v4.6.0 h1:HTuxyug8GyFbRkrffIpzNCSK4luc0TY3wzXvzIZhEXc=
github.com/bmatcuk/doublestar/v4 v4.6.0/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
@ -964,6 +966,8 @@ github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=
github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rocky-linux/srpmproc v0.5.0 h1:rVCtpFyrYI7kAj5XOCZEB4ZcBMfh/E2Vt01AxwJsfpE=
github.com/rocky-linux/srpmproc v0.5.0/go.mod h1:x8Z2wqhV2JqRnYMhYz3thOQkfsSWjJkyX8DVGDPOb48=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=

21
vendor/github.com/bluekeyes/go-gitdiff/LICENSE generated vendored Normal file
View File

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2019 Billy Keyes
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

19
vendor/github.com/bluekeyes/go-gitdiff/gitdiff/BUILD generated vendored Normal file
View File

@ -0,0 +1,19 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

# Vendored build target for github.com/bluekeyes/go-gitdiff/gitdiff.
# importmap rewrites the import path so the vendored copy does not collide
# with a non-vendored module of the same path.
go_library(
    name = "gitdiff",
    srcs = [
        "apply.go",
        "base85.go",
        "binary.go",
        "file_header.go",
        "gitdiff.go",
        "io.go",
        "parser.go",
        "patch_header.go",
        "text.go",
    ],
    importmap = "go.resf.org/peridot/vendor/github.com/bluekeyes/go-gitdiff/gitdiff",
    importpath = "github.com/bluekeyes/go-gitdiff/gitdiff",
    visibility = ["//visibility:public"],
)

454
vendor/github.com/bluekeyes/go-gitdiff/gitdiff/apply.go generated vendored Normal file
View File

@ -0,0 +1,454 @@
package gitdiff
import (
"errors"
"fmt"
"io"
"sort"
)
// Conflict indicates an apply failed due to a conflict between the patch and
// the source content.
//
// Users can test if an error was caused by a conflict by using errors.Is with
// an empty Conflict:
//
//	if errors.Is(err, &Conflict{}) {
//		// handle conflict
//	}
type Conflict struct {
	msg string
}

// Error implements the error interface.
func (c *Conflict) Error() string {
	return "conflict: " + c.msg
}

// Is implements error matching for Conflict. Passing an empty instance of
// Conflict always returns true.
func (c *Conflict) Is(other error) bool {
	oc, ok := other.(*Conflict)
	if !ok {
		return false
	}
	return oc.msg == "" || oc.msg == c.msg
}
// ApplyError wraps an error that occurs during patch application with
// additional location information, if it is available.
type ApplyError struct {
// Line is the one-indexed line number in the source data
Line int64
// Fragment is the one-indexed fragment number in the file
Fragment int
// FragmentLine is the one-indexed line number in the fragment
FragmentLine int
err error
}
// Unwrap returns the wrapped error.
func (e *ApplyError) Unwrap() error {
return e.err
}
func (e *ApplyError) Error() string {
return fmt.Sprintf("%v", e.err)
}
type lineNum int
type fragNum int
type fragLineNum int
// applyError creates a new *ApplyError wrapping err or augments the information
// in err with args if it is already an *ApplyError. Returns nil if err is nil.
func applyError(err error, args ...interface{}) error {
if err == nil {
return nil
}
e, ok := err.(*ApplyError)
if !ok {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
e = &ApplyError{err: err}
}
for _, arg := range args {
switch v := arg.(type) {
case lineNum:
e.Line = int64(v) + 1
case fragNum:
e.Fragment = int(v) + 1
case fragLineNum:
e.FragmentLine = int(v) + 1
}
}
return e
}
var (
	// errApplyInProgress is returned when apply types are mixed (text vs.
	// binary vs. whole-file) on one Applier without an intervening Reset.
	errApplyInProgress = errors.New("gitdiff: incompatible apply in progress")
)

// Applier states; the first apply operation fixes the state, and subsequent
// operations must be compatible with it until Reset is called.
const (
	applyInitial = iota // nothing applied yet
	applyText           // applying individual text fragments
	applyBinary         // applying a binary fragment
	applyFile           // applying a whole file via ApplyFile
)
// Apply is a convenience function that creates an Applier for src with default
// settings and applies the changes in f, writing the result to dst. The
// Applier is single-use; create a new one (or call Reset) for each file.
func Apply(dst io.Writer, src io.ReaderAt, f *File) error {
	return NewApplier(src).ApplyFile(dst, f)
}
// Applier applies changes described in fragments to source data. If changes
// are described in multiple fragments, those fragments must be applied in
// order, usually by calling ApplyFile.
//
// By default, Applier operates in "strict" mode, where fragment content and
// positions must exactly match those of the source.
//
// If an error occurs while applying, methods on Applier return instances of
// *ApplyError that annotate the wrapped error with additional information
// when available. If the error is because of a conflict between a fragment and
// the source, the wrapped error will be a *Conflict.
//
// While an Applier can apply both text and binary fragments, only one fragment
// type can be used without resetting the Applier. The first fragment applied
// sets the type for the Applier. Mixing fragment types or mixing
// fragment-level and file-level applies results in an error.
type Applier struct {
	src       io.ReaderAt  // raw source data
	lineSrc   LineReaderAt // line-oriented view of src, used for text fragments
	nextLine  int64        // next unwritten 0-indexed line of the source
	applyType int          // one of the apply* states; set by the first apply
}

// NewApplier creates an Applier that reads data from src. If src is a
// LineReaderAt, it is used directly to apply text fragments.
func NewApplier(src io.ReaderAt) *Applier {
	a := new(Applier)
	a.Reset(src)
	return a
}
// Reset resets the input and internal state of the Applier. If src is nil, the
// existing source is reused.
func (a *Applier) Reset(src io.ReaderAt) {
	if src != nil {
		a.src = src
		// prefer the source's own line reader if it provides one; otherwise
		// wrap it in the package's default implementation
		if lineSrc, ok := src.(LineReaderAt); ok {
			a.lineSrc = lineSrc
		} else {
			a.lineSrc = &lineReaderAt{r: src}
		}
	}
	a.nextLine = 0
	a.applyType = applyInitial
}
// ApplyFile applies the changes in all of the fragments of f and writes the
// result to dst.
func (a *Applier) ApplyFile(dst io.Writer, f *File) error {
	// a whole-file apply must be the only operation on this Applier
	if a.applyType != applyInitial {
		return applyError(errApplyInProgress)
	}
	defer func() { a.applyType = applyFile }()

	// a file carries either text fragments or a binary fragment, never both
	if f.IsBinary && len(f.TextFragments) > 0 {
		return applyError(errors.New("binary file contains text fragments"))
	}
	if !f.IsBinary && f.BinaryFragment != nil {
		return applyError(errors.New("text file contains binary fragment"))
	}

	switch {
	case f.BinaryFragment != nil:
		return a.ApplyBinaryFragment(dst, f.BinaryFragment)

	case len(f.TextFragments) > 0:
		// sort a copy by source position, leaving the caller's slice intact
		frags := make([]*TextFragment, len(f.TextFragments))
		copy(frags, f.TextFragments)
		sort.Slice(frags, func(i, j int) bool {
			return frags[i].OldPosition < frags[j].OldPosition
		})

		// TODO(bkeyes): consider merging overlapping fragments
		// right now, the application fails if fragments overlap, but it should be
		// possible to precompute the result of applying them in order
		for i, frag := range frags {
			if err := a.ApplyTextFragment(dst, frag); err != nil {
				return applyError(err, fragNum(i))
			}
		}
	}

	// copy any source data remaining after the last fragment
	return applyError(a.Flush(dst))
}
// ApplyTextFragment applies the changes in the fragment f and writes unwritten
// data before the start of the fragment and the result to dst. If multiple
// text fragments apply to the same source, ApplyTextFragment must be called in
// order of increasing start position. As a result, each fragment can be
// applied at most once before a call to Reset.
func (a *Applier) ApplyTextFragment(dst io.Writer, f *TextFragment) error {
	if a.applyType != applyInitial && a.applyType != applyText {
		return applyError(errApplyInProgress)
	}
	defer func() { a.applyType = applyText }()

	// application code assumes fragment fields are consistent
	if err := f.Validate(); err != nil {
		return applyError(err)
	}

	// lines are 0-indexed, positions are 1-indexed (but new files have position = 0)
	fragStart := f.OldPosition - 1
	if fragStart < 0 {
		fragStart = 0
	}
	fragEnd := fragStart + f.OldLines

	start := a.nextLine
	// fragments must arrive in increasing, non-overlapping order
	if fragStart < start {
		return applyError(&Conflict{"fragment overlaps with an applied fragment"})
	}

	// position 0 means file creation; the source must then be empty
	if f.OldPosition == 0 {
		ok, err := isLen(a.src, 0)
		if err != nil {
			return applyError(err)
		}
		if !ok {
			return applyError(&Conflict{"cannot create new file from non-empty src"})
		}
	}

	// read all source lines from the last applied position through the end
	// of this fragment
	preimage := make([][]byte, fragEnd-start)
	n, err := a.lineSrc.ReadLinesAt(preimage, start)
	switch {
	case err == nil:
	case err == io.EOF && n == len(preimage): // last line of frag has no newline character
	default:
		return applyError(err, lineNum(start+int64(n)))
	}

	// copy leading data before the fragment starts
	for i, line := range preimage[:fragStart-start] {
		if _, err := dst.Write(line); err != nil {
			a.nextLine = start + int64(i)
			return applyError(err, lineNum(a.nextLine))
		}
	}
	preimage = preimage[fragStart-start:]

	// apply the changes in the fragment; "used" counts consumed source lines
	used := int64(0)
	for i, line := range f.Lines {
		if err := applyTextLine(dst, line, preimage, used); err != nil {
			a.nextLine = fragStart + used
			return applyError(err, lineNum(a.nextLine), fragLineNum(i))
		}
		if line.Old() {
			used++
		}
	}
	a.nextLine = fragStart + used

	// new position of +0,0 means a full delete, so check for leftovers
	if f.NewPosition == 0 && f.NewLines == 0 {
		var b [1][]byte
		n, err := a.lineSrc.ReadLinesAt(b[:], a.nextLine)
		if err != nil && err != io.EOF {
			return applyError(err, lineNum(a.nextLine))
		}
		if n > 0 {
			return applyError(&Conflict{"src still has content after full delete"}, lineNum(a.nextLine))
		}
	}

	return nil
}
// applyTextLine applies one fragment line against preimage line i: lines
// reporting Old() must match preimage[i] exactly (otherwise a *Conflict is
// returned), and lines reporting New() are written to dst.
func applyTextLine(dst io.Writer, line Line, preimage [][]byte, i int64) (err error) {
	if line.Old() && string(preimage[i]) != line.Line {
		return &Conflict{"fragment line does not match src line"}
	}
	if line.New() {
		_, err = io.WriteString(dst, line.Line)
	}
	return err
}
// Flush writes any data following the last applied fragment to dst.
func (a *Applier) Flush(dst io.Writer) (err error) {
	switch a.applyType {
	case applyInitial:
		// nothing was applied: copy the entire source unchanged
		_, err = copyFrom(dst, a.src, 0)
	case applyText:
		// copy the lines after the last applied text fragment
		_, err = copyLinesFrom(dst, a.lineSrc, a.nextLine)
	case applyBinary:
		// nothing to flush, binary apply "consumes" full source
	}
	return err
}
// ApplyBinaryFragment applies the changes in the fragment f and writes the
// result to dst. At most one binary fragment can be applied before a call to
// Reset.
func (a *Applier) ApplyBinaryFragment(dst io.Writer, f *BinaryFragment) error {
	if a.applyType != applyInitial {
		return applyError(errApplyInProgress)
	}
	defer func() { a.applyType = applyBinary }()

	if f == nil {
		return applyError(errors.New("nil fragment"))
	}
	switch f.Method {
	case BinaryPatchLiteral:
		// literal patches carry the full new content in f.Data
		if _, err := dst.Write(f.Data); err != nil {
			return applyError(err)
		}
	case BinaryPatchDelta:
		// delta patches compute the output from the source and f.Data
		if err := applyBinaryDeltaFragment(dst, a.src, f.Data); err != nil {
			return applyError(err)
		}
	default:
		return applyError(fmt.Errorf("unsupported binary patch method: %v", f.Method))
	}
	return nil
}
// applyBinaryDeltaFragment applies the delta-encoded fragment frag to src and
// writes the result to dst. The delta starts with the expected source and
// destination sizes, followed by a sequence of copy (high bit set) and add
// (high bit clear) opcodes; see pack-format.txt in the Git source.
func applyBinaryDeltaFragment(dst io.Writer, src io.ReaderAt, frag []byte) error {
	srcSize, delta := readBinaryDeltaSize(frag)
	if err := checkBinarySrcSize(src, srcSize); err != nil {
		return err
	}

	dstSize, delta := readBinaryDeltaSize(delta)

	for len(delta) > 0 {
		op := delta[0]
		// opcode 0 is reserved and never valid
		if op == 0 {
			return errors.New("invalid delta opcode 0")
		}

		var n int64
		var err error
		switch op & 0x80 {
		case 0x80:
			n, delta, err = applyBinaryDeltaCopy(dst, op, delta[1:], src)
		case 0x00:
			n, delta, err = applyBinaryDeltaAdd(dst, op, delta[1:])
		}
		if err != nil {
			return err
		}
		// track remaining output; it must come out exactly to zero
		dstSize -= n
	}

	if dstSize != 0 {
		return errors.New("corrupt binary delta: insufficient or extra data")
	}
	return nil
}
// readBinaryDeltaSize reads a variable length size from a delta-encoded binary
// fragment, returning the size and the unused data. Data is encoded as:
//
//	[[1xxxxxxx]...] [0xxxxxxx]
//
// in little-endian order, with 7 bits of the value per byte.
func readBinaryDeltaSize(d []byte) (size int64, rest []byte) {
	for i := 0; i < len(d); i++ {
		b := d[i]
		size |= int64(b&0x7F) << (7 * uint(i))
		if b&0x80 == 0 {
			// terminating byte: the rest of d is unused
			return size, d[i+1:]
		}
	}
	// input ended before a terminating byte; no data remains
	return size, nil
}
// applyBinaryDeltaAdd applies an add opcode in a delta-encoded binary
// fragment, returning the amount of data written and the usused part of the
// fragment. An add operation takes the form:
//
// [0xxxxxx][[data1]...]
//
// where the lower seven bits of the opcode is the number of data bytes
// following the opcode. See also pack-format.txt in the Git source.
func applyBinaryDeltaAdd(w io.Writer, op byte, delta []byte) (n int64, rest []byte, err error) {
size := int(op)
if len(delta) < size {
return 0, delta, errors.New("corrupt binary delta: incomplete add")
}
_, err = w.Write(delta[:size])
return int64(size), delta[size:], err
}
// applyBinaryDeltaCopy applies a copy opcode in a delta-encoded binary
// fragment, returning the amount of data written and the unused part of the
// fragment. A copy operation takes the form:
//
//	[1xxxxxxx][offset1][offset2][offset3][offset4][size1][size2][size3]
//
// where the lower seven bits of the opcode determine which non-zero offset and
// size bytes are present in little-endian order: if bit 0 is set, offset1 is
// present, etc. If no offset or size bytes are present, offset is 0 and size
// is 0x10000. See also pack-format.txt in the Git source.
func applyBinaryDeltaCopy(w io.Writer, op byte, delta []byte, src io.ReaderAt) (n int64, rest []byte, err error) {
	const defaultSize = 0x10000

	// unpack assembles a little-endian value from the delta bytes whose
	// presence bits in op start at position `start`; it consumes the
	// captured delta slice and records failures in the captured err
	unpack := func(start, bits uint) (v int64) {
		for i := uint(0); i < bits; i++ {
			mask := byte(1 << (i + start))
			if op&mask > 0 {
				if len(delta) == 0 {
					err = errors.New("corrupt binary delta: incomplete copy")
					return
				}
				v |= int64(delta[0]) << (8 * i)
				delta = delta[1:]
			}
		}
		return
	}

	offset := unpack(0, 4) // bits 0-3 select up to 4 offset bytes
	size := unpack(4, 3)   // bits 4-6 select up to 3 size bytes
	if err != nil {
		return 0, delta, err
	}
	if size == 0 {
		size = defaultSize
	}

	// TODO(bkeyes): consider pooling these buffers
	b := make([]byte, size)
	if _, err := src.ReadAt(b, offset); err != nil {
		return 0, delta, err
	}

	_, err = w.Write(b)
	return size, delta, err
}
// checkBinarySrcSize verifies that the actual length of r equals size, the
// source size declared by a binary fragment, returning a *Conflict otherwise.
func checkBinarySrcSize(r io.ReaderAt, size int64) error {
	ok, err := isLen(r, size)
	if err != nil {
		return err
	}
	if !ok {
		return &Conflict{"fragment src size does not match actual src size"}
	}
	return nil
}

View File

@ -0,0 +1,52 @@
package gitdiff
import (
"fmt"
)
var (
	// b85Table maps each alphabet character to its value in [0, 85).
	b85Table map[byte]byte

	// b85Alpha is the ordered alphabet used by base85.c in the Git source.
	b85Alpha = []byte(
		"0123456789" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "abcdefghijklmnopqrstuvwxyz" + "!#$%&()*+-;<=>?@^_`{|}~",
	)
)

func init() {
	b85Table = make(map[byte]byte, len(b85Alpha))
	for i := range b85Alpha {
		b85Table[b85Alpha[i]] = byte(i)
	}
}

// base85Decode decodes Base85-encoded data from src into dst. It uses the
// alphabet defined by base85.c in the Git source tree, which appears to be
// unique. src must contain at least len(dst) bytes of encoded data.
func base85Decode(dst, src []byte) error {
	var (
		acc     uint32 // 32-bit group accumulated from 5 characters
		count   int    // characters consumed toward the current group
		written int    // bytes emitted into dst so far
	)
	for i := 0; i < len(src); i++ {
		val, ok := b85Table[src[i]]
		if !ok {
			return fmt.Errorf("invalid base85 byte at index %d: 0x%X", i, src[i])
		}
		acc = 85*acc + uint32(val)
		count++
		if count < 5 {
			continue
		}

		// a complete group of 5 characters yields up to 4 output bytes,
		// most significant first; stop early if dst is full
		for j := 0; j < 4 && written < len(dst); j++ {
			dst[written] = byte(acc >> 24)
			written++
			acc <<= 8
		}
		acc, count = 0, 0
	}
	if count > 0 {
		return fmt.Errorf("base85 data terminated by underpadded sequence")
	}
	if written < len(dst) {
		return fmt.Errorf("base85 data underrun: %d < %d", written, len(dst))
	}
	return nil
}

View File

@ -0,0 +1,179 @@
package gitdiff
import (
"bytes"
"compress/zlib"
"fmt"
"io"
"io/ioutil"
"strconv"
"strings"
)
// ParseBinaryFragments parses the binary content of f, if any. It returns the
// number of fragments parsed: 0 when f is not binary or carries no patch
// data, 1 when a forward fragment (and optionally a reverse fragment) was
// parsed.
func (p *parser) ParseBinaryFragments(f *File) (n int, err error) {
	isBinary, hasData, err := p.ParseBinaryMarker()
	if err != nil || !isBinary {
		return 0, err
	}
	f.IsBinary = true

	// markers like "Binary files differ" declare binary content with no data
	if !hasData {
		return 0, nil
	}

	forward, err := p.ParseBinaryFragmentHeader()
	if err != nil {
		return 0, err
	}
	if forward == nil {
		return 0, p.Errorf(0, "missing data for binary patch")
	}
	if err := p.ParseBinaryChunk(forward); err != nil {
		return 0, err
	}
	f.BinaryFragment = forward

	// valid for reverse to not exist, but it must be valid if present
	reverse, err := p.ParseBinaryFragmentHeader()
	if err != nil {
		return 1, err
	}
	if reverse == nil {
		return 1, nil
	}
	if err := p.ParseBinaryChunk(reverse); err != nil {
		return 1, err
	}
	f.ReverseBinaryFragment = reverse

	return 1, nil
}
// ParseBinaryMarker reports whether the current line marks binary content.
// Only "GIT binary patch" is followed by encoded patch data; the "differ"
// markers indicate binary content without data. On a recognized marker the
// parser advances to the next line.
func (p *parser) ParseBinaryMarker() (isBinary bool, hasData bool, err error) {
	switch p.Line(0) {
	case "GIT binary patch\n":
		hasData = true
	case "Binary files differ\n":
	case "Files differ\n":
	default:
		return false, false, nil
	}

	// EOF here is fine: a data-less marker may be the last line
	if err = p.Next(); err != nil && err != io.EOF {
		return false, false, err
	}
	return true, hasData, nil
}
// ParseBinaryFragmentHeader parses a fragment header of the form
// "literal <size>" or "delta <size>". It returns (nil, nil) when the current
// line is not such a header; on success the parser advances past the line.
func (p *parser) ParseBinaryFragmentHeader() (*BinaryFragment, error) {
	parts := strings.SplitN(strings.TrimSuffix(p.Line(0), "\n"), " ", 2)
	if len(parts) < 2 {
		return nil, nil
	}

	frag := &BinaryFragment{}
	switch parts[0] {
	case "delta":
		frag.Method = BinaryPatchDelta
	case "literal":
		frag.Method = BinaryPatchLiteral
	default:
		return nil, nil
	}

	var err error
	if frag.Size, err = strconv.ParseInt(parts[1], 10, 64); err != nil {
		// ParseInt failures are always *strconv.NumError values
		nerr := err.(*strconv.NumError)
		return nil, p.Errorf(0, "binary patch: invalid size: %v", nerr.Err)
	}

	if err := p.Next(); err != nil && err != io.EOF {
		return nil, err
	}
	return frag, nil
}
// ParseBinaryChunk decodes the base85 data lines of a binary fragment,
// inflates the collected bytes, and stores the result in frag. The parser is
// left positioned after the blank line that terminates the chunk.
func (p *parser) ParseBinaryChunk(frag *BinaryFragment) error {
	// Binary fragments are encoded as a series of base85 encoded lines. Each
	// line starts with a character in [A-Za-z] giving the number of bytes on
	// the line, where A = 1 and z = 52, and ends with a newline character.
	//
	// The base85 encoding means each line is a multiple of 5 characters + 2
	// additional characters for the length byte and the newline. The fragment
	// ends with a blank line.
	const (
		shortestValidLine = "A00000\n"
		maxBytesPerLine   = 52
	)

	var data bytes.Buffer
	buf := make([]byte, maxBytesPerLine)
	for {
		line := p.Line(0)
		if line == "\n" {
			break
		}
		if len(line) < len(shortestValidLine) || (len(line)-2)%5 != 0 {
			return p.Errorf(0, "binary patch: corrupt data line")
		}

		// decode the leading length byte: A-Z is 1-26, a-z is 27-52
		byteCount, seq := int(line[0]), line[1:len(line)-1]
		switch {
		case 'A' <= byteCount && byteCount <= 'Z':
			byteCount = byteCount - 'A' + 1
		case 'a' <= byteCount && byteCount <= 'z':
			byteCount = byteCount - 'a' + 27
		default:
			return p.Errorf(0, "binary patch: invalid length byte")
		}

		// base85 encodes every 4 bytes into 5 characters, with up to 3 bytes of end padding
		maxByteCount := len(seq) / 5 * 4
		if byteCount > maxByteCount || byteCount < maxByteCount-3 {
			return p.Errorf(0, "binary patch: incorrect byte count")
		}

		if err := base85Decode(buf[:byteCount], []byte(seq)); err != nil {
			return p.Errorf(0, "binary patch: %v", err)
		}
		data.Write(buf[:byteCount])

		if err := p.Next(); err != nil {
			if err == io.EOF {
				return p.Errorf(0, "binary patch: unexpected EOF")
			}
			return err
		}
	}

	if err := inflateBinaryChunk(frag, &data); err != nil {
		return p.Errorf(0, "binary patch: %v", err)
	}

	// consume the empty line that ended the fragment
	if err := p.Next(); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// inflateBinaryChunk zlib-inflates the decoded fragment data in r, checks the
// result against the size declared in the fragment header, and stores it in
// frag.Data.
func inflateBinaryChunk(frag *BinaryFragment, r io.Reader) error {
	zr, err := zlib.NewReader(r)
	if err != nil {
		return err
	}

	data, err := ioutil.ReadAll(zr)
	if err != nil {
		return err
	}
	if err := zr.Close(); err != nil {
		return err
	}

	// the inflated size must match the header's declared size exactly
	if int64(len(data)) != frag.Size {
		return fmt.Errorf("%d byte fragment inflated to %d", frag.Size, len(data))
	}
	frag.Data = data
	return nil
}

View File

@ -0,0 +1,470 @@
package gitdiff
import (
"fmt"
"io"
"os"
"strconv"
"strings"
"time"
)
const (
	// devNull is the name Git uses for the absent side of a file creation
	// or deletion in "---"/"+++" header lines.
	devNull = "/dev/null"
)
// ParseNextFileHeader finds and parses the next file header in the stream. If
// a header is found, it returns a file and all input before the header. It
// returns nil if no headers are found before the end of the input.
func (p *parser) ParseNextFileHeader() (*File, string, error) {
	var preamble strings.Builder
	var file *File
	for {
		// check for disconnected fragment headers (corrupt patch)
		frag, err := p.ParseTextFragmentHeader()
		if err != nil {
			// not a valid header, nothing to worry about
			goto NextLine
		}
		if frag != nil {
			return nil, "", p.Errorf(-1, "patch fragment without file header: %s", frag.Header())
		}

		// check for a git-generated patch
		file, err = p.ParseGitFileHeader()
		if err != nil {
			return nil, "", err
		}
		if file != nil {
			return file, preamble.String(), nil
		}

		// check for a "traditional" patch
		file, err = p.ParseTraditionalFileHeader()
		if err != nil {
			return nil, "", err
		}
		if file != nil {
			return file, preamble.String(), nil
		}

	NextLine:
		// not a header: accumulate the line into the preamble and advance
		preamble.WriteString(p.Line(0))
		if err := p.Next(); err != nil {
			if err == io.EOF {
				break
			}
			return nil, "", err
		}
	}
	return nil, "", nil
}
// ParseGitFileHeader parses a Git-generated file header starting with
// "diff --git". It returns (nil, nil) when the parser is not positioned at
// one; otherwise the parser advances past all header lines.
func (p *parser) ParseGitFileHeader() (*File, error) {
	const prefix = "diff --git "
	if !strings.HasPrefix(p.Line(0), prefix) {
		return nil, nil
	}
	header := p.Line(0)[len(prefix):]

	// the "a/name b/name" pair provides a fallback name for headers that
	// carry no explicit names (mode-only changes, empty creations/deletions)
	defaultName, err := parseGitHeaderName(header)
	if err != nil {
		return nil, p.Errorf(0, "git file header: %v", err)
	}

	f := &File{}
	for {
		// peek at the next line; parseGitHeaderData reports when the
		// header section has ended
		end, err := parseGitHeaderData(f, p.Line(1), defaultName)
		if err != nil {
			return nil, p.Errorf(1, "git file header: %v", err)
		}
		if err := p.Next(); err != nil {
			if err == io.EOF {
				break
			}
			return nil, err
		}
		if end {
			break
		}
	}

	if f.OldName == "" && f.NewName == "" {
		if defaultName == "" {
			return nil, p.Errorf(0, "git file header: missing filename information")
		}
		f.OldName = defaultName
		f.NewName = defaultName
	}

	// non-deleted files need a new name; non-created files need an old name
	if (f.NewName == "" && !f.IsDelete) || (f.OldName == "" && !f.IsNew) {
		return nil, p.Errorf(0, "git file header: missing filename information")
	}
	return f, nil
}
// ParseTraditionalFileHeader parses a non-Git ("traditional") file header: a
// "--- " line, a "+++ " line, and a following fragment header. It returns
// (nil, nil) when the parser is not positioned at one; on success the parser
// advances to the fragment header.
func (p *parser) ParseTraditionalFileHeader() (*File, error) {
	const shortestValidFragHeader = "@@ -1 +1 @@\n"
	const (
		oldPrefix = "--- "
		newPrefix = "+++ "
	)

	oldLine, newLine := p.Line(0), p.Line(1)
	if !strings.HasPrefix(oldLine, oldPrefix) || !strings.HasPrefix(newLine, newPrefix) {
		return nil, nil
	}
	// heuristic: only a file header if followed by a (probable) fragment header
	if len(p.Line(2)) < len(shortestValidFragHeader) || !strings.HasPrefix(p.Line(2), "@@ -") {
		return nil, nil
	}

	// advance past the first two lines so parser is after the header
	// no EOF check needed because we know there are >=3 valid lines
	if err := p.Next(); err != nil {
		return nil, err
	}
	if err := p.Next(); err != nil {
		return nil, err
	}

	// names are terminated by a tab (timestamps may follow the tab)
	oldName, _, err := parseName(oldLine[len(oldPrefix):], '\t', 0)
	if err != nil {
		return nil, p.Errorf(0, "file header: %v", err)
	}
	newName, _, err := parseName(newLine[len(newPrefix):], '\t', 0)
	if err != nil {
		return nil, p.Errorf(1, "file header: %v", err)
	}

	f := &File{}
	switch {
	case oldName == devNull || hasEpochTimestamp(oldLine):
		f.IsNew = true
		f.NewName = newName
	case newName == devNull || hasEpochTimestamp(newLine):
		f.IsDelete = true
		f.OldName = oldName
	default:
		// if old name is a prefix of new name, use that instead
		// this avoids picking variants like "file.bak" or "file~"
		if strings.HasPrefix(newName, oldName) {
			f.OldName = oldName
			f.NewName = oldName
		} else {
			f.OldName = newName
			f.NewName = newName
		}
	}
	return f, nil
}
// parseGitHeaderName extracts a default file name from the Git file header
// line. This is required for mode-only changes and creation/deletion of empty
// files. Other types of patch include the file name(s) in the header data.
// If the names in the header do not match because the patch is a rename,
// return an empty default name.
func parseGitHeaderName(header string) (string, error) {
	firstName, n, err := parseName(header, -1, 1)
	if err != nil {
		return "", err
	}

	// skip the single separator between the two names
	if n < len(header) && (header[n] == ' ' || header[n] == '\t') {
		n++
	}

	secondName, _, err := parseName(header[n:], -1, 1)
	if err != nil {
		return "", err
	}

	// mismatched names (a rename) give no usable default
	if firstName != secondName {
		return "", nil
	}
	return firstName, nil
}
// parseGitHeaderData parses a single line of metadata from a Git file header.
// It returns true when header parsing is complete; in that case, line was the
// first line of non-header content.
func parseGitHeaderData(f *File, line, defaultName string) (end bool, err error) {
	// strip the trailing newline, if present
	if len(line) > 0 && line[len(line)-1] == '\n' {
		line = line[:len(line)-1]
	}

	// dispatch on the line's prefix; a fragment header ("@@ -") is the only
	// prefix that both ends the header and needs no parsing
	for _, hdr := range []struct {
		prefix string
		end    bool
		parse  func(*File, string, string) error
	}{
		{"@@ -", true, nil},
		{"--- ", false, parseGitHeaderOldName},
		{"+++ ", false, parseGitHeaderNewName},
		{"old mode ", false, parseGitHeaderOldMode},
		{"new mode ", false, parseGitHeaderNewMode},
		{"deleted file mode ", false, parseGitHeaderDeletedMode},
		{"new file mode ", false, parseGitHeaderCreatedMode},
		{"copy from ", false, parseGitHeaderCopyFrom},
		{"copy to ", false, parseGitHeaderCopyTo},
		{"rename old ", false, parseGitHeaderRenameFrom},
		{"rename new ", false, parseGitHeaderRenameTo},
		{"rename from ", false, parseGitHeaderRenameFrom},
		{"rename to ", false, parseGitHeaderRenameTo},
		{"similarity index ", false, parseGitHeaderScore},
		{"dissimilarity index ", false, parseGitHeaderScore},
		{"index ", false, parseGitHeaderIndex},
	} {
		if strings.HasPrefix(line, hdr.prefix) {
			if hdr.parse != nil {
				err = hdr.parse(f, line[len(hdr.prefix):], defaultName)
			}
			return hdr.end, err
		}
	}

	// unknown line indicates the end of the header
	// this usually happens if the diff is empty
	return true, nil
}
// parseGitHeaderOldName parses a "--- " line, recording the old name if no
// earlier header line has already set it.
func parseGitHeaderOldName(f *File, line, defaultName string) error {
	name, _, err := parseName(line, '\t', 1)
	if err != nil {
		return err
	}
	if f.OldName == "" && !f.IsNew {
		f.OldName = name
		return nil
	}
	// a name (or new-file flag) is already known; check for consistency instead
	return verifyGitHeaderName(name, f.OldName, f.IsNew, "old")
}

// parseGitHeaderNewName parses a "+++ " line, recording the new name if no
// earlier header line has already set it.
func parseGitHeaderNewName(f *File, line, defaultName string) error {
	name, _, err := parseName(line, '\t', 1)
	if err != nil {
		return err
	}
	if f.NewName == "" && !f.IsDelete {
		f.NewName = name
		return nil
	}
	return verifyGitHeaderName(name, f.NewName, f.IsDelete, "new")
}

// parseGitHeaderOldMode parses an "old mode " line.
func parseGitHeaderOldMode(f *File, line, defaultName string) (err error) {
	f.OldMode, err = parseMode(line)
	return
}

// parseGitHeaderNewMode parses a "new mode " line.
func parseGitHeaderNewMode(f *File, line, defaultName string) (err error) {
	f.NewMode, err = parseMode(line)
	return
}

// parseGitHeaderDeletedMode parses a "deleted file mode " line, marking the
// file as deleted and falling back to the default name from the "diff --git"
// line.
func parseGitHeaderDeletedMode(f *File, line, defaultName string) error {
	f.IsDelete = true
	f.OldName = defaultName
	return parseGitHeaderOldMode(f, line, defaultName)
}

// parseGitHeaderCreatedMode parses a "new file mode " line, marking the file
// as created and falling back to the default name.
func parseGitHeaderCreatedMode(f *File, line, defaultName string) error {
	f.IsNew = true
	f.NewName = defaultName
	return parseGitHeaderNewMode(f, line, defaultName)
}

// parseGitHeaderCopyFrom parses a "copy from " line; the whole remainder is
// the name (no prefix components are dropped).
func parseGitHeaderCopyFrom(f *File, line, defaultName string) (err error) {
	f.IsCopy = true
	f.OldName, _, err = parseName(line, -1, 0)
	return
}

// parseGitHeaderCopyTo parses a "copy to " line.
func parseGitHeaderCopyTo(f *File, line, defaultName string) (err error) {
	f.IsCopy = true
	f.NewName, _, err = parseName(line, -1, 0)
	return
}

// parseGitHeaderRenameFrom parses a "rename from " (or "rename old ") line.
func parseGitHeaderRenameFrom(f *File, line, defaultName string) (err error) {
	f.IsRename = true
	f.OldName, _, err = parseName(line, -1, 0)
	return
}

// parseGitHeaderRenameTo parses a "rename to " (or "rename new ") line.
func parseGitHeaderRenameTo(f *File, line, defaultName string) (err error) {
	f.IsRename = true
	f.NewName, _, err = parseName(line, -1, 0)
	return
}

// parseGitHeaderScore parses the percentage from a "similarity index " or
// "dissimilarity index " line; values above 100 are ignored.
func parseGitHeaderScore(f *File, line, defaultName string) error {
	score, err := strconv.ParseInt(strings.TrimSuffix(line, "%"), 10, 32)
	if err != nil {
		nerr := err.(*strconv.NumError)
		return fmt.Errorf("invalid score line: %v", nerr.Err)
	}
	if score <= 100 {
		f.Score = int(score)
	}
	return nil
}

// parseGitHeaderIndex parses an "index <old-oid>..<new-oid> [<mode>]" line.
func parseGitHeaderIndex(f *File, line, defaultName string) error {
	const sep = ".."

	// note that git stops parsing if the OIDs are too long to be valid
	// checking this requires knowing if the repository uses SHA1 or SHA256
	// hashes, which we don't know, so we just skip that check

	parts := strings.SplitN(line, " ", 2)
	oids := strings.SplitN(parts[0], sep, 2)
	if len(oids) < 2 {
		return fmt.Errorf("invalid index line: missing %q", sep)
	}
	f.OldOIDPrefix, f.NewOIDPrefix = oids[0], oids[1]

	// the optional trailing field is a file mode
	if len(parts) > 1 {
		return parseGitHeaderOldMode(f, parts[1], defaultName)
	}
	return nil
}
func parseMode(s string) (os.FileMode, error) {
mode, err := strconv.ParseInt(s, 8, 32)
if err != nil {
nerr := err.(*strconv.NumError)
return os.FileMode(0), fmt.Errorf("invalid mode line: %v", nerr.Err)
}
return os.FileMode(mode), nil
}
// parseName extracts a file name from the start of a string and returns the
// name and the index of the first character after the name. If the name is
// unquoted and term is non-negative, parsing stops at the first occurrence of
// term. Otherwise parsing of unquoted names stops at the first space or tab.
//
// If the name is exactly "/dev/null", no further processing occurs. Otherwise,
// if dropPrefix is greater than zero, that number of prefix components
// separated by forward slashes are dropped from the name and any duplicate
// slashes are collapsed.
func parseName(s string, term rune, dropPrefix int) (name string, n int, err error) {
	// a leading double quote selects the C-style quoted form
	if len(s) > 0 && s[0] == '"' {
		name, n, err = parseQuotedName(s)
	} else {
		name, n, err = parseUnquotedName(s, term)
	}
	if err != nil {
		return "", 0, err
	}

	// "/dev/null" marks creations/deletions; never rewrite it
	if name == devNull {
		return name, n, nil
	}

	return cleanName(name, dropPrefix), n, nil
}
// parseQuotedName parses a name in C-style quoted form, returning the
// unquoted name and the index just past the closing quote.
func parseQuotedName(s string) (string, int, error) {
	// scan for the first quote that is not preceded by a backslash; end is
	// left just past it (or at len(s) if the quote is unterminated)
	end := 1
	for end < len(s) {
		isClose := s[end] == '"' && s[end-1] != '\\'
		end++
		if isClose {
			break
		}
	}
	if end == 2 {
		return "", 0, fmt.Errorf("missing name")
	}
	unquoted, err := strconv.Unquote(s[:end])
	if err != nil {
		return "", 0, err
	}
	return unquoted, end, nil
}
// parseUnquotedName parses an unquoted name, stopping at a newline, at term
// (when term is non-negative), or otherwise at the first space or tab. It
// returns the name and the index of the terminating character.
func parseUnquotedName(s string, term rune) (string, int, error) {
	end := len(s)
	for i := 0; i < len(s); i++ {
		c := s[i]
		stop := c == '\n' ||
			(term >= 0 && rune(c) == term) ||
			(term < 0 && (c == ' ' || c == '\t'))
		if stop {
			end = i
			break
		}
	}
	if end == 0 {
		return "", 0, fmt.Errorf("missing name")
	}
	return s[:end], end, nil
}
// verifyGitHeaderName checks a parsed name against state set by previous
// header lines: an already-known name must match, and a null (created or
// deleted) side must use /dev/null.
func verifyGitHeaderName(parsed, existing string, isNull bool, side string) error {
	switch {
	case existing != "" && isNull:
		return fmt.Errorf("expected %s, but filename is set to %s", devNull, existing)
	case existing != "" && existing != parsed:
		return fmt.Errorf("inconsistent %s filename", side)
	case isNull && parsed != devNull:
		return fmt.Errorf("expected %s", devNull)
	}
	return nil
}
// cleanName removes duplicate slashes from name and drops the first `drop`
// slash-separated prefix components.
func cleanName(name string, drop int) string {
	var out strings.Builder
	for i := 0; i < len(name); i++ {
		c := name[i]
		if c == '/' {
			// collapse a run of slashes down to its final slash
			if i+1 < len(name) && name[i+1] == '/' {
				continue
			}
			// a slash ends a prefix component: discard what was collected
			if drop > 0 {
				drop--
				out.Reset()
				continue
			}
		}
		out.WriteByte(c)
	}
	return out.String()
}
// hasEpochTimestamp returns true if the string ends with a POSIX-formatted
// timestamp for the UNIX epoch after a tab character. According to git, this
// is used by GNU diff to mark creations and deletions.
func hasEpochTimestamp(s string) bool {
	const posixTimeLayout = "2006-01-02 15:04:05.9 -0700"

	start := strings.IndexRune(s, '\t')
	if start < 0 {
		return false
	}

	ts := strings.TrimSuffix(s[start+1:], "\n")

	// a valid timestamp can have optional ':' in zone specifier
	// remove that if it exists so we have a single format
	//
	// the length guard also protects the index below: without it, content
	// shorter than three bytes after the tab caused an index-out-of-range
	// panic
	if len(ts) >= 3 && ts[len(ts)-3] == ':' {
		ts = ts[:len(ts)-3] + ts[len(ts)-2:]
	}

	t, err := time.Parse(posixTimeLayout, ts)
	if err != nil {
		return false
	}
	return t.Equal(time.Unix(0, 0))
}

View File

@ -0,0 +1,199 @@
package gitdiff
import (
"errors"
"fmt"
"os"
)
// File describes changes to a single file. It can be either a text file or a
// binary file.
type File struct {
	OldName string // name before the change; empty when the file is created
	NewName string // name after the change; empty when the file is deleted

	IsNew    bool // the patch creates this file
	IsDelete bool // the patch deletes this file
	IsCopy   bool // the patch copies OldName to NewName
	IsRename bool // the patch renames OldName to NewName

	OldMode os.FileMode // mode before the change; zero if absent from the header
	NewMode os.FileMode // mode after the change; zero if absent from the header

	OldOIDPrefix string // abbreviated pre-image object ID from the "index" line
	NewOIDPrefix string // abbreviated post-image object ID from the "index" line
	Score        int    // similarity/dissimilarity percentage, if present

	// TextFragments contains the fragments describing changes to a text file. It
	// may be empty if the file is empty or if only the mode changes.
	TextFragments []*TextFragment

	// IsBinary is true if the file is a binary file. If the patch includes
	// binary data, BinaryFragment will be non-nil and describe the changes to
	// the data. If the patch is reversible, ReverseBinaryFragment will also be
	// non-nil and describe the changes needed to restore the original file
	// after applying the changes in BinaryFragment.
	IsBinary              bool
	BinaryFragment        *BinaryFragment
	ReverseBinaryFragment *BinaryFragment
}

// TextFragment describes changed lines starting at a specific line in a text file.
type TextFragment struct {
	Comment string // text following the closing "@@" of the fragment header

	OldPosition int64 // starting line in the old file; 0 when the file is created
	OldLines    int64 // count of old lines (context + deleted) in the fragment
	NewPosition int64 // starting line in the new file
	NewLines    int64 // count of new lines (context + added) in the fragment

	LinesAdded   int64 // count of '+' lines
	LinesDeleted int64 // count of '-' lines

	LeadingContext  int64 // context lines before the first add/delete
	TrailingContext int64 // context lines after the last add/delete

	Lines []Line // fragment content, in order
}

// Header returns the canonical header of this fragment.
func (f *TextFragment) Header() string {
	return fmt.Sprintf("@@ -%d,%d +%d,%d @@ %s", f.OldPosition, f.OldLines, f.NewPosition, f.NewLines, f.Comment)
}
// Validate checks that the fragment is self-consistent and appliable. Validate
// returns an error if and only if the fragment is invalid.
func (f *TextFragment) Validate() error {
	if f == nil {
		return errors.New("nil fragment")
	}

	var (
		oldLines, newLines                     int64
		leadingContext, trailingContext        int64
		contextLines, addedLines, deletedLines int64
	)

	// count the types of lines in the fragment content
	for i, line := range f.Lines {
		switch line.Op {
		case OpContext:
			oldLines++
			newLines++
			contextLines++
			// context before any change is leading; any later context is
			// trailing until the next add/delete resets it
			if addedLines == 0 && deletedLines == 0 {
				leadingContext++
			} else {
				trailingContext++
			}
		case OpAdd:
			newLines++
			addedLines++
			trailingContext = 0
		case OpDelete:
			oldLines++
			deletedLines++
			trailingContext = 0
		default:
			return fmt.Errorf("unknown operator %q on line %d", line.Op, i+1)
		}
	}

	// check the actual counts against the reported counts
	if oldLines != f.OldLines {
		return lineCountErr("old", oldLines, f.OldLines)
	}
	if newLines != f.NewLines {
		return lineCountErr("new", newLines, f.NewLines)
	}
	if leadingContext != f.LeadingContext {
		return lineCountErr("leading context", leadingContext, f.LeadingContext)
	}
	if trailingContext != f.TrailingContext {
		return lineCountErr("trailing context", trailingContext, f.TrailingContext)
	}
	if addedLines != f.LinesAdded {
		return lineCountErr("added", addedLines, f.LinesAdded)
	}
	if deletedLines != f.LinesDeleted {
		return lineCountErr("deleted", deletedLines, f.LinesDeleted)
	}

	// if a file is being created, it can only contain additions
	if f.OldPosition == 0 && f.OldLines != 0 {
		return errors.New("file creation fragment contains context or deletion lines")
	}

	return nil
}
// lineCountErr reports a mismatch between the counted and declared number of
// lines of a given kind in a fragment.
func lineCountErr(kind string, actual, reported int64) error {
	msg := fmt.Sprintf("fragment contains %d %s lines but reports %d", actual, kind, reported)
	return errors.New(msg)
}
// Line is a line in a text fragment.
type Line struct {
	Op   LineOp // the operation: context, add, or delete
	Line string // line content, normally including the trailing newline
}

// String returns the line in unified diff form: the operation character
// followed by the content.
func (fl Line) String() string {
	return fl.Op.String() + fl.Line
}

// Old returns true if the line appears in the old content of the fragment.
func (fl Line) Old() bool {
	return fl.Op == OpContext || fl.Op == OpDelete
}

// New returns true if the line appears in the new content of the fragment.
func (fl Line) New() bool {
	return fl.Op == OpContext || fl.Op == OpAdd
}

// NoEOL returns true if the line is missing a trailing newline character.
func (fl Line) NoEOL() bool {
	return len(fl.Line) == 0 || fl.Line[len(fl.Line)-1] != '\n'
}
// LineOp describes the type of a text fragment line: context, added, or removed.
type LineOp int

const (
	// OpContext indicates a context line
	OpContext LineOp = iota
	// OpDelete indicates a deleted line
	OpDelete
	// OpAdd indicates an added line
	OpAdd
)

// String returns the single-character prefix used for the operation in
// unified diff output, or "?" for an unknown operation.
func (op LineOp) String() string {
	if op == OpContext {
		return " "
	}
	if op == OpDelete {
		return "-"
	}
	if op == OpAdd {
		return "+"
	}
	return "?"
}
// BinaryFragment describes changes to a binary file.
type BinaryFragment struct {
	// Method is how Data is encoded.
	Method BinaryPatchMethod
	// Size and Data are populated by the binary patch parser (not shown in
	// this file); Size presumably is the expanded size of Data — confirm
	// against the binary parser before relying on it.
	Size int64
	Data []byte
}

// BinaryPatchMethod is the method used to create and apply the binary patch.
type BinaryPatchMethod int

const (
	// BinaryPatchDelta indicates the data uses Git's packfile encoding
	BinaryPatchDelta BinaryPatchMethod = iota
	// BinaryPatchLiteral indicates the data is the exact file content
	BinaryPatchLiteral
)

220
vendor/github.com/bluekeyes/go-gitdiff/gitdiff/io.go generated vendored Normal file
View File

@ -0,0 +1,220 @@
package gitdiff
import (
"errors"
"io"
)
// LineReaderAt is the interface that wraps the ReadLinesAt method.
//
// ReadLinesAt reads len(lines) into lines starting at line offset in the
// input source. It returns number of full lines read (0 <= n <= len(lines))
// and any error encountered. Line numbers are zero-indexed.
//
// If n < len(lines), ReadLinesAt returns a non-nil error explaining why more
// lines were not returned.
//
// Each full line includes the line ending character(s). If the last line of
// the input does not have a line ending character, ReadLinesAt returns the
// content of the line and io.EOF.
//
// If the content of the input source changes after the first call to
// ReadLinesAt, the behavior of future calls is undefined.
type LineReaderAt interface {
	ReadLinesAt(lines [][]byte, offset int64) (n int, err error)
}

// lineReaderAt implements LineReaderAt on top of an io.ReaderAt by lazily
// building an index of line boundaries.
type lineReaderAt struct {
	r     io.ReaderAt
	index []int64 // index[i] is the byte offset just past the end of line i
	eof   bool    // set once indexing has observed the end of r
}
// ReadLinesAt implements LineReaderAt.
func (r *lineReaderAt) ReadLinesAt(lines [][]byte, offset int64) (n int, err error) {
	if offset < 0 {
		return 0, errors.New("ReadLinesAt: negative offset")
	}
	if len(lines) == 0 {
		return 0, nil
	}

	count := len(lines)
	startLine := offset
	endLine := startLine + int64(count)

	// extend the line index to cover the requested range, unless the end of
	// the input has already been indexed
	if endLine > int64(len(r.index)) && !r.eof {
		if err := r.indexTo(endLine); err != nil {
			return 0, err
		}
	}
	if startLine >= int64(len(r.index)) {
		return 0, io.EOF
	}

	// read the raw bytes of all requested lines with a single ReadAt
	buf, byteOffset, err := r.readBytes(startLine, int64(count))
	if err != nil {
		return 0, err
	}

	// slice the buffer into individual lines using the index offsets
	for n = 0; n < count && startLine+int64(n) < int64(len(r.index)); n++ {
		lineno := startLine + int64(n)
		start, end := int64(0), r.index[lineno]-byteOffset
		if lineno > 0 {
			start = r.index[lineno-1] - byteOffset
		}
		lines[n] = buf[start:end]
	}

	// per the interface contract: io.EOF on a short read or when the final
	// line has no trailing newline
	if n < count || buf[len(buf)-1] != '\n' {
		return n, io.EOF
	}
	return n, nil
}
// indexTo reads data and computes the line index until there is information
// for line or a read returns io.EOF. It returns an error if and only if there
// is an error reading data.
func (r *lineReaderAt) indexTo(line int64) error {
	var buf [1024]byte

	// resume reading where indexing previously stopped
	var offset int64
	if len(r.index) > 0 {
		offset = r.index[len(r.index)-1]
	}

	for int64(len(r.index)) < line {
		n, err := r.r.ReadAt(buf[:], offset)
		if err != nil && err != io.EOF {
			return err
		}
		// record the offset just past each newline
		for _, b := range buf[:n] {
			offset++
			if b == '\n' {
				r.index = append(r.index, offset)
			}
		}
		if err == io.EOF {
			// a final unterminated line still counts as a line
			if n > 0 && buf[n-1] != '\n' {
				r.index = append(r.index, offset)
			}
			r.eof = true
			break
		}
	}
	return nil
}
// readBytes reads the bytes of the n lines starting at line and returns the
// bytes and the offset of the first byte in the underlying source.
func (r *lineReaderAt) readBytes(line, n int64) (b []byte, offset int64, err error) {
	indexLen := int64(len(r.index))

	var size int64
	if line > indexLen {
		// start is beyond the indexed data; size stays 0 and the read below
		// is empty
		offset = r.index[indexLen-1]
	} else if line > 0 {
		offset = r.index[line-1]
	}
	if n > 0 {
		// clamp the size to the lines that actually exist
		if line+n > indexLen {
			size = r.index[indexLen-1] - offset
		} else {
			size = r.index[line+n-1] - offset
		}
	}

	b = make([]byte, size)
	if _, err := r.r.ReadAt(b, offset); err != nil {
		if err == io.EOF {
			// a correctly-sized read never hits EOF unless the index is
			// stale or the source shrank
			err = errors.New("ReadLinesAt: corrupt line index or changed source data")
		}
		return nil, 0, err
	}
	return b, offset, nil
}
func isLen(r io.ReaderAt, n int64) (bool, error) {
off := n - 1
if off < 0 {
off = 0
}
var b [2]byte
nr, err := r.ReadAt(b[:], off)
if err == io.EOF {
return (n == 0 && nr == 0) || (n > 0 && nr == 1), nil
}
return false, err
}
const (
byteBufferSize = 32 * 1024 // from io.Copy
lineBufferSize = 32
)
// copyFrom writes bytes starting from offset off in src to dst stopping at the
// end of src or at the first error. copyFrom returns the number of bytes
// written and any error.
func copyFrom(dst io.Writer, src io.ReaderAt, off int64) (written int64, err error) {
buf := make([]byte, byteBufferSize)
for {
nr, rerr := src.ReadAt(buf, off)
if nr > 0 {
nw, werr := dst.Write(buf[0:nr])
if nw > 0 {
written += int64(nw)
}
if werr != nil {
err = werr
break
}
if nr != nw {
err = io.ErrShortWrite
break
}
off += int64(nr)
}
if rerr != nil {
if rerr != io.EOF {
err = rerr
}
break
}
}
return written, err
}
// copyLinesFrom writes lines starting from line off in src to dst stopping at
// the end of src or at the first error. copyLinesFrom returns the number of
// lines written and any error.
func copyLinesFrom(dst io.Writer, src LineReaderAt, off int64) (written int64, err error) {
	buf := make([][]byte, lineBufferSize)

ReadLoop:
	for {
		nr, rerr := src.ReadLinesAt(buf, off)
		if nr > 0 {
			for _, line := range buf[0:nr] {
				nw, werr := dst.Write(line)
				if nw > 0 {
					written++
				}
				if werr != nil {
					err = werr
					break ReadLoop
				}
				// a partially written line counts as a short write
				if len(line) != nw {
					err = io.ErrShortWrite
					break ReadLoop
				}
			}
			off += int64(nr)
		}
		if rerr != nil {
			// io.EOF is the normal termination condition
			if rerr != io.EOF {
				err = rerr
			}
			break
		}
	}
	return written, err
}

View File

@ -0,0 +1,138 @@
// Package gitdiff parses and applies patches generated by Git. It supports
// line-oriented text patches, binary patches, and can also parse standard
// unified diffs generated by other tools.
package gitdiff
import (
"bufio"
"fmt"
"io"
)
// Parse parses a patch with changes to one or more files. Any content before
// the first file is returned as the second value. If an error occurs while
// parsing, it returns all files parsed before the error.
func Parse(r io.Reader) ([]*File, string, error) {
	p := newParser(r)

	// prime the parser; an immediately empty input is not an error
	if err := p.Next(); err != nil {
		if err == io.EOF {
			return nil, "", nil
		}
		return nil, "", err
	}

	var preamble string
	var files []*File
	for {
		file, pre, err := p.ParseNextFileHeader()
		if err != nil {
			return files, preamble, err
		}
		if file == nil {
			break
		}

		// try the text-fragment parser first, then the binary parser; stop
		// once one of them consumed fragments
		for _, fn := range []func(*File) (int, error){
			p.ParseTextFragments,
			p.ParseBinaryFragments,
		} {
			n, err := fn(file)
			if err != nil {
				return files, preamble, err
			}
			if n > 0 {
				break
			}
		}

		// the preamble is whatever preceded the first file header
		if len(files) == 0 {
			preamble = pre
		}
		files = append(files, file)
	}

	return files, preamble, nil
}
// TODO(bkeyes): consider exporting the parser type with configuration
// this would enable OID validation, p-value guessing, and prefix stripping
// by allowing users to set or override defaults

// parser invariants:
//   - methods that parse objects:
//   - start with the parser on the first line of the first object
//   - if returning nil, do not advance
//   - if returning an error, do not advance past the object
//   - if returning an object, advance to the first line after the object
//   - any exported parsing methods must initialize the parser by calling Next()

// stringReader is the subset of bufio.Reader that the parser needs:
// delimiter-terminated line reads.
type stringReader interface {
	ReadString(delim byte) (string, error)
}

// parser tracks the current position in the input along with a small window
// of read-ahead lines.
type parser struct {
	r stringReader

	eof    bool      // set once the underlying reader is exhausted
	lineno int64     // line number of lines[0]; 0 before the first Next()
	lines  [3]string // lines[0] is the current line, lines[1:] are read-ahead
}

// newParser wraps r in a parser, using r directly when it already supports
// line-oriented reads.
func newParser(r io.Reader) *parser {
	if r, ok := r.(stringReader); ok {
		return &parser{r: r}
	}
	return &parser{r: bufio.NewReader(r)}
}
// Next advances the parser by one line. It returns any error encountered while
// reading the line, including io.EOF when the end of stream is reached.
func (p *parser) Next() error {
	if p.eof {
		return io.EOF
	}

	if p.lineno == 0 {
		// on first call to next, need to shift in all lines
		for i := 0; i < len(p.lines)-1; i++ {
			if err := p.shiftLines(); err != nil && err != io.EOF {
				return err
			}
		}
	}

	err := p.shiftLines()
	if err != nil && err != io.EOF {
		return err
	}

	p.lineno++
	// valid lines always contain at least a newline, so an empty current
	// line means the read-ahead window ran past the end of the input
	if p.lines[0] == "" {
		p.eof = true
		return io.EOF
	}
	return nil
}

// shiftLines moves the read-ahead lines up by one slot and reads the next
// line of input into the last slot.
func (p *parser) shiftLines() (err error) {
	for i := 0; i < len(p.lines)-1; i++ {
		p.lines[i] = p.lines[i+1]
	}
	p.lines[len(p.lines)-1], err = p.r.ReadString('\n')
	return
}
// Line returns a line from the parser without advancing it. A delta of 0
// returns the current line, while higher deltas return read-ahead lines. It
// returns an empty string if the delta is higher than the available lines,
// either because of the buffer size or because the parser reached the end of
// the input. Valid lines always contain at least a newline character.
func (p *parser) Line(delta uint) string {
	return p.lines[delta]
}

// Errorf generates an error and appends the current line information. The
// delta is added to the current line number, allowing callers to point the
// error at read-ahead or previously consumed lines.
func (p *parser) Errorf(delta int64, msg string, args ...interface{}) error {
	return fmt.Errorf("gitdiff: line %d: %s", p.lineno+delta, fmt.Sprintf(msg, args...))
}

View File

@ -0,0 +1,455 @@
package gitdiff
import (
"bufio"
"errors"
"fmt"
"io"
"net/mail"
"strconv"
"strings"
"time"
"unicode"
)
const (
	// mailHeaderPrefix starts the first line of a UNIX mailbox-style header
	// (git format-patch).
	mailHeaderPrefix = "From "
	// prettyHeaderPrefix starts the first line of a pretty-format header
	// (git log / git show).
	prettyHeaderPrefix = "commit "
)

// PatchHeader is a parsed version of the preamble content that appears before
// the first diff in a patch. It includes metadata about the patch, such as the
// author and a subject.
type PatchHeader struct {
	// The SHA of the commit the patch was generated from. Empty if the SHA is
	// not included in the header.
	SHA string

	// The author details of the patch. If these details are not included in
	// the header, Author is nil and AuthorDate is the zero time.
	Author     *PatchIdentity
	AuthorDate time.Time

	// The committer details of the patch. If these details are not included in
	// the header, Committer is nil and CommitterDate is the zero time.
	Committer     *PatchIdentity
	CommitterDate time.Time

	// The title and body of the commit message describing the changes in the
	// patch. Empty if no message is included in the header.
	Title string
	Body  string

	// If the preamble looks like an email, ParsePatchHeader will
	// remove prefixes such as `Re: ` and `[PATCH v3 5/17]` from the
	// Title and place them here.
	SubjectPrefix string

	// If the preamble looks like an email, and it contains a `---`
	// line, that line will be removed and everything after it will be
	// placed in BodyAppendix.
	BodyAppendix string
}
// Message returns the commit message for the header. The message consists of
// the title and the body separated by an empty line.
func (h *PatchHeader) Message() string {
	if h == nil {
		return ""
	}
	msg := h.Title
	if h.Body != "" {
		msg += "\n\n" + h.Body
	}
	return msg
}
// PatchIdentity identifies a person who authored or committed a patch.
type PatchIdentity struct {
	Name  string
	Email string
}

// String formats the identity as `Name <email>`, substituting a quoted empty
// string when the name is missing.
func (i PatchIdentity) String() string {
	name := i.Name
	if name == "" {
		name = `""`
	}
	return name + " <" + i.Email + ">"
}

// ParsePatchIdentity parses a patch identity string. A valid string contains a
// non-empty name followed by an email address in angle brackets. Like Git,
// ParsePatchIdentity does not require that the email address is valid or
// properly formatted, only that it is non-empty. The name must not contain a
// left angle bracket, '<', and the email address must not contain a right
// angle bracket, '>'.
func ParsePatchIdentity(s string) (PatchIdentity, error) {
	var name, email string

	// the email section is delimited by the first '<' and the first '>'
	// that follows it
	lt := strings.IndexByte(s, '<')
	if lt >= 0 {
		gt := strings.IndexByte(s[lt+1:], '>')
		if gt < 0 {
			return PatchIdentity{}, fmt.Errorf("invalid identity string: unclosed email section: %s", s)
		}
		name = strings.TrimSpace(s[:lt])
		email = strings.TrimSpace(s[lt+1 : lt+1+gt])
	}

	if name == "" || email == "" {
		return PatchIdentity{}, fmt.Errorf("invalid identity string: %s", s)
	}
	return PatchIdentity{Name: name, Email: email}, nil
}
// ParsePatchDate parses a patch date string. It returns the parsed time or an
// error if s has an unknown format. ParsePatchDate supports the iso, rfc,
// short, raw, unix, and default formats (with local variants) used by the
// --date flag in Git.
func ParsePatchDate(s string) (time.Time, error) {
	const (
		isoFormat          = "2006-01-02 15:04:05 -0700"
		isoStrictFormat    = "2006-01-02T15:04:05-07:00"
		rfc2822Format      = "Mon, 2 Jan 2006 15:04:05 -0700"
		shortFormat        = "2006-01-02"
		defaultFormat      = "Mon Jan 2 15:04:05 2006 -0700"
		defaultLocalFormat = "Mon Jan 2 15:04:05 2006"
	)

	if s == "" {
		return time.Time{}, nil
	}

	// try each known layout in order
	layouts := []string{
		isoFormat,
		isoStrictFormat,
		rfc2822Format,
		shortFormat,
		defaultFormat,
		defaultLocalFormat,
	}
	for _, layout := range layouts {
		if t, err := time.ParseInLocation(layout, s, time.Local); err == nil {
			return t, nil
		}
	}

	// unix format: a bare integer of seconds
	if unix, err := strconv.ParseInt(s, 10, 64); err == nil {
		return time.Unix(unix, 0), nil
	}

	// raw format: "<seconds> <zone>"
	if space := strings.IndexByte(s, ' '); space > 0 {
		unix, uerr := strconv.ParseInt(s[:space], 10, 64)
		zone, zerr := time.Parse("-0700", s[space+1:])
		if uerr == nil && zerr == nil {
			return time.Unix(unix, 0).In(zone.Location()), nil
		}
	}

	return time.Time{}, fmt.Errorf("unknown date format: %s", s)
}
// ParsePatchHeader parses a preamble string as returned by Parse into a
// PatchHeader. Due to the variety of header formats, some fields of the parsed
// PatchHeader may be unset after parsing.
//
// Supported formats are the short, medium, full, fuller, and email pretty
// formats used by git diff, git log, and git show and the UNIX mailbox format
// used by git format-patch.
//
// If ParsePatchHeader detects that it is handling an email, it will
// remove extra content at the beginning of the title line, such as
// `[PATCH]` or `Re:` in the same way that `git mailinfo` does.
// SubjectPrefix will be set to the value of this removed string.
// (`git mailinfo` is the core part of `git am` that pulls information
// out of an individual mail.)
//
// Additionally, if ParsePatchHeader detects that it's handling an
// email, it will remove a `---` line and put anything after it into
// BodyAppendix.
//
// Those wishing the effect of a plain `git am` should use
// `PatchHeader.Title + "\n" + PatchHeader.Body` (or
// `PatchHeader.Message()`). Those wishing to retain the subject
// prefix and appendix material should use `PatchHeader.SubjectPrefix
// + PatchHeader.Title + "\n" + PatchHeader.Body + "\n" +
// PatchHeader.BodyAppendix`.
func ParsePatchHeader(s string) (*PatchHeader, error) {
	r := bufio.NewReader(strings.NewReader(s))

	// skip leading blank lines to find the first content line
	var line string
	for {
		var err error
		line, err = r.ReadString('\n')
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, err
		}

		line = strings.TrimSpace(line)
		if len(line) > 0 {
			break
		}
	}

	// dispatch on the first line's prefix
	switch {
	case strings.HasPrefix(line, mailHeaderPrefix):
		return parseHeaderMail(line, r)
	case strings.HasPrefix(line, prettyHeaderPrefix):
		return parseHeaderPretty(line, r)
	}
	return nil, errors.New("unrecognized patch header format")
}
// parseHeaderPretty parses a pretty-format header ("commit <sha>" followed by
// field lines, a blank line, and an indented message) as produced by
// git log / git show.
func parseHeaderPretty(prettyLine string, r io.Reader) (*PatchHeader, error) {
	const (
		authorPrefix     = "Author:"
		commitPrefix     = "Commit:"
		datePrefix       = "Date:"
		authorDatePrefix = "AuthorDate:"
		commitDatePrefix = "CommitDate:"
	)

	h := &PatchHeader{}

	// the SHA is the first word after the "commit " prefix
	prettyLine = prettyLine[len(prettyHeaderPrefix):]
	if i := strings.IndexByte(prettyLine, ' '); i > 0 {
		h.SHA = prettyLine[:i]
	} else {
		h.SHA = prettyLine
	}

	s := bufio.NewScanner(r)
	for s.Scan() {
		line := s.Text()

		// empty line marks end of fields, remaining lines are title/message
		if strings.TrimSpace(line) == "" {
			break
		}

		switch {
		case strings.HasPrefix(line, authorPrefix):
			u, err := ParsePatchIdentity(line[len(authorPrefix):])
			if err != nil {
				return nil, err
			}
			h.Author = &u

		case strings.HasPrefix(line, commitPrefix):
			u, err := ParsePatchIdentity(line[len(commitPrefix):])
			if err != nil {
				return nil, err
			}
			h.Committer = &u

		// "Date:" is used by the medium format, "AuthorDate:"/"CommitDate:"
		// by the fuller format; both set the author date
		case strings.HasPrefix(line, datePrefix):
			d, err := ParsePatchDate(strings.TrimSpace(line[len(datePrefix):]))
			if err != nil {
				return nil, err
			}
			h.AuthorDate = d

		case strings.HasPrefix(line, authorDatePrefix):
			d, err := ParsePatchDate(strings.TrimSpace(line[len(authorDatePrefix):]))
			if err != nil {
				return nil, err
			}
			h.AuthorDate = d

		case strings.HasPrefix(line, commitDatePrefix):
			d, err := ParsePatchDate(strings.TrimSpace(line[len(commitDatePrefix):]))
			if err != nil {
				return nil, err
			}
			h.CommitterDate = d
		}
	}
	if s.Err() != nil {
		return nil, s.Err()
	}

	title, indent := scanMessageTitle(s)
	if s.Err() != nil {
		return nil, s.Err()
	}
	h.Title = title

	if title != "" {
		// Don't check for an appendix
		body, _ := scanMessageBody(s, indent, false)
		if s.Err() != nil {
			return nil, s.Err()
		}
		h.Body = body
	}

	return h, nil
}
func scanMessageTitle(s *bufio.Scanner) (title string, indent string) {
var b strings.Builder
for i := 0; s.Scan(); i++ {
line := s.Text()
trimLine := strings.TrimSpace(line)
if trimLine == "" {
break
}
if i == 0 {
if start := strings.IndexFunc(line, func(c rune) bool { return !unicode.IsSpace(c) }); start > 0 {
indent = line[:start]
}
}
if b.Len() > 0 {
b.WriteByte(' ')
}
b.WriteString(trimLine)
}
return b.String(), indent
}
// scanMessageBody collects the remaining message lines, trimming the given
// indent from each line and collapsing runs of blank lines to a single
// paragraph break. When separateAppendix is true, everything after a "---"
// line is collected separately and returned as the second value.
func scanMessageBody(s *bufio.Scanner, indent string, separateAppendix bool) (string, string) {
	// Body and appendix
	var body, appendix strings.Builder
	c := &body

	// empty counts the blank lines seen since the last content line
	var empty int
	for i := 0; s.Scan(); i++ {
		line := s.Text()

		line = strings.TrimRightFunc(line, unicode.IsSpace)
		line = strings.TrimPrefix(line, indent)

		if line == "" {
			empty++
			continue
		}

		// If requested, parse out "appendix" information (often added
		// by `git format-patch` and removed by `git am`).
		if separateAppendix && c == &body && line == "---" {
			c = &appendix
			continue
		}

		if c.Len() > 0 {
			c.WriteByte('\n')
			// any run of blank lines becomes a single paragraph break
			if empty > 0 {
				c.WriteByte('\n')
			}
		}
		empty = 0

		c.WriteString(line)
	}

	return body.String(), appendix.String()
}
// parseHeaderMail parses a UNIX mailbox-style header as produced by
// git format-patch.
func parseHeaderMail(mailLine string, r io.Reader) (*PatchHeader, error) {
	msg, err := mail.ReadMessage(r)
	if err != nil {
		return nil, err
	}

	h := &PatchHeader{}

	// the SHA is the first word after the "From " prefix
	mailLine = mailLine[len(mailHeaderPrefix):]
	if i := strings.IndexByte(mailLine, ' '); i > 0 {
		h.SHA = mailLine[:i]
	}

	addrs, err := msg.Header.AddressList("From")
	if err != nil && !errors.Is(err, mail.ErrHeaderNotPresent) {
		return nil, err
	}
	if len(addrs) > 0 {
		addr := addrs[0]
		// reject addresses without a display name
		if addr.Name == "" {
			return nil, fmt.Errorf("invalid user string: %s", addr)
		}
		h.Author = &PatchIdentity{Name: addr.Name, Email: addr.Address}
	}

	date := msg.Header.Get("Date")
	if date != "" {
		d, err := ParsePatchDate(date)
		if err != nil {
			return nil, err
		}
		h.AuthorDate = d
	}

	subject := msg.Header.Get("Subject")
	h.SubjectPrefix, h.Title = parseSubject(subject)

	s := bufio.NewScanner(msg.Body)
	h.Body, h.BodyAppendix = scanMessageBody(s, "", true)
	if s.Err() != nil {
		return nil, s.Err()
	}

	return h, nil
}
// Takes an email subject and returns the patch prefix and commit
// title. i.e., `[PATCH v3 3/5] Implement foo` would return `[PATCH
// v3 3/5] ` and `Implement foo`
func parseSubject(s string) (string, string) {
	// This is meant to be compatible with
	// https://github.com/git/git/blob/master/mailinfo.c:cleanup_subject().
	// If compatibility with `git am` drifts, go there to see if there
	// are any updates.
	//
	// Each switch case `continue`s only when it actually consumed
	// characters; anything else falls through to the break below and ends
	// the prefix.
	at := 0
	for at < len(s) {
		switch s[at] {
		case 'r', 'R':
			// Detect re:, Re:, rE: and RE:
			if at+2 < len(s) &&
				(s[at+1] == 'e' || s[at+1] == 'E') &&
				s[at+2] == ':' {
				at += 3
				continue
			}

		case ' ', '\t', ':':
			// Delete whitespace and duplicate ':' characters
			at++
			continue

		case '[':
			// Look for closing parenthesis
			j := at + 1
			for ; j < len(s); j++ {
				if s[j] == ']' {
					break
				}
			}

			// skip the bracketed section only if it was closed
			if j < len(s) {
				at = j + 1
				continue
			}
		}

		// Only loop if we actually removed something
		break
	}

	return s[:at], s[at:]
}

180
vendor/github.com/bluekeyes/go-gitdiff/gitdiff/text.go generated vendored Normal file
View File

@ -0,0 +1,180 @@
package gitdiff
import (
"fmt"
"io"
"strconv"
"strings"
)
// ParseTextFragments parses text fragments until the next file header or the
// end of the stream and attaches them to the given file. It returns the number
// of fragments that were added.
func (p *parser) ParseTextFragments(f *File) (n int, err error) {
	for {
		frag, err := p.ParseTextFragmentHeader()
		if err != nil {
			return n, err
		}
		// a nil fragment without an error means the current line is not a
		// fragment header: the next file (or the end of input) was reached
		if frag == nil {
			return n, nil
		}

		// check the fragment against the file-level creation/deletion flags
		if f.IsNew && frag.OldLines > 0 {
			return n, p.Errorf(-1, "new file depends on old contents")
		}
		if f.IsDelete && frag.NewLines > 0 {
			return n, p.Errorf(-1, "deleted file still has contents")
		}

		if err := p.ParseTextChunk(frag); err != nil {
			return n, err
		}

		f.TextFragments = append(f.TextFragments, frag)
		n++
	}
}
// ParseTextFragmentHeader parses a fragment header line of the form
// "@@ -a,b +c,d @@ comment". It returns nil with no error when the current
// line is not a fragment header.
func (p *parser) ParseTextFragmentHeader() (*TextFragment, error) {
	const (
		startMark = "@@ -"
		endMark   = " @@"
	)

	if !strings.HasPrefix(p.Line(0), startMark) {
		return nil, nil
	}

	// the comment is whatever follows the closing " @@"
	parts := strings.SplitAfterN(p.Line(0), endMark, 2)
	if len(parts) < 2 {
		return nil, p.Errorf(0, "invalid fragment header")
	}

	f := &TextFragment{}
	f.Comment = strings.TrimSpace(parts[1])

	// the ranges between the marks look like "a,b +c,d"
	header := parts[0][len(startMark) : len(parts[0])-len(endMark)]
	ranges := strings.Split(header, " +")
	if len(ranges) != 2 {
		return nil, p.Errorf(0, "invalid fragment header")
	}

	var err error
	if f.OldPosition, f.OldLines, err = parseRange(ranges[0]); err != nil {
		return nil, p.Errorf(0, "invalid fragment header: %v", err)
	}
	if f.NewPosition, f.NewLines, err = parseRange(ranges[1]); err != nil {
		return nil, p.Errorf(0, "invalid fragment header: %v", err)
	}

	// advance to the first content line
	if err := p.Next(); err != nil && err != io.EOF {
		return nil, err
	}
	return f, nil
}
// ParseTextChunk parses the content lines of a fragment, filling in frag's
// line counts and content. The parser must be positioned on the first line
// after the fragment header.
func (p *parser) ParseTextChunk(frag *TextFragment) error {
	if p.Line(0) == "" {
		return p.Errorf(0, "no content following fragment header")
	}

	isNoNewlineLine := func(s string) bool {
		// test for "\ No newline at end of file" by prefix because the text
		// changes by locale (git claims all versions are at least 12 chars)
		return len(s) >= 12 && s[:2] == "\\ "
	}

	// count down the lines promised by the fragment header
	oldLines, newLines := frag.OldLines, frag.NewLines
	for {
		line := p.Line(0)
		op, data := line[0], line[1:]

		switch op {
		case '\n':
			data = "\n"
			fallthrough // newer GNU diff versions create empty context lines
		case ' ':
			oldLines--
			newLines--
			// context before any change is leading; later context is trailing
			if frag.LinesAdded == 0 && frag.LinesDeleted == 0 {
				frag.LeadingContext++
			} else {
				frag.TrailingContext++
			}
			frag.Lines = append(frag.Lines, Line{OpContext, data})
		case '-':
			oldLines--
			frag.LinesDeleted++
			frag.TrailingContext = 0
			frag.Lines = append(frag.Lines, Line{OpDelete, data})
		case '+':
			newLines--
			frag.LinesAdded++
			frag.TrailingContext = 0
			frag.Lines = append(frag.Lines, Line{OpAdd, data})
		default:
			// this may appear in middle of fragment if it's for a deleted line
			if isNoNewlineLine(line) {
				// strip the newline from the previous content line
				last := &frag.Lines[len(frag.Lines)-1]
				last.Line = strings.TrimSuffix(last.Line, "\n")
				break
			}
			// TODO(bkeyes): if this is because we hit the next header, it
			// would be helpful to return the miscounts line error. We could
			// either test for the common headers ("@@ -", "diff --git") or
			// assume any invalid op ends the fragment; git returns the same
			// generic error in all cases so either is compatible
			return p.Errorf(0, "invalid line operation: %q", op)
		}

		// stop when all promised lines are consumed, unless the next line is
		// a "no newline" marker that still belongs to this fragment
		next := p.Line(1)
		if oldLines <= 0 && newLines <= 0 && !isNoNewlineLine(next) {
			break
		}

		if err := p.Next(); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
	}

	if oldLines != 0 || newLines != 0 {
		// point the error back at the fragment header line
		hdr := max(frag.OldLines-oldLines, frag.NewLines-newLines) + 1
		return p.Errorf(-hdr, "fragment header miscounts lines: %+d old, %+d new", -oldLines, -newLines)
	}

	// leave the parser on the first line after the fragment
	if err := p.Next(); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// parseRange parses a fragment range of the form "start" or "start,count".
// When the count is omitted, it defaults to 1 (per the unified diff format).
// On a malformed number it reports only the underlying strconv failure
// (e.g. "invalid syntax"), not the full NumError wrapper.
func parseRange(s string) (start int64, end int64, err error) {
	parts := strings.SplitN(s, ",", 2)

	start, err = strconv.ParseInt(parts[0], 10, 64)
	if err != nil {
		// ParseInt always returns *strconv.NumError
		return 0, 0, fmt.Errorf("bad start of range: %s: %v", parts[0], err.(*strconv.NumError).Err)
	}

	if len(parts) == 1 {
		// no explicit count means a single line
		return start, 1, nil
	}

	end, err = strconv.ParseInt(parts[1], 10, 64)
	if err != nil {
		return 0, 0, fmt.Errorf("bad end of range: %s: %v", parts[1], err.(*strconv.NumError).Err)
	}
	return start, end, nil
}
// max returns the larger of a and b.
func max(a, b int64) int64 {
	if b > a {
		return b
	}
	return a
}

1
vendor/github.com/rocky-linux/srpmproc/AUTHORS generated vendored Normal file
View File

@ -0,0 +1 @@
Mustafa Gezen <mustafa@gezen.no>

1
vendor/github.com/rocky-linux/srpmproc/CONTRIBUTORS generated vendored Normal file
View File

@ -0,0 +1 @@
Mustafa Gezen <mustafa@gezen.no>

19
vendor/github.com/rocky-linux/srpmproc/LICENSE generated vendored Normal file
View File

@ -0,0 +1,19 @@
Copyright (c) 2021 The Srpmproc Authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

16
vendor/github.com/rocky-linux/srpmproc/pb/BUILD generated vendored Normal file
View File

@ -0,0 +1,16 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "pb",
srcs = [
"cfg.pb.go",
"response.pb.go",
],
importmap = "go.resf.org/peridot/vendor/github.com/rocky-linux/srpmproc/pb",
importpath = "github.com/rocky-linux/srpmproc/pb",
visibility = ["//visibility:public"],
deps = [
"@org_golang_google_protobuf//reflect/protoreflect",
"@org_golang_google_protobuf//runtime/protoimpl",
],
)

1446
vendor/github.com/rocky-linux/srpmproc/pb/cfg.pb.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,249 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.27.1
// protoc v3.19.3
// source: response.proto
package srpmprocpb
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type VersionRelease struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
Release string `protobuf:"bytes,2,opt,name=release,proto3" json:"release,omitempty"`
}
func (x *VersionRelease) Reset() {
*x = VersionRelease{}
if protoimpl.UnsafeEnabled {
mi := &file_response_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *VersionRelease) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*VersionRelease) ProtoMessage() {}
func (x *VersionRelease) ProtoReflect() protoreflect.Message {
mi := &file_response_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use VersionRelease.ProtoReflect.Descriptor instead.
func (*VersionRelease) Descriptor() ([]byte, []int) {
return file_response_proto_rawDescGZIP(), []int{0}
}
func (x *VersionRelease) GetVersion() string {
if x != nil {
return x.Version
}
return ""
}
func (x *VersionRelease) GetRelease() string {
if x != nil {
return x.Release
}
return ""
}
type ProcessResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
BranchCommits map[string]string `protobuf:"bytes,1,rep,name=branch_commits,json=branchCommits,proto3" json:"branch_commits,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
BranchVersions map[string]*VersionRelease `protobuf:"bytes,2,rep,name=branch_versions,json=branchVersions,proto3" json:"branch_versions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (x *ProcessResponse) Reset() {
*x = ProcessResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_response_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ProcessResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ProcessResponse) ProtoMessage() {}
func (x *ProcessResponse) ProtoReflect() protoreflect.Message {
mi := &file_response_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ProcessResponse.ProtoReflect.Descriptor instead.
func (*ProcessResponse) Descriptor() ([]byte, []int) {
return file_response_proto_rawDescGZIP(), []int{1}
}
func (x *ProcessResponse) GetBranchCommits() map[string]string {
if x != nil {
return x.BranchCommits
}
return nil
}
func (x *ProcessResponse) GetBranchVersions() map[string]*VersionRelease {
if x != nil {
return x.BranchVersions
}
return nil
}
var File_response_proto protoreflect.FileDescriptor
var file_response_proto_rawDesc = []byte{
0x0a, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x12, 0x08, 0x73, 0x72, 0x70, 0x6d, 0x70, 0x72, 0x6f, 0x63, 0x22, 0x44, 0x0a, 0x0e, 0x56, 0x65,
0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07,
0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76,
0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73,
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65,
0x22, 0xdd, 0x02, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x0e, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x5f, 0x63,
0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x73,
0x72, 0x70, 0x6d, 0x70, 0x72, 0x6f, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x43, 0x6f,
0x6d, 0x6d, 0x69, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x62, 0x72, 0x61, 0x6e,
0x63, 0x68, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x56, 0x0a, 0x0f, 0x62, 0x72, 0x61,
0x6e, 0x63, 0x68, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03,
0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x72, 0x70, 0x6d, 0x70, 0x72, 0x6f, 0x63, 0x2e, 0x50, 0x72,
0x6f, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x72,
0x61, 0x6e, 0x63, 0x68, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72,
0x79, 0x52, 0x0e, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
0x73, 0x1a, 0x40, 0x0a, 0x12, 0x42, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x43, 0x6f, 0x6d, 0x6d, 0x69,
0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
0x02, 0x38, 0x01, 0x1a, 0x5b, 0x0a, 0x13, 0x42, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x56, 0x65, 0x72,
0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2e, 0x0a, 0x05,
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x72,
0x70, 0x6d, 0x70, 0x72, 0x6f, 0x63, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65,
0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01,
0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x72,
0x6f, 0x63, 0x6b, 0x79, 0x2d, 0x6c, 0x69, 0x6e, 0x75, 0x78, 0x2f, 0x73, 0x72, 0x70, 0x6d, 0x70,
0x72, 0x6f, 0x63, 0x2f, 0x70, 0x62, 0x3b, 0x73, 0x72, 0x70, 0x6d, 0x70, 0x72, 0x6f, 0x63, 0x70,
0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_response_proto_rawDescOnce sync.Once
file_response_proto_rawDescData = file_response_proto_rawDesc
)
func file_response_proto_rawDescGZIP() []byte {
file_response_proto_rawDescOnce.Do(func() {
file_response_proto_rawDescData = protoimpl.X.CompressGZIP(file_response_proto_rawDescData)
})
return file_response_proto_rawDescData
}
var file_response_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_response_proto_goTypes = []interface{}{
(*VersionRelease)(nil), // 0: srpmproc.VersionRelease
(*ProcessResponse)(nil), // 1: srpmproc.ProcessResponse
nil, // 2: srpmproc.ProcessResponse.BranchCommitsEntry
nil, // 3: srpmproc.ProcessResponse.BranchVersionsEntry
}
var file_response_proto_depIdxs = []int32{
2, // 0: srpmproc.ProcessResponse.branch_commits:type_name -> srpmproc.ProcessResponse.BranchCommitsEntry
3, // 1: srpmproc.ProcessResponse.branch_versions:type_name -> srpmproc.ProcessResponse.BranchVersionsEntry
0, // 2: srpmproc.ProcessResponse.BranchVersionsEntry.value:type_name -> srpmproc.VersionRelease
3, // [3:3] is the sub-list for method output_type
3, // [3:3] is the sub-list for method input_type
3, // [3:3] is the sub-list for extension type_name
3, // [3:3] is the sub-list for extension extendee
0, // [0:3] is the sub-list for field type_name
}
func init() { file_response_proto_init() }
func file_response_proto_init() {
if File_response_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_response_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*VersionRelease); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_response_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ProcessResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_response_proto_rawDesc,
NumEnums: 0,
NumMessages: 4,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_response_proto_goTypes,
DependencyIndexes: file_response_proto_depIdxs,
MessageInfos: file_response_proto_msgTypes,
}.Build()
File_response_proto = out.File
file_response_proto_rawDesc = nil
file_response_proto_goTypes = nil
file_response_proto_depIdxs = nil
}

View File

@ -0,0 +1,9 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "blob",
srcs = ["blob.go"],
importmap = "go.resf.org/peridot/vendor/github.com/rocky-linux/srpmproc/pkg/blob",
importpath = "github.com/rocky-linux/srpmproc/pkg/blob",
visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,27 @@
// Copyright (c) 2021 The Srpmproc Authors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package blob
// Storage is the blob-store abstraction used for lookaside artifacts:
// blobs are written, read back, and checked for presence by an opaque
// path/key. Concrete implementations are not visible from this file.
type Storage interface {
	// Write stores content at path.
	Write(path string, content []byte) error
	// Read returns the contents of the blob at path.
	Read(path string) ([]byte, error)
	// Exists reports whether a blob is present at path.
	Exists(path string) (bool, error)
}

19
vendor/github.com/rocky-linux/srpmproc/pkg/data/BUILD generated vendored Normal file
View File

@ -0,0 +1,19 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "data",
srcs = [
"import.go",
"process.go",
"utils.go",
],
importmap = "go.resf.org/peridot/vendor/github.com/rocky-linux/srpmproc/pkg/data",
importpath = "github.com/rocky-linux/srpmproc/pkg/data",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/go-git/go-billy/v5:go-billy",
"//vendor/github.com/go-git/go-git/v5:go-git",
"//vendor/github.com/go-git/go-git/v5/plumbing/transport",
"//vendor/github.com/rocky-linux/srpmproc/pkg/blob",
],
)

View File

@ -0,0 +1,52 @@
// Copyright (c) 2021 The Srpmproc Authors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package data
import (
"hash"
"github.com/go-git/go-git/v5"
)
// ImportMode abstracts the strategy used to obtain and publish package
// sources. NOTE(review): implementations live outside this file; the
// per-method semantics below are inferred from names — confirm against
// the concrete modes.
type ImportMode interface {
	// RetrieveSource presumably fetches the sources described by pd and
	// returns the working state for the import.
	RetrieveSource(pd *ProcessData) (*ModeData, error)
	// WriteSource presumably materializes the retrieved sources into md.
	WriteSource(pd *ProcessData, md *ModeData) error
	// PostProcess presumably applies mode-specific fixups to md.
	PostProcess(md *ModeData) error
	// ImportName presumably returns the name used for the import commit.
	ImportName(pd *ProcessData, md *ModeData) string
}

// ModeData is the mutable working state for one package import: the git
// repository and worktree being operated on plus bookkeeping accumulated
// along the way (pending file writes, branch names, sources destined for
// lookaside storage, cached blobs).
type ModeData struct {
	Name            string
	Repo            *git.Repository
	Worktree        *git.Worktree
	FileWrites      map[string][]byte
	TagBranch       string
	PushBranch      string
	Branches        []string
	SourcesToIgnore []*IgnoredSource
	BlobCache       map[string][]byte
}

// IgnoredSource marks a source file that is kept out of git (stored in
// lookaside instead). HashFunction carries the hash state associated with
// the blob; Expired presumably flags entries that are no longer valid —
// NOTE(review): confirm against usage.
type IgnoredSource struct {
	Name         string
	HashFunction hash.Hash
	Expired      bool
}

View File

@ -0,0 +1,63 @@
// Copyright (c) 2021 The Srpmproc Authors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package data
import (
"log"
"github.com/go-git/go-billy/v5"
"github.com/go-git/go-git/v5/plumbing/transport"
"github.com/rocky-linux/srpmproc/pkg/blob"
)
// FsCreatorFunc returns the filesystem into which the given branch's
// working tree is materialized.
type FsCreatorFunc func(branch string) (billy.Filesystem, error)

// ProcessData carries the options and collaborators for a single srpmproc
// import/process run. NOTE(review): field semantics are inferred from
// names — confirm against callers before relying on them.
type ProcessData struct {
	// source location and upstream/branch naming knobs
	RpmLocation        string
	UpstreamPrefix     string
	Version            int
	GitCommitterName   string
	GitCommitterEmail  string
	Mode               int
	ModulePrefix       string
	ImportBranchPrefix string
	BranchPrefix       string
	SingleTag          string
	// collaborators: transport auth, import strategy, lookaside storage
	Authenticator transport.AuthMethod
	Importer      ImportMode
	BlobStorage   blob.Storage
	// behavior toggles and miscellaneous run state
	NoDupMode            bool
	ModuleMode           bool
	TmpFsMode            string
	NoStorageDownload    bool
	NoStorageUpload      bool
	ManualCommits        []string
	ModuleFallbackStream string
	BranchSuffix         string
	StrictBranchMode     bool
	FsCreator            FsCreatorFunc
	CdnUrl               string
	Log                  *log.Logger
	PackageVersion       string
	PackageRelease       string
	TaglessMode          bool
	Cdn                  string
}

View File

@ -0,0 +1,131 @@
// Copyright (c) 2021 The Srpmproc Authors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package data
import (
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"encoding/hex"
"fmt"
"hash"
"io"
"os"
"path/filepath"
"github.com/go-git/go-billy/v5"
)
// CopyFromFs recursively copies the directory tree rooted at path from one
// billy filesystem to another. Directories are created with 0o755; files
// are recreated with the source file's mode and overwritten in full.
func CopyFromFs(from billy.Filesystem, to billy.Filesystem, path string) error {
	read, err := from.ReadDir(path)
	if err != nil {
		return fmt.Errorf("could not read dir: %v", err)
	}
	for _, fi := range read {
		fullPath := filepath.Join(path, fi.Name())
		if fi.IsDir() {
			// best-effort mkdir; a real failure surfaces on the recursive copy
			_ = to.MkdirAll(fullPath, 0o755)
			if err := CopyFromFs(from, to, fullPath); err != nil {
				return err
			}
			continue
		}
		if err := copyFile(from, to, fullPath, fi.Mode()); err != nil {
			return err
		}
	}
	return nil
}

// copyFile copies a single file at fullPath from one filesystem to the
// other, creating the destination with the given mode. Both handles are
// closed on every path (the previous inline version leaked them).
func copyFile(from billy.Filesystem, to billy.Filesystem, fullPath string, mode os.FileMode) error {
	// best-effort removal so the file is recreated with the source mode
	_ = to.Remove(fullPath)
	f, err := to.OpenFile(fullPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
	if err != nil {
		return fmt.Errorf("could not open file: %v", err)
	}
	defer f.Close()
	oldFile, err := from.Open(fullPath)
	if err != nil {
		return fmt.Errorf("could not open from file: %v", err)
	}
	defer oldFile.Close()
	if _, err := io.Copy(f, oldFile); err != nil {
		return fmt.Errorf("could not copy from oldFile to new: %v", err)
	}
	return nil
}
// IgnoredContains reports whether a contains an ignored source named b.
func IgnoredContains(a []*IgnoredSource, b string) bool {
	for i := range a {
		if a[i].Name == b {
			return true
		}
	}
	return false
}
// StrContains reports whether b is an element of a.
func StrContains(a []string, b string) bool {
	for i := range a {
		if a[i] == b {
			return true
		}
	}
	return false
}
// CompareHash checks if content matches checksum. The hash algorithm is
// chosen from the checksum's hex length: 128 -> sha512, 64 -> sha256,
// 40 -> sha1, 32 -> md5. On success it returns the hash.Hash that content
// was written into; it returns nil for an unrecognized checksum length, a
// write error, or a digest mismatch (the mismatch is also logged to pd.Log).
func (pd *ProcessData) CompareHash(content []byte, checksum string) hash.Hash {
	var hashType hash.Hash
	switch len(checksum) {
	case 128:
		hashType = sha512.New()
	case 64:
		hashType = sha256.New()
	case 40:
		hashType = sha1.New()
	case 32:
		hashType = md5.New()
	default:
		return nil
	}
	// a freshly constructed hash is already zeroed, so the previous explicit
	// Reset() and the per-case break statements were redundant — removed
	if _, err := hashType.Write(content); err != nil {
		return nil
	}
	calculated := hex.EncodeToString(hashType.Sum(nil))
	if calculated != checksum {
		pd.Log.Printf("wanted checksum %s, but got %s", checksum, calculated)
		return nil
	}
	return hashType
}

View File

@ -0,0 +1,23 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "directives",
srcs = [
"add.go",
"delete.go",
"directives.go",
"lookaside.go",
"patch.go",
"replace.go",
"spec_change.go",
],
importmap = "go.resf.org/peridot/vendor/github.com/rocky-linux/srpmproc/pkg/directives",
importpath = "github.com/rocky-linux/srpmproc/pkg/directives",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/bluekeyes/go-gitdiff/gitdiff",
"//vendor/github.com/go-git/go-git/v5:go-git",
"//vendor/github.com/rocky-linux/srpmproc/pb",
"//vendor/github.com/rocky-linux/srpmproc/pkg/data",
],
)

View File

@ -0,0 +1,95 @@
// Copyright (c) 2021 The Srpmproc Authors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package directives
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"github.com/go-git/go-git/v5"
srpmprocpb "github.com/rocky-linux/srpmproc/pb"
"github.com/rocky-linux/srpmproc/pkg/data"
)
// eitherString returns right when it is non-empty; otherwise it falls back
// to left.
func eitherString(left string, right string) string {
	if right == "" {
		return left
	}
	return right
}
// add copies each file listed in the config's Add directives into the push
// tree. A source is either a file read from the patch tree or a blob
// fetched from lookaside storage (in which case the lookaside key is also
// used as the expected checksum and the file is registered in
// md.SourcesToIgnore). Destination paths are normalized by checkAddPrefix.
func add(cfg *srpmprocpb.Cfg, pd *data.ProcessData, md *data.ModeData, patchTree *git.Worktree, pushTree *git.Worktree) error {
	for _, add := range cfg.Add {
		var replacingBytes []byte
		var filePath string
		switch addType := add.Source.(type) {
		case *srpmprocpb.Add_File:
			// explicit Name wins over the source file's basename
			filePath = checkAddPrefix(eitherString(filepath.Base(addType.File), add.Name))
			fPatch, err := patchTree.Filesystem.OpenFile(addType.File, os.O_RDONLY, 0o644)
			if err != nil {
				return errors.New(fmt.Sprintf("COULD_NOT_OPEN_FROM:%s", addType.File))
			}
			replacingBytes, err = ioutil.ReadAll(fPatch)
			// close the source handle in all cases (previously leaked)
			_ = fPatch.Close()
			if err != nil {
				return errors.New(fmt.Sprintf("COULD_NOT_READ_FROM:%s", addType.File))
			}
		case *srpmprocpb.Add_Lookaside:
			filePath = checkAddPrefix(eitherString(filepath.Base(addType.Lookaside), add.Name))
			var err error
			replacingBytes, err = pd.BlobStorage.Read(addType.Lookaside)
			if err != nil {
				return err
			}
			// the lookaside key doubles as the expected content checksum
			hashFunction := pd.CompareHash(replacingBytes, addType.Lookaside)
			if hashFunction == nil {
				return errors.New(fmt.Sprintf("LOOKASIDE_HASH_DOES_NOT_MATCH:%s", addType.Lookaside))
			}
			md.SourcesToIgnore = append(md.SourcesToIgnore, &data.IgnoredSource{
				Name:         filePath,
				HashFunction: hashFunction,
			})
		}
		f, err := pushTree.Filesystem.OpenFile(filePath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o644)
		if err != nil {
			return errors.New(fmt.Sprintf("COULD_NOT_OPEN_DESTINATION:%s", filePath))
		}
		_, err = f.Write(replacingBytes)
		// close the destination handle in all cases (previously leaked)
		_ = f.Close()
		if err != nil {
			// NOTE(review): "DESTIONATION" typo kept verbatim — these strings
			// read like machine-matchable error codes; fix upstream if safe
			return errors.New(fmt.Sprintf("COULD_NOT_WRITE_DESTIONATION:%s", filePath))
		}
	}
	return nil
}

View File

@ -0,0 +1,47 @@
// Copyright (c) 2021 The Srpmproc Authors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package directives
import (
"errors"
"fmt"
"github.com/go-git/go-git/v5"
srpmprocpb "github.com/rocky-linux/srpmproc/pb"
"github.com/rocky-linux/srpmproc/pkg/data"
)
// del removes every file named by the config's Delete directives from the
// push tree. A file that does not exist, or that cannot be removed, is an
// error.
func del(cfg *srpmprocpb.Cfg, _ *data.ProcessData, _ *data.ModeData, _ *git.Worktree, pushTree *git.Worktree) error {
	for _, directive := range cfg.Delete {
		filePath := directive.File
		if _, err := pushTree.Filesystem.Stat(filePath); err != nil {
			return errors.New(fmt.Sprintf("FILE_DOES_NOT_EXIST:%s", filePath))
		}
		if err := pushTree.Filesystem.Remove(filePath); err != nil {
			return errors.New(fmt.Sprintf("COULD_NOT_DELETE_FILE:%s", filePath))
		}
	}
	return nil
}

View File

@ -0,0 +1,65 @@
// Copyright (c) 2021 The Srpmproc Authors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package directives
import (
"path/filepath"
"strings"
"github.com/go-git/go-git/v5"
srpmprocpb "github.com/rocky-linux/srpmproc/pb"
"github.com/rocky-linux/srpmproc/pkg/data"
)
// checkAddPrefix normalizes a destination path for an added file: a path
// already under SOURCES/ or SPECS/ is kept as-is; anything else is placed
// under SOURCES/.
func checkAddPrefix(file string) string {
	for _, prefix := range []string{"SOURCES/", "SPECS/"} {
		if strings.HasPrefix(file, prefix) {
			return file
		}
	}
	return filepath.Join("SOURCES", file)
}
// Apply runs every directive handler (replace, delete, add, patch,
// lookaside, spec change) against the given config and trees, collecting
// rather than short-circuiting on errors. It returns the collected errors,
// or nil when every directive succeeded.
func Apply(cfg *srpmprocpb.Cfg, pd *data.ProcessData, md *data.ModeData, patchTree *git.Worktree, pushTree *git.Worktree) []error {
	handlers := []func(*srpmprocpb.Cfg, *data.ProcessData, *data.ModeData, *git.Worktree, *git.Worktree) error{
		replace,
		del,
		add,
		patch,
		lookaside,
		specChange,
	}

	var errs []error
	for _, handler := range handlers {
		if err := handler(cfg, pd, md, patchTree, pushTree); err != nil {
			errs = append(errs, err)
		}
	}

	if len(errs) == 0 {
		return nil
	}
	return errs
}

View File

@ -0,0 +1,148 @@
// Copyright (c) 2021 The Srpmproc Authors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package directives
import (
"archive/tar"
"bytes"
"compress/gzip"
"crypto/sha256"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"time"
"github.com/go-git/go-git/v5"
srpmprocpb "github.com/rocky-linux/srpmproc/pb"
"github.com/rocky-linux/srpmproc/pkg/data"
)
// lookaside processes the config's Lookaside directives. Files are read
// from the push tree's SOURCES/ directory by default, or from the patch
// tree (at their literal paths) when FromPatchTree is set. With Tar set,
// all listed files are bundled into one gzipped tar written to
// SOURCES/<ArchiveName>.tar.gz in the push tree; otherwise each file is
// registered individually (and, when coming from the patch tree, copied
// into the push tree's SOURCES/ first). Every registered path is appended
// to md.SourcesToIgnore with a fresh sha256 — NOTE(review): presumably a
// later stage hashes and uploads these to lookaside storage; confirm.
func lookaside(cfg *srpmprocpb.Cfg, _ *data.ProcessData, md *data.ModeData, patchTree *git.Worktree, pushTree *git.Worktree) error {
	for _, directive := range cfg.Lookaside {
		var buf bytes.Buffer
		writer := tar.NewWriter(&buf)
		// select which worktree files are read from
		w := pushTree
		if directive.FromPatchTree {
			w = patchTree
		}
		for _, file := range directive.File {
			// NOTE(review): per-directive validation inside the file loop;
			// harmless, but could run once before the loop
			if directive.Tar && directive.ArchiveName == "" {
				return errors.New("TAR_NO_ARCHIVE_NAME")
			}
			// push-tree reads are rooted at SOURCES/; patch-tree paths are literal
			path := filepath.Join("SOURCES", file)
			if directive.FromPatchTree {
				path = file
			}
			stat, err := w.Filesystem.Stat(path)
			if err != nil {
				return errors.New(fmt.Sprintf("COULD_NOT_STAT_FILE:%s", path))
			}
			f, err := w.Filesystem.Open(path)
			if err != nil {
				return errors.New(fmt.Sprintf("COULD_NOT_OPEN_FILE:%s", path))
			}
			bts, err := ioutil.ReadAll(f)
			if err != nil {
				return errors.New(fmt.Sprintf("COULD_NOT_READ_FILE:%s", path))
			}
			if directive.Tar {
				// accumulate into the in-memory tar; flushed after the loop
				hdr := &tar.Header{
					Name: file,
					Mode: int64(stat.Mode()),
					Size: stat.Size(),
				}
				err = writer.WriteHeader(hdr)
				if err != nil {
					return errors.New(fmt.Sprintf("COULD_NOT_WRITE_TAR_HEADER:%s", file))
				}
				_, err = writer.Write(bts)
				if err != nil {
					return errors.New(fmt.Sprintf("COULD_NOT_WRITE_TAR_FILE:%s", file))
				}
			} else {
				if directive.FromPatchTree {
					// copy patch-tree file into the push tree under SOURCES/,
					// flattened to its basename
					pushF, err := pushTree.Filesystem.OpenFile(filepath.Join("SOURCES", filepath.Base(file)), os.O_CREATE|os.O_TRUNC|os.O_RDWR, stat.Mode())
					if err != nil {
						return errors.New(fmt.Sprintf("COULD_NOT_CREATE_FILE_IN_PUSH_TREE:%s", file))
					}
					_, err = pushF.Write(bts)
					if err != nil {
						return errors.New(fmt.Sprintf("COULD_NOT_WRITE_FILE_IN_PUSH_TREE:%s", file))
					}
				}
				// NOTE(review): registers SOURCES/<file> (full relative path),
				// while the FromPatchTree copy above writes SOURCES/<basename>;
				// these differ when file contains directories — confirm intent
				md.SourcesToIgnore = append(md.SourcesToIgnore, &data.IgnoredSource{
					Name:         filepath.Join("SOURCES", file),
					HashFunction: sha256.New(),
				})
			}
		}
		if directive.Tar {
			// finalize the tar, gzip it, and write the archive to the push tree
			err := writer.Close()
			if err != nil {
				return errors.New(fmt.Sprintf("COULD_NOT_CLOSE_TAR:%s", directive.ArchiveName))
			}
			var gbuf bytes.Buffer
			gw := gzip.NewWriter(&gbuf)
			gw.Name = fmt.Sprintf("%s.tar.gz", directive.ArchiveName)
			gw.ModTime = time.Now()
			_, err = gw.Write(buf.Bytes())
			if err != nil {
				return errors.New(fmt.Sprintf("COULD_NOT_WRITE_GZIP:%s", directive.ArchiveName))
			}
			err = gw.Close()
			if err != nil {
				return errors.New(fmt.Sprintf("COULD_NOT_CLOSE_GZIP:%s", directive.ArchiveName))
			}
			path := filepath.Join("SOURCES", fmt.Sprintf("%s.tar.gz", directive.ArchiveName))
			pushF, err := pushTree.Filesystem.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0o644)
			if err != nil {
				return errors.New(fmt.Sprintf("COULD_NOT_CREATE_TAR_FILE:%s", path))
			}
			_, err = pushF.Write(gbuf.Bytes())
			if err != nil {
				return errors.New(fmt.Sprintf("COULD_NOT_WRITE_TAR_FILE:%s", path))
			}
			md.SourcesToIgnore = append(md.SourcesToIgnore, &data.IgnoredSource{
				Name:         path,
				HashFunction: sha256.New(),
			})
		}
	}
	return nil
}

View File

@ -0,0 +1,112 @@
// Copyright (c) 2021 The Srpmproc Authors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package directives
import (
"bytes"
"errors"
"fmt"
"github.com/bluekeyes/go-gitdiff/gitdiff"
"github.com/go-git/go-git/v5"
srpmprocpb "github.com/rocky-linux/srpmproc/pb"
"github.com/rocky-linux/srpmproc/pkg/data"
)
// patch applies gitdiff-format patches from the patch tree to the push
// tree. Unless Strict is set, file paths inside the patch are normalized
// with checkAddPrefix before being resolved. New files are created and
// staged, deleted files are removed from the git index, and modified
// files are rewritten with the patched content.
func patch(cfg *srpmprocpb.Cfg, pd *data.ProcessData, _ *data.ModeData, patchTree *git.Worktree, pushTree *git.Worktree) error {
	for _, patch := range cfg.Patch {
		patchFile, err := patchTree.Filesystem.Open(patch.File)
		if err != nil {
			return errors.New(fmt.Sprintf("COULD_NOT_OPEN_PATCH_FILE:%s", patch.File))
		}
		files, _, err := gitdiff.Parse(patchFile)
		// Close as soon as parsing is done; the handle is not needed below.
		_ = patchFile.Close()
		if err != nil {
			pd.Log.Printf("could not parse patch file: %v", err)
			return errors.New(fmt.Sprintf("COULD_NOT_PARSE_PATCH_FILE:%s", patch.File))
		}

		for _, patchedFile := range files {
			srcPath := patchedFile.NewName
			if !patch.Strict {
				srcPath = checkAddPrefix(patchedFile.NewName)
			}

			// For in-place modifications, apply the patch against the
			// current push-tree content into an in-memory buffer first.
			var output bytes.Buffer
			if !patchedFile.IsDelete && !patchedFile.IsNew {
				patchSubjectFile, err := pushTree.Filesystem.Open(srcPath)
				if err != nil {
					return errors.New(fmt.Sprintf("COULD_NOT_OPEN_PATCH_SUBJECT:%s", srcPath))
				}
				err = gitdiff.NewApplier(patchSubjectFile).ApplyFile(&output, patchedFile)
				_ = patchSubjectFile.Close()
				if err != nil {
					pd.Log.Printf("could not apply patch: %v", err)
					return errors.New(fmt.Sprintf("COULD_NOT_APPLY_PATCH_WITH_SUBJECT:%s", srcPath))
				}
			}

			oldName := patchedFile.OldName
			if !patch.Strict {
				oldName = checkAddPrefix(patchedFile.OldName)
			}
			// Remove both the old and new paths; whichever is needed is
			// recreated below (this also handles renames).
			_ = pushTree.Filesystem.Remove(oldName)
			_ = pushTree.Filesystem.Remove(srcPath)

			if patchedFile.IsNew {
				newFile, err := pushTree.Filesystem.Create(srcPath)
				if err != nil {
					return errors.New(fmt.Sprintf("COULD_NOT_CREATE_NEW_FILE:%s", srcPath))
				}
				err = gitdiff.NewApplier(newFile).ApplyFile(&output, patchedFile)
				if err != nil {
					_ = newFile.Close()
					return errors.New(fmt.Sprintf("COULD_NOT_APPLY_PATCH_TO_NEW_FILE:%s", srcPath))
				}
				_, err = newFile.Write(output.Bytes())
				_ = newFile.Close()
				if err != nil {
					return errors.New(fmt.Sprintf("COULD_NOT_WRITE_TO_NEW_FILE:%s", srcPath))
				}
				_, err = pushTree.Add(srcPath)
				if err != nil {
					return errors.New(fmt.Sprintf("COULD_NOT_ADD_NEW_FILE_TO_GIT:%s", srcPath))
				}
			} else if !patchedFile.IsDelete {
				newFile, err := pushTree.Filesystem.Create(srcPath)
				if err != nil {
					return errors.New(fmt.Sprintf("COULD_NOT_CREATE_POST_PATCH_FILE:%s", srcPath))
				}
				_, err = newFile.Write(output.Bytes())
				_ = newFile.Close()
				if err != nil {
					return errors.New(fmt.Sprintf("COULD_NOT_WRITE_POST_PATCH_FILE:%s", srcPath))
				}
				_, err = pushTree.Add(srcPath)
				if err != nil {
					return errors.New(fmt.Sprintf("COULD_NOT_ADD_POST_PATCH_FILE_TO_GIT:%s", srcPath))
				}
			} else {
				// Deleted file: it is already gone from the filesystem;
				// remove it from the git index as well.
				_, err = pushTree.Remove(oldName)
				if err != nil {
					return errors.New(fmt.Sprintf("COULD_NOT_REMOVE_FILE_FROM_GIT:%s", oldName))
				}
			}
		}
	}

	return nil
}

View File

@ -0,0 +1,94 @@
// Copyright (c) 2021 The Srpmproc Authors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package directives
import (
"errors"
"fmt"
"io/ioutil"
"os"
"github.com/go-git/go-git/v5"
srpmprocpb "github.com/rocky-linux/srpmproc/pb"
"github.com/rocky-linux/srpmproc/pkg/data"
)
// replace overwrites a file in the push tree with content from one of
// three sources: a file in the patch tree, an inline string, or a blob
// fetched from the lookaside cache (verified against its recorded hash).
// The replacement keeps the original file's mode.
func replace(cfg *srpmprocpb.Cfg, pd *data.ProcessData, _ *data.ModeData, patchTree *git.Worktree, pushTree *git.Worktree) error {
	for _, replace := range cfg.Replace {
		filePath := checkAddPrefix(replace.File)

		// Stat first so the replacement file keeps the original mode.
		stat, err := pushTree.Filesystem.Stat(filePath)
		if replace.File == "" || err != nil {
			return fmt.Errorf("INVALID_FILE:%s", filePath)
		}

		err = pushTree.Filesystem.Remove(filePath)
		if err != nil {
			return fmt.Errorf("COULD_NOT_REMOVE_OLD_FILE:%s", filePath)
		}

		f, err := pushTree.Filesystem.OpenFile(filePath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, stat.Mode())
		if err != nil {
			return fmt.Errorf("COULD_NOT_OPEN_REPLACEMENT:%s", filePath)
		}

		// Error returns below abort the whole directive run, so only the
		// happy path bothers closing handles.
		switch replacing := replace.Replacing.(type) {
		case *srpmprocpb.Replace_WithFile:
			fPatch, err := patchTree.Filesystem.OpenFile(replacing.WithFile, os.O_RDONLY, 0o644)
			if err != nil {
				return fmt.Errorf("COULD_NOT_OPEN_REPLACING:%s", replacing.WithFile)
			}
			replacingBytes, err := ioutil.ReadAll(fPatch)
			_ = fPatch.Close()
			if err != nil {
				return fmt.Errorf("COULD_NOT_READ_REPLACING:%s", replacing.WithFile)
			}
			if _, err := f.Write(replacingBytes); err != nil {
				return fmt.Errorf("COULD_NOT_WRITE_REPLACING:%s", replacing.WithFile)
			}
		case *srpmprocpb.Replace_WithInline:
			if _, err := f.Write([]byte(replacing.WithInline)); err != nil {
				return fmt.Errorf("COULD_NOT_WRITE_INLINE:%s", filePath)
			}
		case *srpmprocpb.Replace_WithLookaside:
			bts, err := pd.BlobStorage.Read(replacing.WithLookaside)
			if err != nil {
				return err
			}
			// Verify the downloaded blob against its expected hash before
			// writing it into the tree.
			hasher := pd.CompareHash(bts, replacing.WithLookaside)
			if hasher == nil {
				return errors.New("LOOKASIDE_FILE_AND_HASH_NOT_MATCHING")
			}
			if _, err := f.Write(bts); err != nil {
				return fmt.Errorf("COULD_NOT_WRITE_LOOKASIDE:%s", filePath)
			}
		}

		// Close so the content is flushed before the next directive runs.
		_ = f.Close()
	}

	return nil
}

View File

@ -0,0 +1,496 @@
// Copyright (c) 2021 The Srpmproc Authors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package directives
import (
"errors"
"fmt"
"io/ioutil"
"math"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"github.com/go-git/go-git/v5"
srpmprocpb "github.com/rocky-linux/srpmproc/pb"
"github.com/rocky-linux/srpmproc/pkg/data"
)
const (
	// sectionChangelog identifies the %changelog section, which gets
	// special handling: new changelog entries are emitted right after it.
	sectionChangelog = "%changelog"
)

// sections lists the known spec sections; used to find the insertion
// point for appended section content.
var sections = []string{"%description", "%prep", "%build", "%install", "%files", "%changelog"}
// sourcePatchOperationInLoopRequest is the argument bundle for
// sourcePatchOperationInLoop, invoked for each field line of the spec.
type sourcePatchOperationInLoopRequest struct {
	cfg *srpmprocpb.Cfg
	// field is the field name parsed from the current line, e.g. "Source3".
	field string
	// value points at the field's value; it is blanked to delete the line.
	value        *string
	longestField int
	// lastNum receives the numeric suffix of the last matching field seen.
	lastNum *int
	// in tracks which field group ("Source"/"Patch") the scanner is in.
	in *string
	// expectedField is "Source" or "Patch".
	expectedField string
	operation     srpmprocpb.SpecChange_FileOperation_Type
}
// sourcePatchOperationAfterLoopRequest is the argument bundle for
// sourcePatchOperationAfterLoop, invoked on non-field lines (i.e. once a
// Source/Patch block may have just ended).
type sourcePatchOperationAfterLoopRequest struct {
	cfg *srpmprocpb.Cfg
	// inLoopNum is the last field number seen before the current line.
	inLoopNum int
	// lastNum is the running highest field number; incremented per addition.
	lastNum      *int
	longestField int
	// newLines receives the freshly generated Source/Patch lines.
	newLines *[]string
	// in tracks the current field group; cleared once additions are emitted.
	in *string
	// expectedField is "Source" or "Patch".
	expectedField string
	operation     srpmprocpb.SpecChange_FileOperation_Type
}
// sourcePatchOperationInLoop handles a Source*/Patch* field while the
// spec is being scanned. Fields scheduled for deletion have their value
// blanked (the caller drops lines with empty values), and the numeric
// suffix of numbered fields (e.g. "Source10" -> 10) is recorded in
// req.lastNum so later additions continue from the highest seen index.
func sourcePatchOperationInLoop(req *sourcePatchOperationInLoopRequest) error {
	if strings.HasPrefix(req.field, req.expectedField) {
		for _, file := range req.cfg.SpecChange.File {
			if file.Type != req.operation {
				continue
			}
			switch file.Mode.(type) {
			case *srpmprocpb.SpecChange_FileOperation_Delete:
				// Blank the value; the caller skips empty values, which
				// effectively deletes the line from the rewritten spec.
				if file.Name == *req.value {
					*req.value = ""
				}
			}
		}

		// A bare "Source"/"Patch" field keeps the previous number.
		if req.field != req.expectedField {
			sourceNum, err := strconv.Atoi(strings.Split(req.field, req.expectedField)[1])
			if err != nil {
				return fmt.Errorf("INVALID_%s_NUM:%s", strings.ToUpper(req.expectedField), req.field)
			}
			*req.lastNum = sourceNum
		}
	}

	return nil
}
// sourcePatchOperationAfterLoop emits new Source/Patch lines once the
// scanner has moved past the last existing field of that kind. It fires
// at most once per field group: only when no numbered field was consumed
// in this iteration (inLoopNum == lastNum) and the scanner is still
// "inside" the expected field group. Returns true when additions were
// processed for this line.
func sourcePatchOperationAfterLoop(req *sourcePatchOperationAfterLoopRequest) (bool, error) {
	if req.inLoopNum == *req.lastNum && *req.in == req.expectedField {
		for _, file := range req.cfg.SpecChange.File {
			if file.Type != req.operation {
				continue
			}
			switch file.Mode.(type) {
			case *srpmprocpb.SpecChange_FileOperation_Add:
				fieldNum := *req.lastNum + 1
				field := fmt.Sprintf("%s%d", req.expectedField, fieldNum)
				spaces := calculateSpaces(req.longestField, len(field), req.cfg.SpecChange.DisableAutoAlign)
				*req.newLines = append(*req.newLines, fmt.Sprintf("%s:%s%s", field, spaces, file.Name))

				// New patches can also be wired into %prep via a %patchN
				// invocation, optionally with a -p strip level.
				if req.expectedField == "Patch" && file.AddToPrep {
					val := fmt.Sprintf("%%patch%d", fieldNum)
					if file.NPath > 0 {
						val = fmt.Sprintf("%s -p%d", val, file.NPath)
					}
					req.cfg.SpecChange.Append = append(req.cfg.SpecChange.Append, &srpmprocpb.SpecChange_AppendOperation{
						Field: "%prep",
						Value: val,
					})
				}

				*req.lastNum++
			}
		}

		// Leave the field group so this does not fire again.
		*req.in = ""
		return true, nil
	}

	return false, nil
}
// calculateSpaces returns the padding placed between a spec field's colon
// and its value so that values line up in a column (the longest field name
// plus eight spaces). When auto alignment is disabled a single space is
// returned. The count is clamped to at least one space so an unusually
// long field name cannot make strings.Repeat panic on a negative count.
func calculateSpaces(longestField int, fieldLength int, disableAutoAlign bool) string {
	if disableAutoAlign {
		return " "
	}
	n := longestField + 8 - fieldLength
	if n < 1 {
		n = 1
	}
	return strings.Repeat(" ", n)
}
// searchAndReplaceLine applies the non-field-scoped search-and-replace
// operations to a single spec line and returns the result. StartsWith and
// EndsWith match against the whitespace-trimmed line, but the replacement
// itself runs on the original, untrimmed line. N bounds the number of
// replacements (negative means all, per strings.Replace).
func searchAndReplaceLine(line string, sar []*srpmprocpb.SpecChange_SearchAndReplaceOperation) string {
	for _, searchAndReplace := range sar {
		switch searchAndReplace.Identifier.(type) {
		case *srpmprocpb.SpecChange_SearchAndReplaceOperation_Any:
			line = strings.Replace(line, searchAndReplace.Find, searchAndReplace.Replace, int(searchAndReplace.N))
		case *srpmprocpb.SpecChange_SearchAndReplaceOperation_StartsWith:
			if strings.HasPrefix(strings.TrimSpace(line), searchAndReplace.Find) {
				line = strings.Replace(line, searchAndReplace.Find, searchAndReplace.Replace, int(searchAndReplace.N))
			}
		case *srpmprocpb.SpecChange_SearchAndReplaceOperation_EndsWith:
			if strings.HasSuffix(strings.TrimSpace(line), searchAndReplace.Find) {
				line = strings.Replace(line, searchAndReplace.Find, searchAndReplace.Replace, int(searchAndReplace.N))
			}
		}
	}

	return line
}
// isNextLineSection reports whether the line after lineNum begins a new
// spec section (its trimmed text starts with "%"). Running off the end of
// the file also counts as a section boundary.
func isNextLineSection(lineNum int, lines []string) bool {
	next := lineNum + 1
	if next >= len(lines) {
		return true
	}
	return strings.HasPrefix(strings.TrimSpace(lines[next]), "%")
}
// setFASlice records a pending line addition for the given target line
// number. append handles the nil slice for first-time keys, so no explicit
// initialization is needed.
func setFASlice(futureAdditions map[int][]string, key int, addition string) {
	futureAdditions[key] = append(futureAdditions[key], addition)
}
// strSliceContains reports whether str is an element of slice.
func strSliceContains(slice []string, str string) bool {
	for i := range slice {
		if slice[i] == str {
			return true
		}
	}
	return false
}
// specChange rewrites the package's single spec file under SPECS/
// according to cfg.SpecChange: Source/Patch add and delete operations,
// field and section appends, search-and-replace operations, new fields,
// and changelog entries. The spec is rewritten in a single pass over its
// lines; lines that must appear before a later line are staged in
// futureAdditions, keyed by the 0-based target line number.
func specChange(cfg *srpmprocpb.Cfg, pd *data.ProcessData, md *data.ModeData, _ *git.Worktree, pushTree *git.Worktree) error {
	// no spec change operations present
	// skip parsing spec
	if cfg.SpecChange == nil {
		return nil
	}

	// Exactly one spec file is expected under SPECS/.
	specFiles, err := pushTree.Filesystem.ReadDir("SPECS")
	if err != nil {
		return errors.New("COULD_NOT_READ_SPECS_DIR")
	}
	if len(specFiles) != 1 {
		return errors.New("ONLY_ONE_SPEC_FILE_IS_SUPPORTED")
	}

	filePath := filepath.Join("SPECS", specFiles[0].Name())
	// Stat so the rewritten file keeps the original mode.
	stat, err := pushTree.Filesystem.Stat(filePath)
	if err != nil {
		return errors.New("COULD_NOT_STAT_SPEC_FILE")
	}

	specFile, err := pushTree.Filesystem.OpenFile(filePath, os.O_RDONLY, 0o644)
	if err != nil {
		return errors.New("COULD_NOT_READ_SPEC_FILE")
	}

	specBts, err := ioutil.ReadAll(specFile)
	if err != nil {
		return errors.New("COULD_NOT_READ_ALL_BYTES")
	}

	specStr := string(specBts)
	lines := strings.Split(specStr, "\n")

	var newLines []string
	// futureAdditions: target line number -> lines to emit at that line.
	futureAdditions := map[int][]string{}
	// newFieldMemory: field name -> value -> line number of an existing
	// occurrence (0 marks a field not yet present in the spec).
	newFieldMemory := map[string]map[string]int{}
	lastSourceNum := 0
	lastPatchNum := 0
	inSection := ""
	inField := ""
	lastSource := ""
	lastPatch := ""
	hasPatch := false
	version := ""

	// Derive the version from the import name: the package name (md.Name)
	// is replaced with "1" so the first '-' cleanly separates the name
	// from the version-release part, then the dist suffix (".el"/".module")
	// is stripped. Falls back to the spec's own Version field below.
	importName := strings.Replace(pd.Importer.ImportName(pd, md), md.Name, "1", 1)
	importNameSplit := strings.SplitN(importName, "-", 2)
	if len(importNameSplit) == 2 {
		versionSplit := strings.SplitN(importNameSplit[1], ".el", 2)
		if len(versionSplit) == 2 {
			version = versionSplit[0]
		} else {
			versionSplit := strings.SplitN(importNameSplit[1], ".module", 2)
			if len(versionSplit) == 2 {
				version = versionSplit[0]
			}
		}
	}

	// Matches "Field:" prefixes, i.e. spec preamble field lines.
	fieldValueRegex := regexp.MustCompile("^[a-zA-Z0-9]+:")

	// First pass: find the longest field name (for value alignment), the
	// last Source/Patch field names, and existing NewField occurrences.
	longestField := 0
	for lineNum, line := range lines {
		if fieldValueRegex.MatchString(line) {
			fieldValue := strings.SplitN(line, ":", 2)
			field := strings.TrimSpace(fieldValue[0])
			longestField = int(math.Max(float64(len(field)), float64(longestField)))

			if strings.HasPrefix(field, "Source") {
				lastSource = field
			} else if strings.HasPrefix(field, "Patch") {
				lastPatch = field
				hasPatch = true
			} else {
				for _, nf := range cfg.SpecChange.NewField {
					if field == nf.Key {
						if newFieldMemory[field] == nil {
							newFieldMemory[field] = map[string]int{}
						}
						newFieldMemory[field][nf.Value] = lineNum
					}
				}
			}
		}
	}

	// NewField entries with no existing occurrence are marked with line 0;
	// they are inserted after the Source/Patch block instead (see below).
	for _, nf := range cfg.SpecChange.NewField {
		if newFieldMemory[nf.Key] == nil {
			newFieldMemory[nf.Key] = map[string]int{}
			newFieldMemory[nf.Key][nf.Value] = 0
		}
	}

	// Stage new field lines directly after their existing occurrences.
	for field, nfm := range newFieldMemory {
		for value, lineNum := range nfm {
			if lineNum != 0 {
				newLine := fmt.Sprintf("%s:%s%s", field, calculateSpaces(longestField, len(field), cfg.SpecChange.DisableAutoAlign), value)
				setFASlice(futureAdditions, lineNum+1, newLine)
			}
		}
	}

	// Second pass: rewrite the spec line by line into newLines.
	for lineNum, line := range lines {
		// Snapshot the Source/Patch counters so the "after loop" helpers
		// can tell whether this iteration consumed a numbered field.
		inLoopSourceNum := lastSourceNum
		inLoopPatchNum := lastPatchNum
		prefixLine := strings.TrimSpace(line)

		// Flush any additions staged for this line number.
		for i, additions := range futureAdditions {
			if lineNum == i {
				for _, addition := range additions {
					newLines = append(newLines, addition)
				}
			}
		}

		if fieldValueRegex.MatchString(line) {
			line = searchAndReplaceLine(line, cfg.SpecChange.SearchAndReplace)
			fieldValue := strings.SplitN(line, ":", 2)
			field := strings.TrimSpace(fieldValue[0])
			value := strings.TrimSpace(fieldValue[1])

			if field == lastSource {
				inField = "Source"
			} else if field == lastPatch {
				inField = "Patch"
			}

			// Fall back to the spec's Version field if the import name
			// did not yield a version above.
			if field == "Version" && version == "" {
				version = value
			}

			// Field-scoped search and replace (applied to the value only).
			for _, searchAndReplace := range cfg.SpecChange.SearchAndReplace {
				switch identifier := searchAndReplace.Identifier.(type) {
				case *srpmprocpb.SpecChange_SearchAndReplaceOperation_Field:
					if field == identifier.Field {
						value = strings.Replace(value, searchAndReplace.Find, searchAndReplace.Replace, int(searchAndReplace.N))
					}
					break
				}
			}

			// Field appends; appending to Release also extends the version
			// string used for generated changelog entries.
			for _, appendOp := range cfg.SpecChange.Append {
				if field == appendOp.Field {
					value = value + appendOp.Value

					if field == "Release" {
						version = version + appendOp.Value
					}
				}
			}

			spaces := calculateSpaces(longestField, len(field), cfg.SpecChange.DisableAutoAlign)

			// Apply Source/Patch delete operations and track numbering.
			err := sourcePatchOperationInLoop(&sourcePatchOperationInLoopRequest{
				cfg:           cfg,
				field:         field,
				value:         &value,
				lastNum:       &lastSourceNum,
				longestField:  longestField,
				in:            &inField,
				expectedField: "Source",
				operation:     srpmprocpb.SpecChange_FileOperation_Source,
			})
			if err != nil {
				return err
			}

			err = sourcePatchOperationInLoop(&sourcePatchOperationInLoopRequest{
				cfg:           cfg,
				field:         field,
				value:         &value,
				longestField:  longestField,
				lastNum:       &lastPatchNum,
				in:            &inField,
				expectedField: "Patch",
				operation:     srpmprocpb.SpecChange_FileOperation_Patch,
			})
			if err != nil {
				return err
			}

			// An empty value means the field was deleted above; drop it.
			if value != "" {
				newLines = append(newLines, fmt.Sprintf("%s:%s%s", field, spaces, value))
			}
		} else {
			// Non-field line: the Source/Patch block may have just ended,
			// so emit any newly added Source entries here.
			executed, err := sourcePatchOperationAfterLoop(&sourcePatchOperationAfterLoopRequest{
				cfg:           cfg,
				inLoopNum:     inLoopSourceNum,
				lastNum:       &lastSourceNum,
				longestField:  longestField,
				newLines:      &newLines,
				expectedField: "Source",
				in:            &inField,
				operation:     srpmprocpb.SpecChange_FileOperation_Source,
			})
			if err != nil {
				return err
			}

			// If the spec had no Patch fields at all, new patches go
			// directly after the Source block.
			if executed && !hasPatch {
				newLines = append(newLines, "")
				inField = "Patch"
			}

			executed, err = sourcePatchOperationAfterLoop(&sourcePatchOperationAfterLoopRequest{
				cfg:           cfg,
				inLoopNum:     inLoopPatchNum,
				lastNum:       &lastPatchNum,
				longestField:  longestField,
				newLines:      &newLines,
				expectedField: "Patch",
				in:            &inField,
				operation:     srpmprocpb.SpecChange_FileOperation_Patch,
			})
			if err != nil {
				return err
			}

			// New fields without an existing occurrence (line 0) are
			// inserted after the Source/Patch block, but only right
			// before the next section starts.
			if executed {
				var innerNewLines []string
				for field, nfm := range newFieldMemory {
					for value, ln := range nfm {
						newLine := fmt.Sprintf("%s:%s%s", field, calculateSpaces(longestField, len(field), cfg.SpecChange.DisableAutoAlign), value)
						if ln == 0 {
							if isNextLineSection(lineNum, lines) {
								innerNewLines = append(innerNewLines, newLine)
							}
						}
					}
				}
				if len(innerNewLines) > 0 {
					newLines = append(newLines, "")
					for _, il := range innerNewLines {
						newLines = append(newLines, il)
					}
				}
			}

			// Specs lacking a %changelog section get one appended so the
			// generated entries below have somewhere to go.
			if executed && !strings.Contains(specStr, "%changelog") {
				newLines = append(newLines, "")
				newLines = append(newLines, "%changelog")
				inSection = sectionChangelog
			}

			if inSection == sectionChangelog {
				// Emit new changelog entries at the top of %changelog,
				// newest format: "* <date> <author> <email> - <version>".
				now := time.Now().Format("Mon Jan 02 2006")
				for _, changelog := range cfg.SpecChange.Changelog {
					newLines = append(newLines, fmt.Sprintf("* %s %s <%s> - %s", now, changelog.AuthorName, changelog.AuthorEmail, version))
					for _, msg := range changelog.Message {
						newLines = append(newLines, fmt.Sprintf("- %s", msg))
					}
					newLines = append(newLines, "")
				}
				inSection = ""
			} else {
				line = searchAndReplaceLine(line, cfg.SpecChange.SearchAndReplace)
			}

			// Entering a new section: stage section appends either just
			// before the next known section header or, failing that,
			// after the first blank line that follows.
			if strings.HasPrefix(prefixLine, "%") {
				inSection = prefixLine

				for _, appendOp := range cfg.SpecChange.Append {
					if inSection == appendOp.Field {
						insertedLine := 0
						for i, x := range lines[lineNum+1:] {
							if strSliceContains(sections, strings.TrimSpace(x)) {
								insertedLine = lineNum + i
								setFASlice(futureAdditions, insertedLine, appendOp.Value)
								break
							}
						}
						if insertedLine == 0 {
							for i, x := range lines[lineNum+1:] {
								if strings.TrimSpace(x) == "" {
									insertedLine = lineNum + i + 2
									setFASlice(futureAdditions, insertedLine, appendOp.Value)
									break
								}
							}
						}
					}
				}
			}

			newLines = append(newLines, line)
		}
	}

	// Replace the spec file with the rewritten content, preserving mode.
	err = pushTree.Filesystem.Remove(filePath)
	if err != nil {
		return errors.New(fmt.Sprintf("COULD_NOT_REMOVE_OLD_SPEC_FILE:%s", filePath))
	}

	f, err := pushTree.Filesystem.OpenFile(filePath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, stat.Mode())
	if err != nil {
		return errors.New(fmt.Sprintf("COULD_NOT_OPEN_REPLACEMENT_SPEC_FILE:%s", filePath))
	}

	_, err = f.Write([]byte(strings.Join(newLines, "\n")))
	if err != nil {
		return errors.New("COULD_NOT_WRITE_NEW_SPEC_FILE")
	}

	return nil
}

10
vendor/github.com/rocky-linux/srpmproc/pkg/misc/BUILD generated vendored Normal file
View File

@ -0,0 +1,10 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

# Vendored copy of srpmproc's misc helpers (tag/branch regex utilities).
go_library(
    name = "misc",
    srcs = ["regex.go"],
    importmap = "go.resf.org/peridot/vendor/github.com/rocky-linux/srpmproc/pkg/misc",
    importpath = "github.com/rocky-linux/srpmproc/pkg/misc",
    visibility = ["//visibility:public"],
    deps = ["//vendor/github.com/rocky-linux/srpmproc/pkg/data"],
)

View File

@ -0,0 +1,53 @@
package misc
import (
"fmt"
"path/filepath"
"regexp"
"strings"
"github.com/rocky-linux/srpmproc/pkg/data"
)
// GetTagImportRegex builds the case-insensitive regular expression used to
// match import tags of the form
// "refs/tags/imports/<branch>/<name>-<version>-<release>". In strict
// branch mode only "-stream-*" variants of the branch are allowed beyond
// the exact branch name; otherwise any suffix is accepted. Unset package
// version/release fall back to ".+" wildcards.
func GetTagImportRegex(pd *data.ProcessData) *regexp.Regexp {
	branchRegex := regexp.QuoteMeta(fmt.Sprintf("%s%d%s", pd.ImportBranchPrefix, pd.Version, pd.BranchSuffix))
	if pd.StrictBranchMode {
		branchRegex += "(?:-stream-.+|)"
	} else {
		branchRegex += "(?:.+|)"
	}

	initialVerRegex := regexp.QuoteMeta(filepath.Base(pd.RpmLocation)) + "-"
	if pd.PackageVersion == "" {
		initialVerRegex += ".+-"
	} else {
		initialVerRegex += regexp.QuoteMeta(pd.PackageVersion) + "-"
	}
	if pd.PackageRelease == "" {
		initialVerRegex += ".+"
	} else {
		initialVerRegex += regexp.QuoteMeta(pd.PackageRelease)
	}

	return regexp.MustCompile(fmt.Sprintf("(?i)refs/tags/(imports/(%s)/(%s))", branchRegex, initialVerRegex))
}
// TaglessRefOk decides whether a git reference may be imported when
// running in tagless mode. It accepts the exact
// "refs/heads/<prefix><version><suffix>" branch (like "c9s"), and also
// modular "stream-..." branches that belong to the same major version,
// identified by "rhel-<version>." appearing in the branch name (e.g.
// "refs/heads/stream-httpd-2.4-rhel-9.1.0" for version 9).
func TaglessRefOk(tag string, pd *data.ProcessData) bool {
	// Exact non-modular branch match.
	if tag == fmt.Sprintf("refs/heads/%s%d%s", pd.ImportBranchPrefix, pd.Version, pd.BranchSuffix) {
		return true
	}

	// Modular stream branch belonging to our major version.
	return strings.HasPrefix(tag, "refs/heads/stream-") &&
		strings.Contains(tag, fmt.Sprintf("rhel-%d.", pd.Version))
}

21
vendor/go.temporal.io/sdk/interceptor/BUILD generated vendored Normal file
View File

@ -0,0 +1,21 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

# Vendored copy of the Temporal Go SDK interceptor package.
go_library(
    name = "interceptor",
    srcs = [
        "interceptor.go",
        "tracing_interceptor.go",
    ],
    importmap = "go.resf.org/peridot/vendor/go.temporal.io/sdk/interceptor",
    importpath = "go.temporal.io/sdk/interceptor",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/go.temporal.io/api/common/v1:common",
        "//vendor/go.temporal.io/sdk/activity",
        "//vendor/go.temporal.io/sdk/client",
        "//vendor/go.temporal.io/sdk/converter",
        "//vendor/go.temporal.io/sdk/internal",
        "//vendor/go.temporal.io/sdk/log",
        "//vendor/go.temporal.io/sdk/workflow",
    ],
)

225
vendor/go.temporal.io/sdk/interceptor/interceptor.go generated vendored Normal file
View File

@ -0,0 +1,225 @@
// The MIT License
//
// Copyright (c) 2021 Temporal Technologies Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// Package interceptor contains interceptors for client and worker calls.
package interceptor
import (
"context"
commonpb "go.temporal.io/api/common/v1"
"go.temporal.io/sdk/internal"
"go.temporal.io/sdk/workflow"
)
// Interceptor is a common interface for all interceptors. It combines
// ClientInterceptor and WorkerInterceptor. If an implementation of this
// interceptor is provided via client options, some client calls and all worker
// calls will be intercepted by it. If an implementation of this interceptor is
// provided via worker options, all worker calls will be intercepted by it.
//
// All implementations of this should embed InterceptorBase but are not required
// to.
type Interceptor = internal.Interceptor
// InterceptorBase is a default implementation of Interceptor meant for
// embedding. It simply embeds ClientInterceptorBase and WorkerInterceptorBase.
type InterceptorBase = internal.InterceptorBase
// WorkerInterceptor is an interface for all calls that can be intercepted
// during worker operation. This includes inbound (from server) and outbound
// (from SDK) workflow and activity calls. If an implementation of this is
// provided via client or worker options, all worker calls will be intercepted
// by it.
//
// All implementations must embed WorkerInterceptorBase to safely handle future
// changes.
type WorkerInterceptor = internal.WorkerInterceptor
// WorkerInterceptorBase is a default implementation of WorkerInterceptor that
// simply instantiates ActivityInboundInterceptorBase or
// WorkflowInboundInterceptorBase when called to intercept activities or
// workflows respectively.
//
// This must be embedded into all WorkerInterceptor implementations to safely
// handle future changes.
type WorkerInterceptorBase = internal.WorkerInterceptorBase
// ActivityInboundInterceptor is an interface for all activity calls originating
// from the server. Implementers wanting to intercept outbound (i.e. from SDK)
// activity calls, can change the outbound interceptor in Init before the next
// call in the chain.
//
// All implementations must embed ActivityInboundInterceptorBase to safely
// handle future changes.
type ActivityInboundInterceptor = internal.ActivityInboundInterceptor
// ActivityInboundInterceptorBase is a default implementation of
// ActivityInboundInterceptor that forwards calls to the next inbound
// interceptor and uses an ActivityOutboundInterceptorBase on Init.
//
// This must be embedded into all ActivityInboundInterceptor implementations to
// safely handle future changes.
type ActivityInboundInterceptorBase = internal.ActivityInboundInterceptorBase
// ExecuteActivityInput is input for ActivityInboundInterceptor.ExecuteActivity.
type ExecuteActivityInput = internal.ExecuteActivityInput
// ActivityOutboundInterceptor is an interface for all activity calls
// originating from the SDK.
//
// All implementations must embed ActivityOutboundInterceptorBase to safely
// handle future changes.
type ActivityOutboundInterceptor = internal.ActivityOutboundInterceptor
// ActivityOutboundInterceptorBase is a default implementation of
// ActivityOutboundInterceptor that forwards calls to the next outbound
// interceptor.
//
// This must be embedded into all ActivityOutboundInterceptor implementations to
// safely handle future changes.
type ActivityOutboundInterceptorBase = internal.ActivityOutboundInterceptorBase
// WorkflowInboundInterceptor is an interface for all workflow calls originating
// from the server. Implementers wanting to intercept outbound (i.e. from SDK)
// workflow calls, can change the outbound interceptor in Init before the next
// call in the chain.
//
// All implementations must embed WorkflowInboundInterceptorBase to safely
// handle future changes.
type WorkflowInboundInterceptor = internal.WorkflowInboundInterceptor
// WorkflowInboundInterceptorBase is a default implementation of
// WorkflowInboundInterceptor that forwards calls to the next inbound
// interceptor and uses an WorkflowOutboundInterceptorBase on Init.
//
// This must be embedded into all WorkflowInboundInterceptor implementations to
// safely handle future changes.
type WorkflowInboundInterceptorBase = internal.WorkflowInboundInterceptorBase
// ExecuteWorkflowInput is input for WorkflowInboundInterceptor.ExecuteWorkflow.
type ExecuteWorkflowInput = internal.ExecuteWorkflowInput
// HandleSignalInput is input for WorkflowInboundInterceptor.HandleSignal.
type HandleSignalInput = internal.HandleSignalInput
// HandleQueryInput is input for WorkflowInboundInterceptor.HandleQuery.
type HandleQueryInput = internal.HandleQueryInput
// WorkflowOutboundInterceptor is an interface for all workflow calls
// originating from the SDK.
//
// All implementations must embed WorkflowOutboundInterceptorBase to safely
// handle future changes.
type WorkflowOutboundInterceptor = internal.WorkflowOutboundInterceptor
// WorkflowOutboundInterceptorBase is a default implementation of
// WorkflowOutboundInterceptor that forwards calls to the next outbound
// interceptor.
//
// This must be embedded into all WorkflowOutboundInterceptor implementations to
// safely handle future changes.
type WorkflowOutboundInterceptorBase = internal.WorkflowOutboundInterceptorBase
// ClientInterceptor for providing a ClientOutboundInterceptor to intercept
// certain workflow-specific client calls from the SDK. If an implementation of
// this is provided via client or worker options, certain client calls will be
// intercepted by it.
//
// All implementations must embed ClientInterceptorBase to safely handle future
// changes.
type ClientInterceptor = internal.ClientInterceptor

// ClientInterceptorBase is a default implementation of ClientInterceptor that
// simply instantiates ClientOutboundInterceptorBase when called to intercept
// the client.
//
// This must be embedded into all ClientInterceptor implementations to safely
// handle future changes.
type ClientInterceptorBase = internal.ClientInterceptorBase

// ClientOutboundInterceptor is an interface for certain workflow-specific calls
// originating from the SDK.
//
// All implementations must embed ClientOutboundInterceptorBase to safely handle
// future changes.
type ClientOutboundInterceptor = internal.ClientOutboundInterceptor

// ClientOutboundInterceptorBase is a default implementation of
// ClientOutboundInterceptor that forwards calls to the next outbound
// interceptor.
//
// This must be embedded into all ClientOutboundInterceptor implementations to
// safely handle future changes.
type ClientOutboundInterceptorBase = internal.ClientOutboundInterceptorBase

// ClientExecuteWorkflowInput is input for
// ClientOutboundInterceptor.ExecuteWorkflow.
type ClientExecuteWorkflowInput = internal.ClientExecuteWorkflowInput

// ClientSignalWorkflowInput is input for
// ClientOutboundInterceptor.SignalWorkflow.
type ClientSignalWorkflowInput = internal.ClientSignalWorkflowInput

// ClientSignalWithStartWorkflowInput is input for
// ClientOutboundInterceptor.SignalWithStartWorkflow.
type ClientSignalWithStartWorkflowInput = internal.ClientSignalWithStartWorkflowInput

// ClientCancelWorkflowInput is input for
// ClientOutboundInterceptor.CancelWorkflow.
type ClientCancelWorkflowInput = internal.ClientCancelWorkflowInput

// ClientTerminateWorkflowInput is input for
// ClientOutboundInterceptor.TerminateWorkflow.
type ClientTerminateWorkflowInput = internal.ClientTerminateWorkflowInput

// ClientQueryWorkflowInput is input for
// ClientOutboundInterceptor.QueryWorkflow.
type ClientQueryWorkflowInput = internal.ClientQueryWorkflowInput

// ScheduleClientCreateInput is input for
// ScheduleClientInterceptor.CreateSchedule.
type ScheduleClientCreateInput = internal.ScheduleClientCreateInput
// Header provides Temporal header information from the context for reading or
// writing during specific interceptor calls.
//
// This returns a non-nil map only for contexts inside
// ActivityInboundInterceptor.ExecuteActivity,
// ClientOutboundInterceptor.ExecuteWorkflow, and
// ClientOutboundInterceptor.SignalWithStartWorkflow.
func Header(ctx context.Context) map[string]*commonpb.Payload {
	// Pure delegation; the internal package owns the context key.
	hdr := internal.Header(ctx)
	return hdr
}
// WorkflowHeader provides Temporal header information from the workflow context
// for reading or writing during specific interceptor calls.
//
// This returns a non-nil map only for contexts inside
// WorkflowInboundInterceptor.ExecuteWorkflow,
// WorkflowOutboundInterceptor.ExecuteActivity,
// WorkflowOutboundInterceptor.ExecuteLocalActivity,
// WorkflowOutboundInterceptor.ExecuteChildWorkflow, and
// WorkflowOutboundInterceptor.NewContinueAsNewError.
func WorkflowHeader(ctx workflow.Context) map[string]*commonpb.Payload {
	// Pure delegation; the internal package owns the workflow-context key.
	hdr := internal.WorkflowHeader(ctx)
	return hdr
}

View File

@ -0,0 +1,759 @@
// The MIT License
//
// Copyright (c) 2021 Temporal Technologies Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package interceptor
import (
"context"
"fmt"
"time"
commonpb "go.temporal.io/api/common/v1"
"go.temporal.io/sdk/activity"
"go.temporal.io/sdk/client"
"go.temporal.io/sdk/converter"
"go.temporal.io/sdk/log"
"go.temporal.io/sdk/workflow"
)
// Tag keys attached to spans started by the tracing interceptor below.
const (
	workflowIDTagKey = "temporalWorkflowID"
	runIDTagKey      = "temporalRunID"
	activityIDTagKey = "temporalActivityID"
)

// Tracer is an interface for tracing implementations as used by
// NewTracingInterceptor. Most callers do not use this directly, but rather use
// the opentracing or opentelemetry packages.
//
// All implementations must embed BaseTracer to safely
// handle future changes.
type Tracer interface {
	// Options returns the options for the tracer. This is only called once on
	// initialization.
	Options() TracerOptions

	// UnmarshalSpan unmarshals the given map into a span reference.
	UnmarshalSpan(map[string]string) (TracerSpanRef, error)

	// MarshalSpan marshals the given span into a map. If the map is empty with no
	// error, the span is simply not set.
	MarshalSpan(TracerSpan) (map[string]string, error)

	// SpanFromContext returns the span from the general Go context or nil if not
	// present.
	SpanFromContext(context.Context) TracerSpan

	// ContextWithSpan creates a general Go context with the given span set.
	ContextWithSpan(context.Context, TracerSpan) context.Context

	// StartSpan starts and returns a span with the given options.
	StartSpan(*TracerStartSpanOptions) (TracerSpan, error)

	// GetLogger returns a log.Logger which may include additional fields in its
	// output in order to support correlation of tracing and log data.
	GetLogger(log.Logger, TracerSpanRef) log.Logger

	// mustEmbedBaseTracer is unexported to force embedding of BaseTracer,
	// keeping the interface extensible without breaking implementers.
	mustEmbedBaseTracer()
}

// BaseTracer is a default implementation of Tracer meant for embedding.
type BaseTracer struct{}

// GetLogger returns the logger unchanged; tracer implementations override this
// to attach trace/span correlation fields.
func (BaseTracer) GetLogger(logger log.Logger, ref TracerSpanRef) log.Logger {
	return logger
}

//lint:ignore U1000 Ignore unused method; it is only required to implement the Tracer interface but will never be called.
func (BaseTracer) mustEmbedBaseTracer() {}
// TracerOptions are options returned from Tracer.Options.
type TracerOptions struct {
	// SpanContextKey provides a key to put a span on a context unrelated to how a
	// span might otherwise be put on a context by ContextWithSpan. This should
	// never be nil.
	//
	// This is used internally to set the span on contexts not natively supported
	// by tracing systems such as workflow.Context.
	SpanContextKey interface{}

	// HeaderKey is the key name on the Temporal header to serialize the span to.
	// This should never be empty.
	HeaderKey string

	// DisableSignalTracing can be set to disable signal tracing.
	DisableSignalTracing bool

	// DisableQueryTracing can be set to disable query tracing.
	DisableQueryTracing bool
}

// TracerStartSpanOptions are options for Tracer.StartSpan.
type TracerStartSpanOptions struct {
	// Parent is the optional parent reference of the span.
	Parent TracerSpanRef

	// Operation is the general operation name without the specific name.
	Operation string

	// Name is the specific activity, workflow, etc for the operation.
	Name string

	// Time indicates the start time of the span.
	//
	// For RunWorkflow and RunActivity operation types, this will match workflow.Info.WorkflowStartTime and
	// activity.Info.StartedTime respectively. All other operations use time.Now().
	Time time.Time

	// DependedOn is true if the parent depends on this span or false if it just
	// is related to the parent. In OpenTracing terms, this is true for "ChildOf"
	// reference types and false for "FollowsFrom" reference types.
	DependedOn bool

	// Tags are a set of span tags.
	Tags map[string]string

	// FromHeader is used internally, not by tracer implementations, to determine
	// whether the parent span can be retrieved from the Temporal header.
	FromHeader bool

	// ToHeader is used internally, not by tracer implementations, to determine
	// whether the span should be placed on the Temporal header.
	ToHeader bool

	// IdempotencyKey may optionally be used by tracing implementations to generate
	// deterministic span IDs.
	//
	// This is useful in workflow contexts where spans may need to be "resumed" before
	// ultimately being reported. Generating a deterministic span ID ensures that any
	// child spans created before the parent span is resumed do not become orphaned.
	//
	// IdempotencyKey is not guaranteed to be set for all operations; Tracer
	// implementations MUST therefore ignore zero values for this field.
	//
	// IdempotencyKey should be treated as opaque data by Tracer implementations.
	// Do not attempt to parse it, as the format is subject to change.
	IdempotencyKey string
}

// TracerSpanRef represents a span reference such as a parent.
type TracerSpanRef interface{}

// TracerSpan represents a span.
type TracerSpan interface {
	TracerSpanRef

	// Finish is called when the span is complete.
	Finish(*TracerFinishSpanOptions)
}

// TracerFinishSpanOptions are options for TracerSpan.Finish.
type TracerFinishSpanOptions struct {
	// Error is present if there was an error in the code traced by this specific
	// span.
	Error error
}
// tracingInterceptor is the root interceptor created by NewTracingInterceptor.
// It holds the user-supplied tracer plus its (validated) options and fans out
// to client/activity/workflow-specific interceptors.
type tracingInterceptor struct {
	InterceptorBase
	tracer  Tracer
	options TracerOptions
}
// NewTracingInterceptor creates a new interceptor using the given tracer. Most
// callers do not use this directly, but rather use the opentracing or
// opentelemetry packages. This panics if options are not set as expected.
func NewTracingInterceptor(tracer Tracer) Interceptor {
	opts := tracer.Options()
	// Both keys are required for header/context propagation; fail fast.
	switch {
	case opts.SpanContextKey == nil:
		panic("missing span context key")
	case opts.HeaderKey == "":
		panic("missing header key")
	}
	return &tracingInterceptor{tracer: tracer, options: opts}
}
// InterceptClient wraps the next client outbound interceptor with tracing.
func (t *tracingInterceptor) InterceptClient(next ClientOutboundInterceptor) ClientOutboundInterceptor {
	wrapped := &tracingClientOutboundInterceptor{root: t}
	wrapped.Next = next
	return wrapped
}
// InterceptActivity wraps the next activity inbound interceptor with tracing.
func (t *tracingInterceptor) InterceptActivity(
	ctx context.Context,
	next ActivityInboundInterceptor,
) ActivityInboundInterceptor {
	wrapped := &tracingActivityInboundInterceptor{root: t}
	wrapped.Next = next
	return wrapped
}
// InterceptWorkflow wraps the next workflow inbound interceptor with tracing,
// capturing the workflow info once for idempotency-key generation.
func (t *tracingInterceptor) InterceptWorkflow(
	ctx workflow.Context,
	next WorkflowInboundInterceptor,
) WorkflowInboundInterceptor {
	wrapped := &tracingWorkflowInboundInterceptor{root: t, info: workflow.GetInfo(ctx)}
	wrapped.Next = next
	return wrapped
}
// tracingClientOutboundInterceptor traces workflow-specific client calls
// (start, signal, query, schedule) made outside a workflow context.
type tracingClientOutboundInterceptor struct {
	ClientOutboundInterceptorBase
	root *tracingInterceptor
}
// CreateSchedule starts a "CreateSchedule" span, writes it to the Temporal
// header, and records any error from the next interceptor on the span.
func (t *tracingClientOutboundInterceptor) CreateSchedule(ctx context.Context, in *ScheduleClientCreateInput) (client.ScheduleHandle, error) {
	span, spanCtx, err := t.root.startSpanFromContext(ctx, &TracerStartSpanOptions{
		Operation: "CreateSchedule",
		Name:      in.Options.ID,
		ToHeader:  true,
		Time:      time.Now(),
	})
	if err != nil {
		return nil, err
	}
	// Finish runs after the call below; Error is filled in first.
	var finish TracerFinishSpanOptions
	defer span.Finish(&finish)
	handle, err := t.Next.CreateSchedule(spanCtx, in)
	finish.Error = err
	return handle, err
}
// ExecuteWorkflow starts a "StartWorkflow" span tagged with the workflow ID,
// writes it to the Temporal header, and records any error on the span.
func (t *tracingClientOutboundInterceptor) ExecuteWorkflow(
	ctx context.Context,
	in *ClientExecuteWorkflowInput,
) (client.WorkflowRun, error) {
	span, spanCtx, err := t.root.startSpanFromContext(ctx, &TracerStartSpanOptions{
		Operation: "StartWorkflow",
		Name:      in.WorkflowType,
		Tags:      map[string]string{workflowIDTagKey: in.Options.ID},
		ToHeader:  true,
		Time:      time.Now(),
	})
	if err != nil {
		return nil, err
	}
	var finish TracerFinishSpanOptions
	defer span.Finish(&finish)
	run, err := t.Next.ExecuteWorkflow(spanCtx, in)
	finish.Error = err
	return run, err
}
// SignalWorkflow starts a "SignalWorkflow" span around the signal call unless
// signal tracing is disabled in the tracer options.
func (t *tracingClientOutboundInterceptor) SignalWorkflow(ctx context.Context, in *ClientSignalWorkflowInput) error {
	if t.root.options.DisableSignalTracing {
		// Tracing opted out; pass straight through.
		return t.Next.SignalWorkflow(ctx, in)
	}
	span, spanCtx, err := t.root.startSpanFromContext(ctx, &TracerStartSpanOptions{
		Operation: "SignalWorkflow",
		Name:      in.SignalName,
		Tags:      map[string]string{workflowIDTagKey: in.WorkflowID},
		ToHeader:  true,
		Time:      time.Now(),
	})
	if err != nil {
		return err
	}
	var finish TracerFinishSpanOptions
	defer span.Finish(&finish)
	err = t.Next.SignalWorkflow(spanCtx, in)
	finish.Error = err
	return err
}
// SignalWithStartWorkflow starts a "SignalWithStartWorkflow" span tagged with
// the workflow ID, writes it to the Temporal header so the workflow run can
// link to it, and records any error from the next interceptor on the span.
func (t *tracingClientOutboundInterceptor) SignalWithStartWorkflow(
	ctx context.Context,
	in *ClientSignalWithStartWorkflowInput,
) (client.WorkflowRun, error) {
	// Start span and write to header
	span, ctx, err := t.root.startSpanFromContext(ctx, &TracerStartSpanOptions{
		Operation: "SignalWithStartWorkflow",
		Name:      in.WorkflowType,
		Tags:      map[string]string{workflowIDTagKey: in.Options.ID},
		ToHeader:  true,
		// Fix: set an explicit start time for consistency with ExecuteWorkflow,
		// SignalWorkflow, and QueryWorkflow. Previously the zero time.Time was
		// passed through to Tracer.StartSpan for this operation only.
		Time: time.Now(),
	})
	if err != nil {
		return nil, err
	}
	var finishOpts TracerFinishSpanOptions
	defer span.Finish(&finishOpts)
	run, err := t.Next.SignalWithStartWorkflow(ctx, in)
	finishOpts.Error = err
	return run, err
}
// QueryWorkflow starts a "QueryWorkflow" span around the query call unless
// query tracing is disabled in the tracer options.
func (t *tracingClientOutboundInterceptor) QueryWorkflow(
	ctx context.Context,
	in *ClientQueryWorkflowInput,
) (converter.EncodedValue, error) {
	if t.root.options.DisableQueryTracing {
		// Tracing opted out; pass straight through.
		return t.Next.QueryWorkflow(ctx, in)
	}
	span, spanCtx, err := t.root.startSpanFromContext(ctx, &TracerStartSpanOptions{
		Operation: "QueryWorkflow",
		Name:      in.QueryType,
		Tags:      map[string]string{workflowIDTagKey: in.WorkflowID},
		ToHeader:  true,
		Time:      time.Now(),
	})
	if err != nil {
		return nil, err
	}
	var finish TracerFinishSpanOptions
	defer span.Finish(&finish)
	val, err := t.Next.QueryWorkflow(spanCtx, in)
	finish.Error = err
	return val, err
}
// tracingActivityOutboundInterceptor enriches activity-side outbound calls
// (currently only GetLogger) with tracing information.
type tracingActivityOutboundInterceptor struct {
	ActivityOutboundInterceptorBase
	root *tracingInterceptor
}
// GetLogger returns the next logger, letting the tracer add correlation
// fields when a span is present on the context.
func (t *tracingActivityOutboundInterceptor) GetLogger(ctx context.Context) log.Logger {
	logger := t.Next.GetLogger(ctx)
	span := t.root.tracer.SpanFromContext(ctx)
	if span == nil {
		return logger
	}
	return t.root.tracer.GetLogger(logger, span)
}
// tracingActivityInboundInterceptor traces activity execution on the worker
// side, reading the parent span from the incoming Temporal header.
type tracingActivityInboundInterceptor struct {
	ActivityInboundInterceptorBase
	root *tracingInterceptor
}
// Init wraps the outbound interceptor with the tracing activity outbound
// interceptor before handing it to the next inbound interceptor.
func (t *tracingActivityInboundInterceptor) Init(outbound ActivityOutboundInterceptor) error {
	wrapped := &tracingActivityOutboundInterceptor{root: t.root}
	wrapped.Next = outbound
	return t.Next.Init(wrapped)
}
// ExecuteActivity wraps activity execution in a "RunActivity" span whose
// parent is read from the Temporal header; the span's start time matches the
// activity's recorded start, and any handler error is recorded on the span.
func (t *tracingActivityInboundInterceptor) ExecuteActivity(
	ctx context.Context,
	in *ExecuteActivityInput,
) (interface{}, error) {
	actInfo := activity.GetInfo(ctx)
	span, spanCtx, err := t.root.startSpanFromContext(ctx, &TracerStartSpanOptions{
		Operation:  "RunActivity",
		Name:       actInfo.ActivityType.Name,
		DependedOn: true,
		Tags: map[string]string{
			workflowIDTagKey: actInfo.WorkflowExecution.ID,
			runIDTagKey:      actInfo.WorkflowExecution.RunID,
			activityIDTagKey: actInfo.ActivityID,
		},
		FromHeader: true,
		Time:       actInfo.StartedTime,
	})
	if err != nil {
		return nil, err
	}
	var finish TracerFinishSpanOptions
	defer span.Finish(&finish)
	result, err := t.Next.ExecuteActivity(spanCtx, in)
	finish.Error = err
	return result, err
}
// tracingWorkflowInboundInterceptor traces workflow execution, signals, and
// queries on the worker side. spanCounter feeds deterministic idempotency
// keys; info is captured at interception time in InterceptWorkflow.
type tracingWorkflowInboundInterceptor struct {
	WorkflowInboundInterceptorBase
	root        *tracingInterceptor
	spanCounter uint16
	info        *workflow.Info
}
// newIdempotencyKey bumps the per-workflow span counter and combines it with
// the interceptor type, workflow namespace, workflow ID, and run ID to form a
// deterministic key for span-ID generation.
func (t *tracingWorkflowInboundInterceptor) newIdempotencyKey() string {
	t.spanCounter++
	exec := t.info.WorkflowExecution
	return fmt.Sprintf("WorkflowInboundInterceptor:%s:%s:%s:%d",
		t.info.Namespace, exec.ID, exec.RunID, t.spanCounter)
}
// Init wraps the outbound interceptor with the tracing workflow outbound
// interceptor before handing it to the next inbound interceptor.
func (t *tracingWorkflowInboundInterceptor) Init(outbound WorkflowOutboundInterceptor) error {
	wrapped := &tracingWorkflowOutboundInterceptor{root: t.root}
	wrapped.Next = outbound
	return t.Next.Init(wrapped)
}
// ExecuteWorkflow wraps workflow execution in a "RunWorkflow" span whose
// parent is read from the Temporal header. The span start matches the
// workflow's recorded start time, and an idempotency key keeps span IDs
// deterministic across replays.
func (t *tracingWorkflowInboundInterceptor) ExecuteWorkflow(
	ctx workflow.Context,
	in *ExecuteWorkflowInput,
) (interface{}, error) {
	// Start span reading from header
	span, ctx, err := t.root.startSpanFromWorkflowContext(ctx, &TracerStartSpanOptions{
		Operation: "RunWorkflow",
		Name:      t.info.WorkflowType.Name,
		Tags: map[string]string{
			workflowIDTagKey: t.info.WorkflowExecution.ID,
			runIDTagKey:      t.info.WorkflowExecution.RunID,
		},
		FromHeader:     true,
		Time:           t.info.WorkflowStartTime,
		IdempotencyKey: t.newIdempotencyKey(),
	})
	if err != nil {
		return nil, err
	}
	// Finish fires after the workflow body returns; Error is set just before.
	var finishOpts TracerFinishSpanOptions
	defer span.Finish(&finishOpts)
	ret, err := t.Next.ExecuteWorkflow(ctx, in)
	finishOpts.Error = err
	return ret, err
}
// HandleSignal wraps signal delivery in a "HandleSignal" span. Tracing is
// skipped when disabled in options or during replay (so replayed signals do
// not re-report spans).
func (t *tracingWorkflowInboundInterceptor) HandleSignal(ctx workflow.Context, in *HandleSignalInput) error {
	// Only add tracing if enabled and not replaying
	if t.root.options.DisableSignalTracing || workflow.IsReplaying(ctx) {
		return t.Next.HandleSignal(ctx, in)
	}
	// Start span reading from header
	info := workflow.GetInfo(ctx)
	span, ctx, err := t.root.startSpanFromWorkflowContext(ctx, &TracerStartSpanOptions{
		Operation: "HandleSignal",
		Name:      in.SignalName,
		Tags: map[string]string{
			workflowIDTagKey: info.WorkflowExecution.ID,
			runIDTagKey:      info.WorkflowExecution.RunID,
		},
		FromHeader: true,
		Time:       time.Now(),
		// Signals are recorded in history, so a deterministic key is safe here.
		IdempotencyKey: t.newIdempotencyKey(),
	})
	if err != nil {
		return err
	}
	var finishOpts TracerFinishSpanOptions
	defer span.Finish(&finishOpts)
	err = t.Next.HandleSignal(ctx, in)
	finishOpts.Error = err
	return err
}
// HandleQuery wraps query handling in a "HandleQuery" span. Tracing is
// skipped when disabled in options or during replay.
func (t *tracingWorkflowInboundInterceptor) HandleQuery(
	ctx workflow.Context,
	in *HandleQueryInput,
) (interface{}, error) {
	// Only add tracing if enabled and not replaying
	if t.root.options.DisableQueryTracing || workflow.IsReplaying(ctx) {
		return t.Next.HandleQuery(ctx, in)
	}
	// Start span reading from header
	info := workflow.GetInfo(ctx)
	span, ctx, err := t.root.startSpanFromWorkflowContext(ctx, &TracerStartSpanOptions{
		Operation: "HandleQuery",
		Name:      in.QueryType,
		Tags: map[string]string{
			workflowIDTagKey: info.WorkflowExecution.ID,
			runIDTagKey:      info.WorkflowExecution.RunID,
		},
		FromHeader: true,
		Time:       time.Now(),
		// We intentionally do not set IdempotencyKey here because queries are not recorded in
		// workflow history. When the tracing interceptor's span counter is reset between workflow
		// replays, old queries will not be processed which could result in idempotency key
		// collisions with other queries or signals.
	})
	if err != nil {
		return nil, err
	}
	var finishOpts TracerFinishSpanOptions
	defer span.Finish(&finishOpts)
	val, err := t.Next.HandleQuery(ctx, in)
	finishOpts.Error = err
	return val, err
}
// tracingWorkflowOutboundInterceptor traces calls made from inside a workflow
// (activities, child workflows, signals), skipping span creation on replay.
type tracingWorkflowOutboundInterceptor struct {
	WorkflowOutboundInterceptorBase
	root *tracingInterceptor
}
// ExecuteActivity starts a "StartActivity" span (written to the Temporal
// header) before scheduling the activity; on replay a no-op span is used.
func (t *tracingWorkflowOutboundInterceptor) ExecuteActivity(
	ctx workflow.Context,
	activityType string,
	args ...interface{},
) workflow.Future {
	// futErr is a future already resolved with the error, not a plain error.
	span, spanCtx, futErr := t.startNonReplaySpan(ctx, "StartActivity", activityType, true)
	if futErr != nil {
		return futErr
	}
	defer span.Finish(&TracerFinishSpanOptions{})
	return t.Next.ExecuteActivity(spanCtx, activityType, args...)
}
// ExecuteLocalActivity starts a span before scheduling the local activity; on
// replay a no-op span is used. NOTE(review): the operation name deliberately
// mirrors ExecuteActivity's "StartActivity" — confirm this is intended before
// differentiating it.
func (t *tracingWorkflowOutboundInterceptor) ExecuteLocalActivity(
	ctx workflow.Context,
	activityType string,
	args ...interface{},
) workflow.Future {
	// futErr is a future already resolved with the error, not a plain error.
	span, spanCtx, futErr := t.startNonReplaySpan(ctx, "StartActivity", activityType, true)
	if futErr != nil {
		return futErr
	}
	defer span.Finish(&TracerFinishSpanOptions{})
	return t.Next.ExecuteLocalActivity(spanCtx, activityType, args...)
}
// GetLogger returns the next logger, letting the tracer add correlation
// fields when a span is stored under the configured context key.
func (t *tracingWorkflowOutboundInterceptor) GetLogger(ctx workflow.Context) log.Logger {
	logger := t.Next.GetLogger(ctx)
	span, _ := ctx.Value(t.root.options.SpanContextKey).(TracerSpan)
	if span == nil {
		return logger
	}
	return t.root.tracer.GetLogger(logger, span)
}
// ExecuteChildWorkflow starts a "StartChildWorkflow" span (written to the
// Temporal header) before scheduling the child; on replay a no-op span is used.
func (t *tracingWorkflowOutboundInterceptor) ExecuteChildWorkflow(
	ctx workflow.Context,
	childWorkflowType string,
	args ...interface{},
) workflow.ChildWorkflowFuture {
	// futErr is a future already resolved with the error, not a plain error.
	span, spanCtx, futErr := t.startNonReplaySpan(ctx, "StartChildWorkflow", childWorkflowType, false)
	if futErr != nil {
		return futErr
	}
	defer span.Finish(&TracerFinishSpanOptions{})
	return t.Next.ExecuteChildWorkflow(spanCtx, childWorkflowType, args...)
}
// SignalExternalWorkflow starts a "SignalExternalWorkflow" span around the
// signal, unless signal tracing is disabled; on replay a no-op span is used.
func (t *tracingWorkflowOutboundInterceptor) SignalExternalWorkflow(
	ctx workflow.Context,
	workflowID string,
	runID string,
	signalName string,
	arg interface{},
) workflow.Future {
	// Start span writing to header if enabled
	if !t.root.options.DisableSignalTracing {
		var span TracerSpan
		// futErr is a future already resolved with the error, not a plain error.
		var futErr workflow.ChildWorkflowFuture
		span, ctx, futErr = t.startNonReplaySpan(ctx, "SignalExternalWorkflow", signalName, false)
		if futErr != nil {
			return futErr
		}
		defer span.Finish(&TracerFinishSpanOptions{})
	}
	return t.Next.SignalExternalWorkflow(ctx, workflowID, runID, signalName, arg)
}
// SignalChildWorkflow starts a "SignalChildWorkflow" span around the signal,
// unless signal tracing is disabled; on replay a no-op span is used.
func (t *tracingWorkflowOutboundInterceptor) SignalChildWorkflow(
	ctx workflow.Context,
	workflowID string,
	signalName string,
	arg interface{},
) workflow.Future {
	// Start span writing to header if enabled
	if !t.root.options.DisableSignalTracing {
		var span TracerSpan
		// futErr is a future already resolved with the error, not a plain error.
		var futErr workflow.ChildWorkflowFuture
		span, ctx, futErr = t.startNonReplaySpan(ctx, "SignalChildWorkflow", signalName, false)
		if futErr != nil {
			return futErr
		}
		defer span.Finish(&TracerFinishSpanOptions{})
	}
	return t.Next.SignalChildWorkflow(ctx, workflowID, signalName, arg)
}
// NewContinueAsNewError delegates creation of the continue-as-new error and,
// when not replaying, writes the current span (if any) to the workflow header
// so the continued run can pick it up as a parent.
func (t *tracingWorkflowOutboundInterceptor) NewContinueAsNewError(
	ctx workflow.Context,
	wfn interface{},
	args ...interface{},
) error {
	err := t.Next.NewContinueAsNewError(ctx, wfn, args...)
	if !workflow.IsReplaying(ctx) {
		// Only propagate the span when the "error" really is continue-as-new.
		if contErr, _ := err.(*workflow.ContinueAsNewError); contErr != nil {
			// Get the current span and write header
			if span, _ := ctx.Value(t.root.options.SpanContextKey).(TracerSpan); span != nil {
				if writeErr := t.root.writeSpanToHeader(span, WorkflowHeader(ctx)); writeErr != nil {
					return fmt.Errorf("failed writing span when creating continue as new error: %w", writeErr)
				}
			}
		}
	}
	return err
}
// nopSpan is a TracerSpan whose Finish does nothing; it is returned instead of
// a real span while a workflow is replaying.
type nopSpan struct{}

func (nopSpan) Finish(*TracerFinishSpanOptions) {}
// startNonReplaySpan starts a span tagged with the workflow/run IDs and writes
// it to the Temporal header, unless the workflow is replaying, in which case a
// nopSpan is returned so callers can unconditionally Finish it.
//
// Span always returned, even in replay. futErr is non-nil on error.
func (t *tracingWorkflowOutboundInterceptor) startNonReplaySpan(
	ctx workflow.Context,
	operation string,
	name string,
	dependedOn bool,
) (span TracerSpan, newCtx workflow.Context, futErr workflow.ChildWorkflowFuture) {
	// Noop span if replaying
	if workflow.IsReplaying(ctx) {
		return nopSpan{}, ctx, nil
	}
	info := workflow.GetInfo(ctx)
	span, newCtx, err := t.root.startSpanFromWorkflowContext(ctx, &TracerStartSpanOptions{
		Operation:  operation,
		Name:       name,
		DependedOn: dependedOn,
		Tags: map[string]string{
			workflowIDTagKey: info.WorkflowExecution.ID,
			runIDTagKey:      info.WorkflowExecution.RunID,
		},
		ToHeader: true,
		Time:     time.Now(),
	})
	if err != nil {
		// Errors surface as an already-failed future, matching the signatures
		// of the Execute* outbound calls.
		return nopSpan{}, ctx, newErrFut(ctx, err)
	}
	return span, newCtx, nil
}
// startSpanFromContext starts a span whose parent is taken from the Go
// context (falling back to the header per options), then returns a context
// carrying the new span both under SpanContextKey and via the tracer's own
// context mechanism.
func (t *tracingInterceptor) startSpanFromContext(
	ctx context.Context,
	options *TracerStartSpanOptions,
) (TracerSpan, context.Context, error) {
	// Try to get parent from context
	options.Parent = t.tracer.SpanFromContext(ctx)
	span, err := t.startSpan(ctx, Header(ctx), options)
	if err != nil {
		return nil, nil, err
	}
	return span, t.tracer.ContextWithSpan(context.WithValue(ctx, t.options.SpanContextKey, span), span), nil
}
// startSpanFromWorkflowContext starts a span using the workflow header and
// returns a workflow context carrying the new span under SpanContextKey.
// Unlike startSpanFromContext, the parent is resolved inside startSpan (from
// header or context) since tracers cannot read workflow.Context natively.
func (t *tracingInterceptor) startSpanFromWorkflowContext(
	ctx workflow.Context,
	options *TracerStartSpanOptions,
) (TracerSpan, workflow.Context, error) {
	span, err := t.startSpan(ctx, WorkflowHeader(ctx), options)
	if err != nil {
		return nil, nil, err
	}
	return span, workflow.WithValue(ctx, t.options.SpanContextKey, span), nil
}
// startSpan resolves the parent (header first if allowed, then context),
// starts the span via the tracer, and optionally serializes it back to the
// header. The ctx parameter only needs Value lookup, so it accepts both
// context.Context and workflow.Context.
//
// Note, this does not put the span on the context
func (t *tracingInterceptor) startSpan(
	ctx interface{ Value(interface{}) interface{} },
	header map[string]*commonpb.Payload,
	options *TracerStartSpanOptions,
) (TracerSpan, error) {
	// Get parent span from header if not already present and allowed
	if options.Parent == nil && options.FromHeader {
		if span, err := t.readSpanFromHeader(header); err != nil {
			return nil, err
		} else if span != nil {
			options.Parent = span
		}
	}
	// If no parent span, try to get from context
	if options.Parent == nil {
		options.Parent, _ = ctx.Value(t.options.SpanContextKey).(TracerSpan)
	}
	// Start the span
	span, err := t.tracer.StartSpan(options)
	if err != nil {
		return nil, err
	}
	// Put span in header if wanted
	if options.ToHeader && header != nil {
		if err := t.writeSpanToHeader(span, header); err != nil {
			return nil, err
		}
	}
	return span, nil
}
// readSpanFromHeader deserializes a span reference from the Temporal header,
// returning (nil, nil) when no span payload is present.
func (t *tracingInterceptor) readSpanFromHeader(header map[string]*commonpb.Payload) (TracerSpanRef, error) {
	payload, ok := header[t.options.HeaderKey]
	if !ok || payload == nil {
		return nil, nil
	}
	// Decode the payload into the tracer's string map representation.
	var fields map[string]string
	if err := converter.GetDefaultDataConverter().FromPayload(payload, &fields); err != nil {
		return nil, err
	}
	return t.tracer.UnmarshalSpan(fields)
}
// writeSpanToHeader serializes the span into the Temporal header. An empty
// marshal result means the tracer opted out, which is not an error.
func (t *tracingInterceptor) writeSpanToHeader(span TracerSpan, header map[string]*commonpb.Payload) error {
	data, err := t.tracer.MarshalSpan(span)
	if err != nil {
		return err
	}
	if len(data) == 0 {
		// Nothing to record for this span.
		return nil
	}
	payload, err := converter.GetDefaultDataConverter().ToPayload(data)
	if err != nil {
		return err
	}
	header[t.options.HeaderKey] = payload
	return nil
}
// newErrFut returns a ChildWorkflowFuture that is already resolved with err.
func newErrFut(ctx workflow.Context, err error) workflow.ChildWorkflowFuture {
	future, settable := workflow.NewFuture(ctx)
	settable.SetError(err)
	return errFut{future}
}
// errFut adapts a plain Future into a ChildWorkflowFuture; the child-specific
// methods simply return the (failed) future itself.
type errFut struct{ workflow.Future }

func (e errFut) GetChildWorkflowExecution() workflow.Future { return e }

func (e errFut) SignalChildWorkflow(ctx workflow.Context, signalName string, data interface{}) workflow.Future {
	return e
}

12
vendor/modules.txt vendored
View File

@ -216,6 +216,9 @@ github.com/beorn7/perks/quantile
# github.com/blang/semver/v4 v4.0.0
## explicit; go 1.14
github.com/blang/semver/v4
# github.com/bluekeyes/go-gitdiff v0.5.0
## explicit; go 1.13
github.com/bluekeyes/go-gitdiff/gitdiff
# github.com/bmatcuk/doublestar/v4 v4.6.0
## explicit; go 1.16
github.com/bmatcuk/doublestar/v4
@ -932,6 +935,13 @@ github.com/robfig/cron
# github.com/robfig/cron/v3 v3.0.1
## explicit; go 1.12
github.com/robfig/cron/v3
# github.com/rocky-linux/srpmproc v0.5.0
## explicit; go 1.18
github.com/rocky-linux/srpmproc/pb
github.com/rocky-linux/srpmproc/pkg/blob
github.com/rocky-linux/srpmproc/pkg/data
github.com/rocky-linux/srpmproc/pkg/directives
github.com/rocky-linux/srpmproc/pkg/misc
# github.com/russellhaering/goxmldsig v1.4.0
## explicit; go 1.15
github.com/russellhaering/goxmldsig
@ -1270,6 +1280,7 @@ go.temporal.io/api/workflowservicemock/v1
go.temporal.io/sdk/activity
go.temporal.io/sdk/client
go.temporal.io/sdk/converter
go.temporal.io/sdk/interceptor
go.temporal.io/sdk/internal
go.temporal.io/sdk/internal/common
go.temporal.io/sdk/internal/common/backoff
@ -1918,3 +1929,4 @@ sigs.k8s.io/yaml
# go.resf.org/peridot/tools/mothership/pb => ./bazel-bin/tools/mothership/proto/v1/mothershippb_go_proto_/go.resf.org/peridot/tools/mothership/pb
# go.resf.org/peridot/third_party/bazel/src/main/protobuf => ./bazel-bin/third_party/bazel/src/main/protobuf/blaze_query_go_proto_/go.resf.org/peridot/third_party/bazel/src/main/protobuf
# go.resf.org/peridot/tools/mothership/admin/pb => ./bazel-bin/tools/mothership/proto/admin/v1/mshipadminpb_go_proto_/go.resf.org/peridot/tools/mothership/admin/pb
# google.golang.org/genproto/googleapis/longrunning => ./bazel-bin/third_party/googleapis/google/longrunning/longrunning_go_proto_/google.golang.org/genproto/googleapis/longrunning