diff --git a/.github/actions/run-tests/action.yml b/.github/actions/run-tests/action.yml index 09cb3dae..f548c0a6 100644 --- a/.github/actions/run-tests/action.yml +++ b/.github/actions/run-tests/action.yml @@ -54,7 +54,7 @@ runs: } } }; - var args = ['test', '-v', '-cover', '-coverprofile=coverage.txt', '-covermode=atomic', '-timeout', '15m']; + var args = ['test', '-v', '-cover', '-coverprofile=coverage.txt', '-covermode=atomic', '-timeout', '20m']; var filter = process.env.FILTER; if(filter) { args.push('-run'); diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index b364c6f4..0ba0ad56 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -8,7 +8,6 @@ concurrency: env: ACT_OWNER: ${{ github.repository_owner }} ACT_REPOSITORY: ${{ github.repository }} - GO_VERSION: 1.18 CGO_ENABLED: 0 jobs: @@ -21,12 +20,13 @@ jobs: fetch-depth: 0 - uses: actions/setup-go@v4 with: - go-version: ${{ env.GO_VERSION }} + go-version-file: go.mod check-latest: true - - uses: golangci/golangci-lint-action@v3.4.0 + - uses: golangci/golangci-lint-action@v3.6.0 with: - version: v1.47.2 - - uses: megalinter/megalinter/flavors/go@v7.0.2 + version: v1.53 + only-new-issues: true + - uses: megalinter/megalinter/flavors/go@v7.1.0 env: DEFAULT_BRANCH: master GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -45,7 +45,7 @@ jobs: uses: docker/setup-qemu-action@v2 - uses: actions/setup-go@v4 with: - go-version: ${{ env.GO_VERSION }} + go-version-file: go.mod check-latest: true - uses: actions/cache@v3 if: ${{ !env.ACT }} @@ -80,7 +80,7 @@ jobs: fetch-depth: 2 - uses: actions/setup-go@v4 with: - go-version: ${{ env.GO_VERSION }} + go-version-file: go.mod check-latest: true - name: Run Tests uses: ./.github/actions/run-tests @@ -95,7 +95,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: ${{ env.GO_VERSION }} + go-version-file: go.mod check-latest: true - uses: actions/cache@v3 if: ${{ !env.ACT }} @@ -108,7 +108,7 @@ jobs: uses: goreleaser/goreleaser-action@v4 with: version: latest - args: release --snapshot --rm-dist + args: release --snapshot --clean - name: Capture x86_64 (64-bit) Linux binary if: ${{ !env.ACT }} uses: actions/upload-artifact@v3 diff --git a/.github/workflows/promote.yml b/.github/workflows/promote.yml index 7ea6c2ab..d6cd1eab 100644 --- a/.github/workflows/promote.yml +++ b/.github/workflows/promote.yml @@ -4,9 +4,6 @@ on: - cron: '0 2 1 * *' workflow_dispatch: {} -env: - GO_VERSION: 1.18 - jobs: release: name: promote @@ -20,7 +17,7 @@ jobs: - uses: fregante/setup-git-user@v2 - uses: actions/setup-go@v4 with: - go-version: ${{ env.GO_VERSION }} + go-version-file: go.mod check-latest: true - uses: actions/cache@v3 if: ${{ !env.ACT }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index d990730d..9d4a45ab 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -4,9 +4,6 @@ on: tags: - v* -env: - GO_VERSION: 1.18 - jobs: release: name: release @@ -17,7 +14,7 @@ jobs: fetch-depth: 0 - uses: actions/setup-go@v4 with: - go-version: ${{ env.GO_VERSION }} + go-version-file: go.mod check-latest: true - uses: actions/cache@v3 if: ${{ !env.ACT }} @@ -30,7 +27,7 @@ jobs: uses: goreleaser/goreleaser-action@v4 with: version: latest - args: release --rm-dist + args: release --clean env: GITHUB_TOKEN: ${{ secrets.GORELEASER_GITHUB_TOKEN }} - name: Chocolatey diff --git a/act/artifactcache/handler.go b/act/artifactcache/handler.go index f11def68..b6600b6b 100644 --- 
a/act/artifactcache/handler.go +++ b/act/artifactcache/handler.go @@ -27,7 +27,7 @@ const ( ) type Handler struct { - db *bolthold.Store + dir string storage *Storage router *httprouter.Router listener net.Listener @@ -62,19 +62,7 @@ func StartHandler(dir, outboundIP string, port uint16, logger logrus.FieldLogger return nil, err } - db, err := bolthold.Open(filepath.Join(dir, "bolt.db"), 0o644, &bolthold.Options{ - Encoder: json.Marshal, - Decoder: json.Unmarshal, - Options: &bbolt.Options{ - Timeout: 5 * time.Second, - NoGrowSync: bbolt.DefaultOptions.NoGrowSync, - FreelistType: bbolt.DefaultOptions.FreelistType, - }, - }) - if err != nil { - return nil, err - } - h.db = db + h.dir = dir storage, err := NewStorage(filepath.Join(dir, "cache")) if err != nil { @@ -150,16 +138,21 @@ func (h *Handler) Close() error { } h.listener = nil } - if h.db != nil { - err := h.db.Close() - if err != nil { - retErr = err - } - h.db = nil - } return retErr } +func (h *Handler) openDB() (*bolthold.Store, error) { + return bolthold.Open(filepath.Join(h.dir, "bolt.db"), 0o644, &bolthold.Options{ + Encoder: json.Marshal, + Decoder: json.Unmarshal, + Options: &bbolt.Options{ + Timeout: 5 * time.Second, + NoGrowSync: bbolt.DefaultOptions.NoGrowSync, + FreelistType: bbolt.DefaultOptions.FreelistType, + }, + }) +} + // GET /_apis/artifactcache/cache func (h *Handler) find(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { keys := strings.Split(r.URL.Query().Get("keys"), ",") @@ -169,7 +162,14 @@ func (h *Handler) find(w http.ResponseWriter, r *http.Request, _ httprouter.Para } version := r.URL.Query().Get("version") - cache, err := h.findCache(keys, version) + db, err := h.openDB() + if err != nil { + h.responseJSON(w, r, 500, err) + return + } + defer db.Close() + + cache, err := h.findCache(db, keys, version) if err != nil { h.responseJSON(w, r, 500, err) return @@ -183,7 +183,7 @@ func (h *Handler) find(w http.ResponseWriter, r *http.Request, _ httprouter.Para h.responseJSON(w, r, 500, err) return } else if !ok { - _ = h.db.Delete(cache.ID, cache) + _ = db.Delete(cache.ID, cache) h.responseJSON(w, r, 204) return } @@ -206,7 +206,13 @@ func (h *Handler) reserve(w http.ResponseWriter, r *http.Request, _ httprouter.P cache := api.ToCache() cache.FillKeyVersionHash() - if err := h.db.FindOne(cache, bolthold.Where("KeyVersionHash").Eq(cache.KeyVersionHash)); err != nil { + db, err := h.openDB() + if err != nil { + h.responseJSON(w, r, 500, err) + return + } + defer db.Close() + if err := db.FindOne(cache, bolthold.Where("KeyVersionHash").Eq(cache.KeyVersionHash)); err != nil { if !errors.Is(err, bolthold.ErrNotFound) { h.responseJSON(w, r, 500, err) return @@ -219,12 +225,12 @@ func (h *Handler) reserve(w http.ResponseWriter, r *http.Request, _ httprouter.P now := time.Now().Unix() cache.CreatedAt = now cache.UsedAt = now - if err := h.db.Insert(bolthold.NextSequence(), cache); err != nil { + if err := db.Insert(bolthold.NextSequence(), cache); err != nil { h.responseJSON(w, r, 500, err) return } // write back id to db - if err := h.db.Update(cache.ID, cache); err != nil { + if err := db.Update(cache.ID, cache); err != nil { h.responseJSON(w, r, 500, err) return } @@ -242,7 +248,13 @@ func (h *Handler) upload(w http.ResponseWriter, r *http.Request, params httprout } cache := &Cache{} - if err := h.db.Get(id, cache); err != nil { + db, err := h.openDB() + if err != nil { + h.responseJSON(w, r, 500, err) + return + } + defer db.Close() + if err := db.Get(id, cache); err != nil { if errors.Is(err, 
bolthold.ErrNotFound) { h.responseJSON(w, r, 400, fmt.Errorf("cache %d: not reserved", id)) return @@ -255,6 +267,7 @@ func (h *Handler) upload(w http.ResponseWriter, r *http.Request, params httprout h.responseJSON(w, r, 400, fmt.Errorf("cache %v %q: already complete", cache.ID, cache.Key)) return } + db.Close() start, _, err := parseContentRange(r.Header.Get("Content-Range")) if err != nil { h.responseJSON(w, r, 400, err) @@ -276,7 +289,13 @@ func (h *Handler) commit(w http.ResponseWriter, r *http.Request, params httprout } cache := &Cache{} - if err := h.db.Get(id, cache); err != nil { + db, err := h.openDB() + if err != nil { + h.responseJSON(w, r, 500, err) + return + } + defer db.Close() + if err := db.Get(id, cache); err != nil { if errors.Is(err, bolthold.ErrNotFound) { h.responseJSON(w, r, 400, fmt.Errorf("cache %d: not reserved", id)) return @@ -290,13 +309,25 @@ func (h *Handler) commit(w http.ResponseWriter, r *http.Request, params httprout return } - if err := h.storage.Commit(cache.ID, cache.Size); err != nil { + db.Close() + + size, err := h.storage.Commit(cache.ID, cache.Size) + if err != nil { h.responseJSON(w, r, 500, err) return } + // write real size back to cache, it may be different from the current value when the request doesn't specify it. + cache.Size = size + + db, err = h.openDB() + if err != nil { + h.responseJSON(w, r, 500, err) + return + } + defer db.Close() cache.Complete = true - if err := h.db.Update(cache.ID, cache); err != nil { + if err := db.Update(cache.ID, cache); err != nil { h.responseJSON(w, r, 500, err) return } @@ -332,7 +363,7 @@ func (h *Handler) middleware(handler httprouter.Handle) httprouter.Handle { } // if not found, return (nil, nil) instead of an error. -func (h *Handler) findCache(keys []string, version string) (*Cache, error) { +func (h *Handler) findCache(db *bolthold.Store, keys []string, version string) (*Cache, error) { if len(keys) == 0 { return nil, nil } @@ -344,7 +375,7 @@ func (h *Handler) findCache(keys []string, version string) (*Cache, error) { } cache.FillKeyVersionHash() - if err := h.db.FindOne(cache, bolthold.Where("KeyVersionHash").Eq(cache.KeyVersionHash)); err != nil { + if err := db.FindOne(cache, bolthold.Where("KeyVersionHash").Eq(cache.KeyVersionHash)); err != nil { if !errors.Is(err, bolthold.ErrNotFound) { return nil, err } @@ -355,7 +386,7 @@ func (h *Handler) findCache(keys []string, version string) (*Cache, error) { for _, prefix := range keys[1:] { found := false - if err := h.db.ForEach(bolthold.Where("Key").Ge(prefix).And("Version").Eq(version).SortBy("Key"), func(v *Cache) error { + if err := db.ForEach(bolthold.Where("Key").Ge(prefix).And("Version").Eq(version).SortBy("Key"), func(v *Cache) error { if !strings.HasPrefix(v.Key, prefix) { return stop } @@ -378,12 +409,17 @@ func (h *Handler) findCache(keys []string, version string) (*Cache, error) { } func (h *Handler) useCache(id int64) { + db, err := h.openDB() + if err != nil { + return + } + defer db.Close() cache := &Cache{} - if err := h.db.Get(id, cache); err != nil { + if err := db.Get(id, cache); err != nil { return } cache.UsedAt = time.Now().Unix() - _ = h.db.Update(cache.ID, cache) + _ = db.Update(cache.ID, cache) } func (h *Handler) gcCache() { @@ -408,8 +444,14 @@ func (h *Handler) gcCache() { keepTemp = 5 * time.Minute ) + db, err := h.openDB() + if err != nil { + return + } + defer db.Close() + var caches []*Cache - if err := h.db.Find(&caches, bolthold.Where("UsedAt").Lt(time.Now().Add(-keepTemp).Unix())); err != nil { + if err := 
db.Find(&caches, bolthold.Where("UsedAt").Lt(time.Now().Add(-keepTemp).Unix())); err != nil { h.logger.Warnf("find caches: %v", err) } else { for _, cache := range caches { @@ -417,7 +459,7 @@ func (h *Handler) gcCache() { continue } h.storage.Remove(cache.ID) - if err := h.db.Delete(cache.ID, cache); err != nil { + if err := db.Delete(cache.ID, cache); err != nil { h.logger.Warnf("delete cache: %v", err) continue } @@ -426,12 +468,12 @@ func (h *Handler) gcCache() { } caches = caches[:0] - if err := h.db.Find(&caches, bolthold.Where("UsedAt").Lt(time.Now().Add(-keepUnused).Unix())); err != nil { + if err := db.Find(&caches, bolthold.Where("UsedAt").Lt(time.Now().Add(-keepUnused).Unix())); err != nil { h.logger.Warnf("find caches: %v", err) } else { for _, cache := range caches { h.storage.Remove(cache.ID) - if err := h.db.Delete(cache.ID, cache); err != nil { + if err := db.Delete(cache.ID, cache); err != nil { h.logger.Warnf("delete cache: %v", err) continue } @@ -440,12 +482,12 @@ func (h *Handler) gcCache() { } caches = caches[:0] - if err := h.db.Find(&caches, bolthold.Where("CreatedAt").Lt(time.Now().Add(-keepUsed).Unix())); err != nil { + if err := db.Find(&caches, bolthold.Where("CreatedAt").Lt(time.Now().Add(-keepUsed).Unix())); err != nil { h.logger.Warnf("find caches: %v", err) } else { for _, cache := range caches { h.storage.Remove(cache.ID) - if err := h.db.Delete(cache.ID, cache); err != nil { + if err := db.Delete(cache.ID, cache); err != nil { h.logger.Warnf("delete cache: %v", err) continue } diff --git a/act/artifactcache/handler_test.go b/act/artifactcache/handler_test.go index 7c6840a1..35ec7537 100644 --- a/act/artifactcache/handler_test.go +++ b/act/artifactcache/handler_test.go @@ -25,7 +25,10 @@ func TestHandler(t *testing.T) { defer func() { t.Run("inpect db", func(t *testing.T) { - require.NoError(t, handler.db.Bolt().View(func(tx *bbolt.Tx) error { + db, err := handler.openDB() + require.NoError(t, err) + defer db.Close() + require.NoError(t, db.Bolt().View(func(tx *bbolt.Tx) error { return tx.Bucket([]byte("Cache")).ForEach(func(k, v []byte) error { t.Logf("%s: %s", k, v) return nil @@ -36,7 +39,6 @@ func TestHandler(t *testing.T) { require.NoError(t, handler.Close()) assert.Nil(t, handler.server) assert.Nil(t, handler.listener) - assert.Nil(t, handler.db) _, err := http.Post(fmt.Sprintf("%s/caches/%d", base, 1), "", nil) assert.Error(t, err) }) diff --git a/act/artifactcache/model.go b/act/artifactcache/model.go index 5c288995..32b8ce51 100644 --- a/act/artifactcache/model.go +++ b/act/artifactcache/model.go @@ -15,11 +15,17 @@ func (c *Request) ToCache() *Cache { if c == nil { return nil } - return &Cache{ + ret := &Cache{ Key: c.Key, Version: c.Version, Size: c.Size, } + if c.Size == 0 { + // So the request comes from old versions of actions, like `actions/cache@v2`. + // It doesn't send cache size. Set it to -1 to indicate that. 
+ ret.Size = -1 + } + return ret } type Cache struct { diff --git a/act/artifactcache/storage.go b/act/artifactcache/storage.go index a49c94e3..9a2609af 100644 --- a/act/artifactcache/storage.go +++ b/act/artifactcache/storage.go @@ -46,7 +46,7 @@ func (s *Storage) Write(id uint64, offset int64, reader io.Reader) error { return err } -func (s *Storage) Commit(id uint64, size int64) error { +func (s *Storage) Commit(id uint64, size int64) (int64, error) { defer func() { _ = os.RemoveAll(s.tempDir(id)) }() @@ -54,15 +54,15 @@ func (s *Storage) Commit(id uint64, size int64) error { name := s.filename(id) tempNames, err := s.tempNames(id) if err != nil { - return err + return 0, err } if err := os.MkdirAll(filepath.Dir(name), 0o755); err != nil { - return err + return 0, err } file, err := os.Create(name) if err != nil { - return err + return 0, err } defer file.Close() @@ -70,22 +70,26 @@ func (s *Storage) Commit(id uint64, size int64) error { for _, v := range tempNames { f, err := os.Open(v) if err != nil { - return err + return 0, err } n, err := io.Copy(file, f) _ = f.Close() if err != nil { - return err + return 0, err } written += n } - if written != size { + // If size is less than 0, it means the size is unknown. + // We can't check the size of the file, just skip the check. + // It happens when the request comes from old versions of actions, like `actions/cache@v2`. + if size >= 0 && written != size { _ = file.Close() _ = os.Remove(name) - return fmt.Errorf("broken file: %v != %v", written, size) + return 0, fmt.Errorf("broken file: %v != %v", written, size) } - return nil + + return written, nil } func (s *Storage) Serve(w http.ResponseWriter, r *http.Request, id uint64) { diff --git a/act/artifacts/server.go b/act/artifacts/server.go index d0c7a6aa..4b88ea40 100644 --- a/act/artifacts/server.go +++ b/act/artifacts/server.go @@ -79,7 +79,7 @@ func (fwfs readWriteFSImpl) OpenAppendable(name string) (WritableFile, error) { return nil, err } - _, err = file.Seek(0, os.SEEK_END) + _, err = file.Seek(0, io.SeekEnd) if err != nil { return nil, err } @@ -223,9 +223,13 @@ func downloads(router *httprouter.Router, baseDir string, fsys fs.FS) { // if it was upload as gzip rel = strings.TrimSuffix(rel, gzipExtension) + path := filepath.Join(itemPath, rel) + + rel = filepath.ToSlash(rel) + path = filepath.ToSlash(path) files = append(files, ContainerItem{ - Path: filepath.Join(itemPath, rel), + Path: path, ItemType: "file", ContentLocation: fmt.Sprintf("http://%s/artifact/%s/%s/%s", req.Host, container, itemPath, rel), }) diff --git a/act/artifacts/server_test.go b/act/artifacts/server_test.go index 943820ca..aeeb0598 100644 --- a/act/artifacts/server_test.go +++ b/act/artifacts/server_test.go @@ -14,10 +14,11 @@ import ( "testing/fstest" "github.com/julienschmidt/httprouter" - "github.com/nektos/act/pkg/model" - "github.com/nektos/act/pkg/runner" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" + + "github.com/nektos/act/pkg/model" + "github.com/nektos/act/pkg/runner" ) type writableMapFile struct { @@ -238,9 +239,11 @@ type TestJobFileInfo struct { containerArchitecture string } -var artifactsPath = path.Join(os.TempDir(), "test-artifacts") -var artifactsAddr = "127.0.0.1" -var artifactsPort = "12345" +var ( + artifactsPath = path.Join(os.TempDir(), "test-artifacts") + artifactsAddr = "127.0.0.1" + artifactsPort = "12345" +) func TestArtifactFlow(t *testing.T) { if testing.Short() { @@ -253,7 +256,7 @@ func TestArtifactFlow(t *testing.T) { defer cancel() platforms := 
map[string]string{ - "ubuntu-latest": "node:16-buster-slim", + "ubuntu-latest": "node:16-buster", // Don't use node:16-buster-slim because it doesn't have the curl command, which is used in the tests } tables := []TestJobFileInfo{ diff --git a/act/artifacts/testdata/GHSL-2023-004/artifacts.yml b/act/artifacts/testdata/GHSL-2023-004/artifacts.yml index e717f141..ec801c35 100644 --- a/act/artifacts/testdata/GHSL-2023-004/artifacts.yml +++ b/act/artifacts/testdata/GHSL-2023-004/artifacts.yml @@ -8,9 +8,7 @@ jobs: steps: - run: echo "hello world" > test.txt - name: curl upload - uses: wei/curl@v1 - with: - args: -s --fail ${ACTIONS_RUNTIME_URL}upload/1?itemPath=../../my-artifact/secret.txt --upload-file test.txt + run: curl --silent --show-error --fail ${ACTIONS_RUNTIME_URL}upload/1?itemPath=../../my-artifact/secret.txt --upload-file test.txt - uses: actions/download-artifact@v2 with: name: my-artifact @@ -27,9 +25,7 @@ jobs: exit 1 fi - name: Verify download should work by clean extra dots - uses: wei/curl@v1 - with: - args: --path-as-is -s -o out.txt --fail ${ACTIONS_RUNTIME_URL}artifact/1/../../../1/my-artifact/secret.txt + run: curl --silent --show-error --fail --path-as-is -o out.txt ${ACTIONS_RUNTIME_URL}artifact/1/../../../1/my-artifact/secret.txt - name: 'Verify download content' run: | file="out.txt" diff --git a/act/common/executor.go b/act/common/executor.go index c5b05f3b..a5eb079b 100644 --- a/act/common/executor.go +++ b/act/common/executor.go @@ -3,6 +3,8 @@ package common import ( "context" "fmt" + + log "github.com/sirupsen/logrus" ) // Warning that implements `error` but safe to ignore @@ -94,6 +96,11 @@ func NewParallelExecutor(parallel int, executors ...Executor) Executor { work := make(chan Executor, len(executors)) errs := make(chan error, len(executors)) + if 1 > parallel { + log.Infof("Parallel tasks (%d) below minimum, setting to 1", parallel) + parallel = 1 + } + for i := 0; i < parallel; i++ { go func(work <-chan Executor, errs chan<- error) { for executor := range work { diff --git a/act/common/executor_test.go b/act/common/executor_test.go index 7f691e42..e70c638e 100644 --- a/act/common/executor_test.go +++ b/act/common/executor_test.go @@ -100,6 +100,17 @@ func TestNewParallelExecutor(t *testing.T) { assert.Equal(3, count, "should run all 3 executors") assert.Equal(2, maxCount, "should run at most 2 executors in parallel") assert.Nil(err) + + // Reset to test running the executor with 0 parallelism + count = 0 + activeCount = 0 + maxCount = 0 + + errSingle := NewParallelExecutor(0, emptyWorkflow, emptyWorkflow, emptyWorkflow)(ctx) + + assert.Equal(3, count, "should run all 3 executors") + assert.Equal(1, maxCount, "should run at most 1 executor in parallel") + assert.Nil(errSingle) } func TestNewParallelExecutorFailed(t *testing.T) { diff --git a/act/common/git/git.go b/act/common/git/git.go index 954c2cc4..bf771558 100644 --- a/act/common/git/git.go +++ b/act/common/git/git.go @@ -174,7 +174,7 @@ func FindGithubRepo(ctx context.Context, file, githubInstance, remoteName string return slug, err } -func findGitRemoteURL(ctx context.Context, file, remoteName string) (string, error) { +func findGitRemoteURL(_ context.Context, file, remoteName string) (string, error) { repo, err := git.PlainOpenWithOptions( file, &git.PlainOpenOptions{ diff --git a/act/container/docker_auth.go b/act/container/docker_auth.go index e47fe64a..9c263f55 100644 --- a/act/container/docker_auth.go +++ b/act/container/docker_auth.go @@ -8,16 +8,16 @@ import ( "github.com/docker/cli/cli/config" 
"github.com/docker/cli/cli/config/credentials" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/registry" "github.com/nektos/act/pkg/common" ) -func LoadDockerAuthConfig(ctx context.Context, image string) (types.AuthConfig, error) { +func LoadDockerAuthConfig(ctx context.Context, image string) (registry.AuthConfig, error) { logger := common.Logger(ctx) config, err := config.Load(config.Dir()) if err != nil { logger.Warnf("Could not load docker config: %v", err) - return types.AuthConfig{}, err + return registry.AuthConfig{}, err } if !config.ContainsAuth() { @@ -33,13 +33,13 @@ func LoadDockerAuthConfig(ctx context.Context, image string) (types.AuthConfig, authConfig, err := config.GetAuthConfig(hostName) if err != nil { logger.Warnf("Could not get auth config from docker config: %v", err) - return types.AuthConfig{}, err + return registry.AuthConfig{}, err } - return types.AuthConfig(authConfig), nil + return registry.AuthConfig(authConfig), nil } -func LoadDockerAuthConfigs(ctx context.Context) map[string]types.AuthConfig { +func LoadDockerAuthConfigs(ctx context.Context) map[string]registry.AuthConfig { logger := common.Logger(ctx) config, err := config.Load(config.Dir()) if err != nil { @@ -52,9 +52,9 @@ func LoadDockerAuthConfigs(ctx context.Context) map[string]types.AuthConfig { } creds, _ := config.GetAllCredentials() - authConfigs := make(map[string]types.AuthConfig, len(creds)) + authConfigs := make(map[string]registry.AuthConfig, len(creds)) for k, v := range creds { - authConfigs[k] = types.AuthConfig(v) + authConfigs[k] = registry.AuthConfig(v) } return authConfigs diff --git a/act/container/docker_pull.go b/act/container/docker_pull.go index 6fb29613..ad75958f 100644 --- a/act/container/docker_pull.go +++ b/act/container/docker_pull.go @@ -11,6 +11,7 @@ import ( "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/registry" "github.com/nektos/act/pkg/common" ) @@ -82,7 +83,7 @@ func getImagePullOptions(ctx context.Context, input NewDockerPullExecutorInput) if input.Username != "" && input.Password != "" { logger.Debugf("using authentication for docker pull") - authConfig := types.AuthConfig{ + authConfig := registry.AuthConfig{ Username: input.Username, Password: input.Password, } diff --git a/act/container/docker_run.go b/act/container/docker_run.go index 99d43fee..409cfa95 100644 --- a/act/container/docker_run.go +++ b/act/container/docker_run.go @@ -661,7 +661,7 @@ func (cr *containerReference) tryReadGID() common.Executor { return cr.tryReadID("-g", func(id int) { cr.GID = id }) } -func (cr *containerReference) waitForCommand(ctx context.Context, isTerminal bool, resp types.HijackedResponse, idResp types.IDResponse, user string, workdir string) error { +func (cr *containerReference) waitForCommand(ctx context.Context, isTerminal bool, resp types.HijackedResponse, _ types.IDResponse, _ string, _ string) error { logger := common.Logger(ctx) cmdResponse := make(chan error) diff --git a/act/container/docker_run_test.go b/act/container/docker_run_test.go index f288cf64..2a2007af 100644 --- a/act/container/docker_run_test.go +++ b/act/container/docker_run_test.go @@ -82,7 +82,7 @@ type endlessReader struct { io.Reader } -func (r endlessReader) Read(p []byte) (n int, err error) { +func (r endlessReader) Read(_ []byte) (n int, err error) { return 1, nil } diff --git a/act/container/docker_volume.go b/act/container/docker_volume.go index 6eafd33c..0bb2cd7c 100644 --- 
a/act/container/docker_volume.go +++ b/act/container/docker_volume.go @@ -6,10 +6,11 @@ import ( "context" "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/volume" "github.com/nektos/act/pkg/common" ) -func NewDockerVolumeRemoveExecutor(volume string, force bool) common.Executor { +func NewDockerVolumeRemoveExecutor(volumeName string, force bool) common.Executor { return func(ctx context.Context) error { cli, err := GetDockerClient(ctx) if err != nil { @@ -17,14 +18,14 @@ func NewDockerVolumeRemoveExecutor(volume string, force bool) common.Executor { } defer cli.Close() - list, err := cli.VolumeList(ctx, filters.NewArgs()) + list, err := cli.VolumeList(ctx, volume.ListOptions{Filters: filters.NewArgs()}) if err != nil { return err } for _, vol := range list.Volumes { - if vol.Name == volume { - return removeExecutor(volume, force)(ctx) + if vol.Name == volumeName { + return removeExecutor(volumeName, force)(ctx) } } diff --git a/act/container/host_environment.go b/act/container/host_environment.go index 3b7fc514..4801b273 100644 --- a/act/container/host_environment.go +++ b/act/container/host_environment.go @@ -34,7 +34,7 @@ type HostEnvironment struct { StdOut io.Writer } -func (e *HostEnvironment) Create(capAdd []string, capDrop []string) common.Executor { +func (e *HostEnvironment) Create(_ []string, _ []string) common.Executor { return func(ctx context.Context) error { return nil } @@ -146,13 +146,13 @@ func (e *HostEnvironment) GetContainerArchive(ctx context.Context, srcPath strin return io.NopCloser(buf), nil } -func (e *HostEnvironment) Pull(forcePull bool) common.Executor { +func (e *HostEnvironment) Pull(_ bool) common.Executor { return func(ctx context.Context) error { return nil } } -func (e *HostEnvironment) Start(attach bool) common.Executor { +func (e *HostEnvironment) Start(_ bool) common.Executor { return func(ctx context.Context) error { return nil } @@ -246,7 +246,7 @@ func copyPtyOutput(writer io.Writer, ppty io.Reader, finishLog context.CancelFun } } -func (e *HostEnvironment) UpdateFromImageEnv(env *map[string]string) common.Executor { +func (e *HostEnvironment) UpdateFromImageEnv(_ *map[string]string) common.Executor { return func(ctx context.Context) error { return nil } @@ -260,7 +260,7 @@ func getEnvListFromMap(env map[string]string) []string { return envList } -func (e *HostEnvironment) exec(ctx context.Context, command []string, cmdline string, env map[string]string, user, workdir string) error { +func (e *HostEnvironment) exec(ctx context.Context, command []string, cmdline string, env map[string]string, _, workdir string) error { envList := getEnvListFromMap(env) var wd string if workdir != "" { @@ -417,7 +417,7 @@ func goOsToActionOs(os string) string { return os } -func (e *HostEnvironment) GetRunnerContext(ctx context.Context) map[string]interface{} { +func (e *HostEnvironment) GetRunnerContext(_ context.Context) map[string]interface{} { return map[string]interface{}{ "os": goOsToActionOs(runtime.GOOS), "arch": goArchToActionArch(runtime.GOARCH), @@ -426,7 +426,7 @@ func (e *HostEnvironment) GetRunnerContext(ctx context.Context) map[string]inter } } -func (e *HostEnvironment) ReplaceLogWriter(stdout io.Writer, stderr io.Writer) (io.Writer, io.Writer) { +func (e *HostEnvironment) ReplaceLogWriter(stdout io.Writer, _ io.Writer) (io.Writer, io.Writer) { org := e.StdOut e.StdOut = stdout return org, org diff --git a/act/container/util.go b/act/container/util.go index eb7f46c6..96143a59 100644 --- a/act/container/util.go +++ 
b/act/container/util.go @@ -9,7 +9,7 @@ import ( "github.com/creack/pty" ) -func getSysProcAttr(cmdLine string, tty bool) *syscall.SysProcAttr { +func getSysProcAttr(_ string, tty bool) *syscall.SysProcAttr { if tty { return &syscall.SysProcAttr{ Setsid: true, diff --git a/act/exprparser/interpreter.go b/act/exprparser/interpreter.go index b30c0b77..f6f8234f 100644 --- a/act/exprparser/interpreter.go +++ b/act/exprparser/interpreter.go @@ -149,7 +149,7 @@ func (impl *interperterImpl) evaluateNode(exprNode actionlint.ExprNode) (interfa } } -// nolint:gocyclo +//nolint:gocyclo func (impl *interperterImpl) evaluateVariable(variableNode *actionlint.VariableNode) (interface{}, error) { switch strings.ToLower(variableNode.Name) { case "github": diff --git a/act/model/workflow.go b/act/model/workflow.go index 6aba2970..f769c3a7 100644 --- a/act/model/workflow.go +++ b/act/model/workflow.go @@ -441,6 +441,7 @@ func (j *Job) GetMatrixes() ([]map[string]interface{}, error) { } } else { matrixes = append(matrixes, make(map[string]interface{})) + log.Debugf("Empty Strategy, matrixes=%v", matrixes) } return matrixes, nil } @@ -468,14 +469,17 @@ func commonKeysMatch2(a map[string]interface{}, b map[string]interface{}, m map[ type JobType int const ( - // StepTypeRun is all steps that have a `run` attribute + // JobTypeDefault is all jobs that have a `run` attribute JobTypeDefault JobType = iota - // StepTypeReusableWorkflowLocal is all steps that have a `uses` that is a local workflow in the .github/workflows directory + // JobTypeReusableWorkflowLocal is all jobs that have a `uses` that is a local workflow in the .github/workflows directory JobTypeReusableWorkflowLocal - // JobTypeReusableWorkflowRemote is all steps that have a `uses` that references a workflow file in a github repo + // JobTypeReusableWorkflowRemote is all jobs that have a `uses` that references a workflow file in a github repo JobTypeReusableWorkflowRemote + + // JobTypeInvalid represents a job which is not configured correctly + JobTypeInvalid ) func (j JobType) String() string { @@ -491,17 +495,28 @@ func (j JobType) String() string { } // Type returns the type of the job -func (j *Job) Type() JobType { - if strings.HasPrefix(j.Uses, "./.github/workflows") && (strings.HasSuffix(j.Uses, ".yml") || strings.HasSuffix(j.Uses, ".yaml")) { - return JobTypeReusableWorkflowLocal - } else if strings.HasPrefix(j.Uses, "./.gitea/workflows") && (strings.HasSuffix(j.Uses, ".yml") || strings.HasSuffix(j.Uses, ".yaml")) { - return JobTypeReusableWorkflowLocal - } else if !strings.HasPrefix(j.Uses, "./") && strings.Contains(j.Uses, ".github/workflows") && (strings.Contains(j.Uses, ".yml@") || strings.Contains(j.Uses, ".yaml@")) { - return JobTypeReusableWorkflowRemote - } else if !strings.HasPrefix(j.Uses, "./") && strings.Contains(j.Uses, ".gitea/workflows") && (strings.Contains(j.Uses, ".yml@") || strings.Contains(j.Uses, ".yaml@")) { - return JobTypeReusableWorkflowRemote +func (j *Job) Type() (JobType, error) { + isReusable := j.Uses != "" + + if isReusable { + isYaml, _ := regexp.MatchString(`\.(ya?ml)(?:$|@)`, j.Uses) + + if isYaml { + isLocalPath := strings.HasPrefix(j.Uses, "./") + isRemotePath, _ := regexp.MatchString(`^[^.](.+?/){2,}.+\.ya?ml@`, j.Uses) + hasVersion, _ := regexp.MatchString(`\.ya?ml@`, j.Uses) + + if isLocalPath { + return JobTypeReusableWorkflowLocal, nil + } else if isRemotePath && hasVersion { + return JobTypeReusableWorkflowRemote, nil + } + } + + return JobTypeInvalid, fmt.Errorf("`uses` key references invalid 
workflow path '%s'. Must start with './' if it's a local workflow, or must start with '//' and include an '@' if it's a remote workflow", j.Uses) } - return JobTypeDefault + + return JobTypeDefault, nil } // ContainerSpec is the specification of the container to use for the job diff --git a/act/model/workflow_test.go b/act/model/workflow_test.go index 3729add0..8b336629 100644 --- a/act/model/workflow_test.go +++ b/act/model/workflow_test.go @@ -229,20 +229,81 @@ jobs: runs-on: ubuntu-latest steps: - run: echo - remote-reusable-workflow: - runs-on: ubuntu-latest - uses: remote/repo/.github/workflows/workflow.yml@main - local-reusable-workflow: - runs-on: ubuntu-latest - uses: ./.github/workflows/workflow.yml + remote-reusable-workflow-yml: + uses: remote/repo/some/path/to/workflow.yml@main + remote-reusable-workflow-yaml: + uses: remote/repo/some/path/to/workflow.yaml@main + remote-reusable-workflow-custom-path: + uses: remote/repo/path/to/workflow.yml@main + local-reusable-workflow-yml: + uses: ./some/path/to/workflow.yml + local-reusable-workflow-yaml: + uses: ./some/path/to/workflow.yaml ` workflow, err := ReadWorkflow(strings.NewReader(yaml)) assert.NoError(t, err, "read workflow should succeed") - assert.Len(t, workflow.Jobs, 3) - assert.Equal(t, workflow.Jobs["default-job"].Type(), JobTypeDefault) - assert.Equal(t, workflow.Jobs["remote-reusable-workflow"].Type(), JobTypeReusableWorkflowRemote) - assert.Equal(t, workflow.Jobs["local-reusable-workflow"].Type(), JobTypeReusableWorkflowLocal) + assert.Len(t, workflow.Jobs, 6) + + jobType, err := workflow.Jobs["default-job"].Type() + assert.Equal(t, nil, err) + assert.Equal(t, JobTypeDefault, jobType) + + jobType, err = workflow.Jobs["remote-reusable-workflow-yml"].Type() + assert.Equal(t, nil, err) + assert.Equal(t, JobTypeReusableWorkflowRemote, jobType) + + jobType, err = workflow.Jobs["remote-reusable-workflow-yaml"].Type() + assert.Equal(t, nil, err) + assert.Equal(t, JobTypeReusableWorkflowRemote, jobType) + + jobType, err = workflow.Jobs["remote-reusable-workflow-custom-path"].Type() + assert.Equal(t, nil, err) + assert.Equal(t, JobTypeReusableWorkflowRemote, jobType) + + jobType, err = workflow.Jobs["local-reusable-workflow-yml"].Type() + assert.Equal(t, nil, err) + assert.Equal(t, JobTypeReusableWorkflowLocal, jobType) + + jobType, err = workflow.Jobs["local-reusable-workflow-yaml"].Type() + assert.Equal(t, nil, err) + assert.Equal(t, JobTypeReusableWorkflowLocal, jobType) +} + +func TestReadWorkflow_JobTypes_InvalidPath(t *testing.T) { + yaml := ` +name: invalid job definition + +jobs: + remote-reusable-workflow-missing-version: + uses: remote/repo/some/path/to/workflow.yml + remote-reusable-workflow-bad-extension: + uses: remote/repo/some/path/to/workflow.json + local-reusable-workflow-bad-extension: + uses: ./some/path/to/workflow.json + local-reusable-workflow-bad-path: + uses: some/path/to/workflow.yaml +` + + workflow, err := ReadWorkflow(strings.NewReader(yaml)) + assert.NoError(t, err, "read workflow should succeed") + assert.Len(t, workflow.Jobs, 4) + + jobType, err := workflow.Jobs["remote-reusable-workflow-missing-version"].Type() + assert.Equal(t, JobTypeInvalid, jobType) + assert.NotEqual(t, nil, err) + + jobType, err = workflow.Jobs["remote-reusable-workflow-bad-extension"].Type() + assert.Equal(t, JobTypeInvalid, jobType) + assert.NotEqual(t, nil, err) + + jobType, err = workflow.Jobs["local-reusable-workflow-bad-extension"].Type() + assert.Equal(t, JobTypeInvalid, jobType) + assert.NotEqual(t, nil, err) + + 
jobType, err = workflow.Jobs["local-reusable-workflow-bad-path"].Type() + assert.Equal(t, JobTypeInvalid, jobType) + assert.NotEqual(t, nil, err) } func TestReadWorkflow_StepsTypes(t *testing.T) { diff --git a/act/runner/action.go b/act/runner/action.go index 7048f4d3..fa7e342e 100644 --- a/act/runner/action.go +++ b/act/runner/action.go @@ -198,7 +198,7 @@ func runActionImpl(step actionStep, actionDir string, remoteAction *remoteAction } } -func setupActionEnv(ctx context.Context, step actionStep, remoteAction *remoteAction) error { +func setupActionEnv(ctx context.Context, step actionStep, _ *remoteAction) error { rc := step.getRunContext() // A few fields in the environment (e.g. GITHUB_ACTION_REPOSITORY) diff --git a/act/runner/command.go b/act/runner/command.go index ea93b340..9b59a974 100644 --- a/act/runner/command.go +++ b/act/runner/command.go @@ -172,7 +172,7 @@ func unescapeKvPairs(kvPairs map[string]string) map[string]string { return kvPairs } -func (rc *RunContext) saveState(ctx context.Context, kvPairs map[string]string, arg string) { +func (rc *RunContext) saveState(_ context.Context, kvPairs map[string]string, arg string) { stepID := rc.CurrentStep if stepID != "" { if rc.IntraActionState == nil { diff --git a/act/runner/expression.go b/act/runner/expression.go index cc144afe..23c76ab2 100644 --- a/act/runner/expression.go +++ b/act/runner/expression.go @@ -181,7 +181,7 @@ func (ee expressionEvaluator) evaluateScalarYamlNode(ctx context.Context, node * } func (ee expressionEvaluator) evaluateMappingYamlNode(ctx context.Context, node *yaml.Node) (*yaml.Node, error) { - var ret *yaml.Node = nil + var ret *yaml.Node // GitHub has this undocumented feature to merge maps, called insert directive insertDirective := regexp.MustCompile(`\${{\s*insert\s*}}`) for i := 0; i < len(node.Content)/2; i++ { @@ -239,7 +239,7 @@ func (ee expressionEvaluator) evaluateMappingYamlNode(ctx context.Context, node } func (ee expressionEvaluator) evaluateSequenceYamlNode(ctx context.Context, node *yaml.Node) (*yaml.Node, error) { - var ret *yaml.Node = nil + var ret *yaml.Node for i := 0; i < len(node.Content); i++ { v := node.Content[i] // Preserve nested sequences @@ -397,6 +397,7 @@ func rewriteSubExpression(ctx context.Context, in string, forceFormat bool) (str return out, nil } +//nolint:gocyclo func getEvaluatorInputs(ctx context.Context, rc *RunContext, step step, ghc *model.GithubContext) map[string]interface{} { inputs := map[string]interface{}{} @@ -432,6 +433,22 @@ func getEvaluatorInputs(ctx context.Context, rc *RunContext, step step, ghc *mod } } + if ghc.EventName == "workflow_call" { + config := rc.Run.Workflow.WorkflowCallConfig() + if config != nil && config.Inputs != nil { + for k, v := range config.Inputs { + value := nestedMapLookup(ghc.Event, "inputs", k) + if value == nil { + value = v.Default + } + if v.Type == "boolean" { + inputs[k] = value == "true" + } else { + inputs[k] = value + } + } + } + } return inputs } @@ -486,6 +503,6 @@ func getWorkflowSecrets(ctx context.Context, rc *RunContext) map[string]string { return rc.Config.Secrets } -func getWorkflowVars(ctx context.Context, rc *RunContext) map[string]string { +func getWorkflowVars(_ context.Context, rc *RunContext) map[string]string { return rc.Config.Vars } diff --git a/act/runner/job_executor.go b/act/runner/job_executor.go index 090ed061..67708c09 100644 --- a/act/runner/job_executor.go +++ b/act/runner/job_executor.go @@ -148,7 +148,7 @@ func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) 
common.Executo pipeline = append(pipeline, steps...) return common.NewPipelineExecutor(info.startContainer(), common.NewPipelineExecutor(pipeline...). - Finally(func(ctx context.Context) error { + Finally(func(ctx context.Context) error { //nolint:contextcheck var cancel context.CancelFunc if ctx.Err() == context.Canceled { // in case of an aborted run, we still should execute the diff --git a/act/runner/job_executor_test.go b/act/runner/job_executor_test.go index 87c58886..ac7725f6 100644 --- a/act/runner/job_executor_test.go +++ b/act/runner/job_executor_test.go @@ -82,7 +82,7 @@ type jobContainerMock struct { container.LinuxContainerEnvironmentExtensions } -func (jcm *jobContainerMock) ReplaceLogWriter(stdout, stderr io.Writer) (io.Writer, io.Writer) { +func (jcm *jobContainerMock) ReplaceLogWriter(_, _ io.Writer) (io.Writer, io.Writer) { return nil, nil } diff --git a/act/runner/logger.go b/act/runner/logger.go index ca9a6396..68902931 100644 --- a/act/runner/logger.go +++ b/act/runner/logger.go @@ -85,7 +85,8 @@ func WithJobLogger(ctx context.Context, jobID string, jobName string, config *Co defer mux.Unlock() nextColor++ formatter = &jobLogFormatter{ - color: colors[nextColor%len(colors)], + color: colors[nextColor%len(colors)], + logPrefixJobID: config.LogPrefixJobID, } } @@ -188,7 +189,8 @@ func (f *maskedFormatter) Format(entry *logrus.Entry) ([]byte, error) { } type jobLogFormatter struct { - color int + color int + logPrefixJobID bool } func (f *jobLogFormatter) Format(entry *logrus.Entry) ([]byte, error) { @@ -206,7 +208,14 @@ func (f *jobLogFormatter) Format(entry *logrus.Entry) ([]byte, error) { func (f *jobLogFormatter) printColored(b *bytes.Buffer, entry *logrus.Entry) { entry.Message = strings.TrimSuffix(entry.Message, "\n") - jobName := entry.Data["job"] + + var job any + if f.logPrefixJobID { + job = entry.Data["jobID"] + } else { + job = entry.Data["job"] + } + debugFlag := "" if entry.Level == logrus.DebugLevel { debugFlag = "[DEBUG] " @@ -215,26 +224,33 @@ func (f *jobLogFormatter) printColored(b *bytes.Buffer, entry *logrus.Entry) { if entry.Data["raw_output"] == true { fmt.Fprintf(b, "\x1b[%dm|\x1b[0m %s", f.color, entry.Message) } else if entry.Data["dryrun"] == true { - fmt.Fprintf(b, "\x1b[1m\x1b[%dm\x1b[7m*DRYRUN*\x1b[0m \x1b[%dm[%s] \x1b[0m%s%s", gray, f.color, jobName, debugFlag, entry.Message) + fmt.Fprintf(b, "\x1b[1m\x1b[%dm\x1b[7m*DRYRUN*\x1b[0m \x1b[%dm[%s] \x1b[0m%s%s", gray, f.color, job, debugFlag, entry.Message) } else { - fmt.Fprintf(b, "\x1b[%dm[%s] \x1b[0m%s%s", f.color, jobName, debugFlag, entry.Message) + fmt.Fprintf(b, "\x1b[%dm[%s] \x1b[0m%s%s", f.color, job, debugFlag, entry.Message) } } func (f *jobLogFormatter) print(b *bytes.Buffer, entry *logrus.Entry) { entry.Message = strings.TrimSuffix(entry.Message, "\n") - jobName := entry.Data["job"] + + var job any + if f.logPrefixJobID { + job = entry.Data["jobID"] + } else { + job = entry.Data["job"] + } + debugFlag := "" if entry.Level == logrus.DebugLevel { debugFlag = "[DEBUG] " } if entry.Data["raw_output"] == true { - fmt.Fprintf(b, "[%s] | %s", jobName, entry.Message) + fmt.Fprintf(b, "[%s] | %s", job, entry.Message) } else if entry.Data["dryrun"] == true { - fmt.Fprintf(b, "*DRYRUN* [%s] %s%s", jobName, debugFlag, entry.Message) + fmt.Fprintf(b, "*DRYRUN* [%s] %s%s", job, debugFlag, entry.Message) } else { - fmt.Fprintf(b, "[%s] %s%s", jobName, debugFlag, entry.Message) + fmt.Fprintf(b, "[%s] %s%s", job, debugFlag, entry.Message) } } diff --git a/act/runner/run_context.go 
b/act/runner/run_context.go index fd3b23c2..d34cc138 100644 --- a/act/runner/run_context.go +++ b/act/runner/run_context.go @@ -571,16 +571,19 @@ func (rc *RunContext) steps() []*model.Step { } // Executor returns a pipeline executor for all the steps in the job -func (rc *RunContext) Executor() common.Executor { +func (rc *RunContext) Executor() (common.Executor, error) { var executor common.Executor + var jobType, err = rc.Run.Job().Type() - switch rc.Run.Job().Type() { + switch jobType { case model.JobTypeDefault: executor = newJobExecutor(rc, &stepFactoryImpl{}, rc) case model.JobTypeReusableWorkflowLocal: executor = newLocalReusableWorkflowExecutor(rc) case model.JobTypeReusableWorkflowRemote: executor = newRemoteReusableWorkflowExecutor(rc) + case model.JobTypeInvalid: + return nil, err } return func(ctx context.Context) error { @@ -592,7 +595,7 @@ func (rc *RunContext) Executor() common.Executor { return executor(ctx) } return nil - } + }, nil } func (rc *RunContext) containerImage(ctx context.Context) string { @@ -642,7 +645,7 @@ func (rc *RunContext) platformImage(ctx context.Context) string { return rc.runsOnImage(ctx) } -func (rc *RunContext) options(ctx context.Context) string { +func (rc *RunContext) options(_ context.Context) string { job := rc.Run.Job() c := job.Container() if c == nil { @@ -655,19 +658,24 @@ func (rc *RunContext) options(ctx context.Context) string { func (rc *RunContext) isEnabled(ctx context.Context) (bool, error) { job := rc.Run.Job() l := common.Logger(ctx) - runJob, err := EvalBool(ctx, rc.ExprEval, job.If.Value, exprparser.DefaultStatusCheckSuccess) - if err != nil { - return false, fmt.Errorf(" \u274C Error in if-expression: \"if: %s\" (%s)", job.If.Value, err) + runJob, runJobErr := EvalBool(ctx, rc.ExprEval, job.If.Value, exprparser.DefaultStatusCheckSuccess) + jobType, jobTypeErr := job.Type() + + if runJobErr != nil { + return false, fmt.Errorf(" \u274C Error in if-expression: \"if: %s\" (%s)", job.If.Value, runJobErr) } + + if jobType == model.JobTypeInvalid { + return false, jobTypeErr + } else if jobType != model.JobTypeDefault { + return true, nil + } + if !runJob { l.WithField("jobResult", "skipped").Debugf("Skipping job '%s' due to '%s'", job.Name, job.If.Value) return false, nil } - if job.Type() != model.JobTypeDefault { - return true, nil - } - img := rc.platformImage(ctx) if img == "" { if job.RunsOn() == nil { @@ -1010,37 +1018,37 @@ func setActionRuntimeVars(rc *RunContext, env map[string]string) { env["ACTIONS_RUNTIME_TOKEN"] = actionsRuntimeToken } -func (rc *RunContext) handleCredentials(ctx context.Context) (username, password string, err error) { +func (rc *RunContext) handleCredentials(ctx context.Context) (string, string, error) { // TODO: remove below 2 lines when we can release act with breaking changes - username = rc.Config.Secrets["DOCKER_USERNAME"] - password = rc.Config.Secrets["DOCKER_PASSWORD"] + username := rc.Config.Secrets["DOCKER_USERNAME"] + password := rc.Config.Secrets["DOCKER_PASSWORD"] container := rc.Run.Job().Container() if container == nil || container.Credentials == nil { - return + return username, password, nil } if container.Credentials != nil && len(container.Credentials) != 2 { - err = fmt.Errorf("invalid property count for key 'credentials:'") - return + err := fmt.Errorf("invalid property count for key 'credentials:'") + return "", "", err } ee := rc.NewExpressionEvaluator(ctx) if username = ee.Interpolate(ctx, container.Credentials["username"]); username == "" { - err = fmt.Errorf("failed to 
interpolate container.credentials.username") - return + err := fmt.Errorf("failed to interpolate container.credentials.username") + return "", "", err } if password = ee.Interpolate(ctx, container.Credentials["password"]); password == "" { - err = fmt.Errorf("failed to interpolate container.credentials.password") - return + err := fmt.Errorf("failed to interpolate container.credentials.password") + return "", "", err } if container.Credentials["username"] == "" || container.Credentials["password"] == "" { - err = fmt.Errorf("container.credentials cannot be empty") - return + err := fmt.Errorf("container.credentials cannot be empty") + return "", "", err } - return username, password, err + return username, password, nil } func (rc *RunContext) handleServiceCredentials(ctx context.Context, creds map[string]string) (username, password string, err error) { diff --git a/act/runner/runner.go b/act/runner/runner.go index 22c8ea64..e9f00e28 100644 --- a/act/runner/runner.go +++ b/act/runner/runner.go @@ -5,13 +5,13 @@ import ( "encoding/json" "fmt" "os" + "runtime" "time" + docker_container "github.com/docker/docker/api/types/container" log "github.com/sirupsen/logrus" - docker_container "github.com/docker/docker/api/types/container" "github.com/nektos/act/pkg/common" - "github.com/nektos/act/pkg/container" "github.com/nektos/act/pkg/model" ) @@ -34,6 +34,7 @@ type Config struct { ForceRebuild bool // force rebuilding local docker image action LogOutput bool // log the output from docker run JSONLogger bool // use json or text logger + LogPrefixJobID bool // switches from the full job name to the job id Env map[string]string // env for containers Inputs map[string]string // manually passed action inputs Secrets map[string]string // list of secrets @@ -128,15 +129,45 @@ func (runner *runnerImpl) NewPlanExecutor(plan *model.Plan) common.Executor { maxJobNameLen := 0 stagePipeline := make([]common.Executor, 0) + log.Debugf("Plan Stages: %v", plan.Stages) + for i := range plan.Stages { stage := plan.Stages[i] stagePipeline = append(stagePipeline, func(ctx context.Context) error { pipeline := make([]common.Executor, 0) for _, run := range stage.Runs { + log.Debugf("Stages Runs: %v", stage.Runs) stageExecutor := make([]common.Executor, 0) job := run.Job() + log.Debugf("Job.Name: %v", job.Name) + log.Debugf("Job.RawNeeds: %v", job.RawNeeds) + log.Debugf("Job.RawRunsOn: %v", job.RawRunsOn) + log.Debugf("Job.Env: %v", job.Env) + log.Debugf("Job.If: %v", job.If) + for step := range job.Steps { + if nil != job.Steps[step] { + log.Debugf("Job.Steps: %v", job.Steps[step].String()) + } + } + log.Debugf("Job.TimeoutMinutes: %v", job.TimeoutMinutes) + log.Debugf("Job.Services: %v", job.Services) + log.Debugf("Job.Strategy: %v", job.Strategy) + log.Debugf("Job.RawContainer: %v", job.RawContainer) + log.Debugf("Job.Defaults.Run.Shell: %v", job.Defaults.Run.Shell) + log.Debugf("Job.Defaults.Run.WorkingDirectory: %v", job.Defaults.Run.WorkingDirectory) + log.Debugf("Job.Outputs: %v", job.Outputs) + log.Debugf("Job.Uses: %v", job.Uses) + log.Debugf("Job.With: %v", job.With) + // log.Debugf("Job.RawSecrets: %v", job.RawSecrets) + log.Debugf("Job.Result: %v", job.Result) if job.Strategy != nil { + log.Debugf("Job.Strategy.FailFast: %v", job.Strategy.FailFast) + log.Debugf("Job.Strategy.MaxParallel: %v", job.Strategy.MaxParallel) + log.Debugf("Job.Strategy.FailFastString: %v", job.Strategy.FailFastString) + log.Debugf("Job.Strategy.MaxParallelString: %v", job.Strategy.MaxParallelString) + 
log.Debugf("Job.Strategy.RawMatrix: %v", job.Strategy.RawMatrix) + strategyRc := runner.newRunContext(ctx, run, nil) if err := strategyRc.NewExpressionEvaluator(ctx).EvaluateYamlNode(ctx, &job.Strategy.RawMatrix); err != nil { log.Errorf("Error while evaluating matrix: %v", err) @@ -147,6 +178,8 @@ func (runner *runnerImpl) NewPlanExecutor(plan *model.Plan) common.Executor { if m, err := job.GetMatrixes(); err != nil { log.Errorf("Error while get job's matrix: %v", err) } else { + log.Debugf("Job Matrices: %v", m) + log.Debugf("Runner Matrices: %v", runner.config.Matrix) matrixes = selectMatrixes(m, runner.config.Matrix) } log.Debugf("Final matrix after applying user inclusions '%v'", matrixes) @@ -172,19 +205,22 @@ func (runner *runnerImpl) NewPlanExecutor(plan *model.Plan) common.Executor { } stageExecutor = append(stageExecutor, func(ctx context.Context) error { jobName := fmt.Sprintf("%-*s", maxJobNameLen, rc.String()) - return rc.Executor()(common.WithJobErrorContainer(WithJobLogger(ctx, rc.Run.JobID, jobName, rc.Config, &rc.Masks, matrix))) + executor, err := rc.Executor() + + if err != nil { + return err + } + + return executor(common.WithJobErrorContainer(WithJobLogger(ctx, rc.Run.JobID, jobName, rc.Config, &rc.Masks, matrix))) }) } pipeline = append(pipeline, common.NewParallelExecutor(maxParallel, stageExecutor...)) } - var ncpu int - info, err := container.GetHostInfo(ctx) - if err != nil { - log.Errorf("failed to obtain container engine info: %s", err) - ncpu = 1 // sane default? - } else { - ncpu = info.NCPU + ncpu := runtime.NumCPU() + if 1 > ncpu { + ncpu = 1 } + log.Debugf("Detected CPUs: %d", ncpu) return common.NewParallelExecutor(ncpu, pipeline...)(ctx) }) } diff --git a/act/runner/runner_test.go b/act/runner/runner_test.go index a76062f2..32da79ca 100644 --- a/act/runner/runner_test.go +++ b/act/runner/runner_test.go @@ -288,6 +288,7 @@ func TestRunEvent(t *testing.T) { {workdir, "docker-action-custom-path", "push", "", platforms, secrets}, {workdir, "GITHUB_ENV-use-in-env-ctx", "push", "", platforms, secrets}, {workdir, "ensure-post-steps", "push", "Job 'second-post-step-should-fail' failed", platforms, secrets}, + {workdir, "workflow_call_inputs", "workflow_call", "", platforms, secrets}, {workdir, "workflow_dispatch", "workflow_dispatch", "", platforms, secrets}, {workdir, "workflow_dispatch_no_inputs_mapping", "workflow_dispatch", "", platforms, secrets}, {workdir, "workflow_dispatch-scalar", "workflow_dispatch", "", platforms, secrets}, diff --git a/act/runner/step.go b/act/runner/step.go index 9cc6aea4..ffb2efbc 100644 --- a/act/runner/step.go +++ b/act/runner/step.go @@ -256,7 +256,7 @@ func isStepEnabled(ctx context.Context, expr string, step step, stage stepStage) return runStep, nil } -func isContinueOnError(ctx context.Context, expr string, step step, stage stepStage) (bool, error) { +func isContinueOnError(ctx context.Context, expr string, step step, _ stepStage) (bool, error) { // https://github.com/github/docs/blob/3ae84420bd10997bb5f35f629ebb7160fe776eae/content/actions/reference/workflow-syntax-for-github-actions.md?plain=true#L962 if len(strings.TrimSpace(expr)) == 0 { return false, nil diff --git a/act/runner/step_action_local.go b/act/runner/step_action_local.go index 6b4fc062..a745e686 100644 --- a/act/runner/step_action_local.go +++ b/act/runner/step_action_local.go @@ -85,7 +85,7 @@ func (sal *stepActionLocal) getEnv() *map[string]string { return &sal.env } -func (sal *stepActionLocal) getIfExpression(context context.Context, stage stepStage) 
string { +func (sal *stepActionLocal) getIfExpression(_ context.Context, stage stepStage) string { switch stage { case stepStageMain: return sal.Step.If.Value diff --git a/act/runner/step_action_local_test.go b/act/runner/step_action_local_test.go index 5fe7f291..c4b63459 100644 --- a/act/runner/step_action_local_test.go +++ b/act/runner/step_action_local_test.go @@ -24,7 +24,7 @@ func (salm *stepActionLocalMocks) runAction(step actionStep, actionDir string, r return args.Get(0).(func(context.Context) error) } -func (salm *stepActionLocalMocks) readAction(ctx context.Context, step *model.Step, actionDir string, actionPath string, readFile actionYamlReader, writeFile fileWriter) (*model.Action, error) { +func (salm *stepActionLocalMocks) readAction(_ context.Context, step *model.Step, actionDir string, actionPath string, readFile actionYamlReader, writeFile fileWriter) (*model.Action, error) { args := salm.Called(step, actionDir, actionPath, readFile, writeFile) return args.Get(0).(*model.Action), args.Error(1) } diff --git a/act/runner/step_action_remote_test.go b/act/runner/step_action_remote_test.go index 3199419a..9dcc3383 100644 --- a/act/runner/step_action_remote_test.go +++ b/act/runner/step_action_remote_test.go @@ -21,7 +21,7 @@ type stepActionRemoteMocks struct { mock.Mock } -func (sarm *stepActionRemoteMocks) readAction(ctx context.Context, step *model.Step, actionDir string, actionPath string, readFile actionYamlReader, writeFile fileWriter) (*model.Action, error) { +func (sarm *stepActionRemoteMocks) readAction(_ context.Context, step *model.Step, actionDir string, actionPath string, readFile actionYamlReader, writeFile fileWriter) (*model.Action, error) { args := sarm.Called(step, actionDir, actionPath, readFile, writeFile) return args.Get(0).(*model.Action), args.Error(1) } diff --git a/act/runner/step_docker.go b/act/runner/step_docker.go index 725bfff5..2f239866 100644 --- a/act/runner/step_docker.go +++ b/act/runner/step_docker.go @@ -51,7 +51,7 @@ func (sd *stepDocker) getEnv() *map[string]string { return &sd.env } -func (sd *stepDocker) getIfExpression(context context.Context, stage stepStage) string { +func (sd *stepDocker) getIfExpression(_ context.Context, _ stepStage) string { return sd.Step.If.Value } diff --git a/act/runner/step_run.go b/act/runner/step_run.go index 4d855fdb..9d887e7f 100644 --- a/act/runner/step_run.go +++ b/act/runner/step_run.go @@ -59,7 +59,7 @@ func (sr *stepRun) getEnv() *map[string]string { return &sr.env } -func (sr *stepRun) getIfExpression(context context.Context, stage stepStage) string { +func (sr *stepRun) getIfExpression(_ context.Context, _ stepStage) string { return sr.Step.If.Value } diff --git a/act/runner/testdata/workflow_call_inputs/event.json b/act/runner/testdata/workflow_call_inputs/event.json new file mode 100644 index 00000000..d3ecab10 --- /dev/null +++ b/act/runner/testdata/workflow_call_inputs/event.json @@ -0,0 +1,6 @@ +{ + "inputs": { + "required": "required input", + "boolean": "true" + } +} diff --git a/act/runner/testdata/workflow_call_inputs/workflow_call_inputs.yml b/act/runner/testdata/workflow_call_inputs/workflow_call_inputs.yml new file mode 100644 index 00000000..1a5cca3f --- /dev/null +++ b/act/runner/testdata/workflow_call_inputs/workflow_call_inputs.yml @@ -0,0 +1,36 @@ +name: workflow_call + +on: + workflow_call: + inputs: + required: + description: a required input + required: true + with_default: + description: an input with default + required: false + default: default + boolean: + description: an 
input of type boolean + required: false + type: boolean + +jobs: + test: + runs-on: ubuntu-latest + steps: + - name: test required input + run: | + echo input.required=${{ inputs.required }} + [[ "${{ inputs.required }}" = "required input" ]] || exit 1 + - name: test input with default + run: | + echo input.with_default=${{ inputs.with_default }} + [[ "${{ inputs.with_default }}" = "default" ]] || exit 1 + - id: boolean-test + name: run on boolean input + if: ${{ inputs.boolean == true }} + run: echo "::set-output name=value::executed" + - name: has boolean test? + run: | + [[ "${{ steps.boolean-test.outputs.value }}" = "executed" ]] || exit 1 diff --git a/cmd/input.go b/cmd/input.go index 64556e1d..1ae27930 100644 --- a/cmd/input.go +++ b/cmd/input.go @@ -55,6 +55,7 @@ type Input struct { replaceGheActionTokenWithGithubCom string matrix []string actionCachePath string + logPrefixJobID bool } func (i *Input) resolve(path string) string { diff --git a/cmd/root.go b/cmd/root.go index 59a571e2..679d20eb 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -75,6 +75,7 @@ func Execute(ctx context.Context, version string) { rootCmd.PersistentFlags().StringVarP(&input.workdir, "directory", "C", ".", "working directory") rootCmd.PersistentFlags().BoolP("verbose", "v", false, "verbose output") rootCmd.PersistentFlags().BoolVar(&input.jsonLogger, "json", false, "Output logs in json format") + rootCmd.PersistentFlags().BoolVar(&input.logPrefixJobID, "log-prefix-job-id", false, "Output the job id within non-json logs instead of the entire name") rootCmd.PersistentFlags().BoolVarP(&input.noOutput, "quiet", "q", false, "disable logging of output from steps") rootCmd.PersistentFlags().BoolVarP(&input.dryrun, "dryrun", "n", false, "dryrun mode") rootCmd.PersistentFlags().StringVarP(&input.secretfile, "secret-file", "", ".secrets", "file with list of secrets to read from (e.g. --secret-file .secrets)") @@ -273,7 +274,7 @@ func readArgsFile(file string, split bool) []string { return args } -func setup(inputs *Input) func(*cobra.Command, []string) { +func setup(_ *Input) func(*cobra.Command, []string) { return func(cmd *cobra.Command, _ []string) { verbose, _ := cmd.Flags().GetBool("verbose") if verbose { @@ -343,12 +344,11 @@ func parseMatrix(matrix []string) map[string]map[string]bool { matrix := r.Split(m, 2) if len(matrix) < 2 { log.Fatalf("Invalid matrix format. Failed to parse %s", m) - } else { - if _, ok := matrixes[matrix[0]]; !ok { - matrixes[matrix[0]] = make(map[string]bool) - } - matrixes[matrix[0]][matrix[1]] = true } + if _, ok := matrixes[matrix[0]]; !ok { + matrixes[matrix[0]] = make(map[string]bool) + } + matrixes[matrix[0]][matrix[1]] = true } return matrixes } @@ -585,6 +585,7 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str BindWorkdir: input.bindWorkdir, LogOutput: !input.noOutput, JSONLogger: input.jsonLogger, + LogPrefixJobID: input.logPrefixJobID, Env: envs, Secrets: secrets, Vars: vars,