
Merge tag 'nektos/v0.2.45'

Jason Song 2023-05-04 17:45:53 +08:00
commit 441bf3e1a7
39 changed files with 1788 additions and 219 deletions


@ -1,6 +1,10 @@
name: checks
on: [pull_request, workflow_dispatch]
concurrency:
cancel-in-progress: true
group: ${{ github.workflow }}-${{ github.ref }}
env:
ACT_OWNER: ${{ github.repository_owner }}
ACT_REPOSITORY: ${{ github.repository }}
@ -15,14 +19,14 @@ jobs:
- uses: actions/checkout@v3
with:
fetch-depth: 0
-      - uses: actions/setup-go@v3
+      - uses: actions/setup-go@v4
with:
go-version: ${{ env.GO_VERSION }}
check-latest: true
- uses: golangci/golangci-lint-action@v3.4.0
with:
version: v1.47.2
-      - uses: megalinter/megalinter/flavors/go@v6.20.0
+      - uses: megalinter/megalinter/flavors/go@v6.22.2
env:
DEFAULT_BRANCH: master
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@ -39,7 +43,7 @@ jobs:
fetch-depth: 2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
-      - uses: actions/setup-go@v3
+      - uses: actions/setup-go@v4
with:
go-version: ${{ env.GO_VERSION }}
check-latest: true
@ -54,8 +58,10 @@ jobs:
uses: ./.github/actions/run-tests
with:
upload-logs-name: logs-linux
- name: Run act from cli
run: go run main.go -P ubuntu-latest=node:16-buster-slim -C ./pkg/runner/testdata/ -W ./basic/push.yml
- name: Upload Codecov report
-        uses: codecov/codecov-action@v3.1.1
+        uses: codecov/codecov-action@v3.1.3
with:
files: coverage.txt
fail_ci_if_error: true # optional (default = false)
@ -72,7 +78,7 @@ jobs:
- uses: actions/checkout@v3
with:
fetch-depth: 2
-      - uses: actions/setup-go@v3
+      - uses: actions/setup-go@v4
with:
go-version: ${{ env.GO_VERSION }}
check-latest: true
@ -87,7 +93,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
-      - uses: actions/setup-go@v3
+      - uses: actions/setup-go@v4
with:
go-version: ${{ env.GO_VERSION }}
check-latest: true


@ -17,8 +17,8 @@ jobs:
fetch-depth: 0
ref: master
token: ${{ secrets.GORELEASER_GITHUB_TOKEN }}
-      - uses: fregante/setup-git-user@v1
-      - uses: actions/setup-go@v3
+      - uses: fregante/setup-git-user@v2
+      - uses: actions/setup-go@v4
with:
go-version: ${{ env.GO_VERSION }}
check-latest: true


@ -15,7 +15,7 @@ jobs:
- uses: actions/checkout@v3
with:
fetch-depth: 0
-      - uses: actions/setup-go@v3
+      - uses: actions/setup-go@v4
with:
go-version: ${{ env.GO_VERSION }}
check-latest: true


@ -8,7 +8,7 @@ jobs:
name: Stale
runs-on: ubuntu-latest
steps:
-      - uses: actions/stale@v7
+      - uses: actions/stale@v8
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-issue-message: 'Issue is stale and will be closed in 14 days unless there is new activity'

act/artifactcache/doc.go Normal file

@ -0,0 +1,8 @@
// Package artifactcache provides a cache handler for the runner.
//
// Inspired by https://github.com/sp-ricard-valverde/github-act-cache-server
//
// TODO: Authorization
// TODO: Restrictions for accessing a cache, see https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#restrictions-for-accessing-a-cache
// TODO: Force deleting cache entries, see https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#force-deleting-cache-entries
package artifactcache
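
As an aside, a minimal sketch of how this handler might be started and queried from Go (import path and values are illustrative, not part of this commit):

package main

import (
	"fmt"
	"log"

	"github.com/nektos/act/pkg/artifactcache" // assumed import path for this package
)

func main() {
	// dir == "" falls back to ~/.cache/actcache; port 0 lets the OS pick a free port.
	handler, err := artifactcache.StartHandler("", "", 0, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer handler.Close()

	// Jobs would reach the cache server at this URL (e.g. via ACTIONS_CACHE_URL).
	fmt.Println(handler.ExternalURL())
}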


@ -0,0 +1,488 @@
package artifactcache
import (
"encoding/json"
"errors"
"fmt"
"io"
"net"
"net/http"
"os"
"path/filepath"
"strconv"
"strings"
"sync/atomic"
"time"
"github.com/julienschmidt/httprouter"
"github.com/sirupsen/logrus"
"github.com/timshannon/bolthold"
"go.etcd.io/bbolt"
"github.com/nektos/act/pkg/common"
)
const (
urlBase = "/_apis/artifactcache"
)
type Handler struct {
db *bolthold.Store
storage *Storage
router *httprouter.Router
listener net.Listener
server *http.Server
logger logrus.FieldLogger
gcing int32 // TODO: use atomic.Bool when we can use Go 1.19
gcAt time.Time
outboundIP string
}
func StartHandler(dir, outboundIP string, port uint16, logger logrus.FieldLogger) (*Handler, error) {
h := &Handler{}
if logger == nil {
discard := logrus.New()
discard.Out = io.Discard
logger = discard
}
logger = logger.WithField("module", "artifactcache")
h.logger = logger
if dir == "" {
home, err := os.UserHomeDir()
if err != nil {
return nil, err
}
dir = filepath.Join(home, ".cache", "actcache")
}
if err := os.MkdirAll(dir, 0o755); err != nil {
return nil, err
}
db, err := bolthold.Open(filepath.Join(dir, "bolt.db"), 0o644, &bolthold.Options{
Encoder: json.Marshal,
Decoder: json.Unmarshal,
Options: &bbolt.Options{
Timeout: 5 * time.Second,
NoGrowSync: bbolt.DefaultOptions.NoGrowSync,
FreelistType: bbolt.DefaultOptions.FreelistType,
},
})
if err != nil {
return nil, err
}
h.db = db
storage, err := NewStorage(filepath.Join(dir, "cache"))
if err != nil {
return nil, err
}
h.storage = storage
if outboundIP != "" {
h.outboundIP = outboundIP
} else if ip := common.GetOutboundIP(); ip == nil {
return nil, fmt.Errorf("unable to determine outbound IP address")
} else {
h.outboundIP = ip.String()
}
router := httprouter.New()
router.GET(urlBase+"/cache", h.middleware(h.find))
router.POST(urlBase+"/caches", h.middleware(h.reserve))
router.PATCH(urlBase+"/caches/:id", h.middleware(h.upload))
router.POST(urlBase+"/caches/:id", h.middleware(h.commit))
router.GET(urlBase+"/artifacts/:id", h.middleware(h.get))
router.POST(urlBase+"/clean", h.middleware(h.clean))
h.router = router
h.gcCache()
listener, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) // listen on all interfaces
if err != nil {
return nil, err
}
server := &http.Server{
ReadHeaderTimeout: 2 * time.Second,
Handler: router,
}
go func() {
if err := server.Serve(listener); err != nil && !errors.Is(err, net.ErrClosed) {
logger.Errorf("http serve: %v", err)
}
}()
h.listener = listener
h.server = server
return h, nil
}
func (h *Handler) ExternalURL() string {
// TODO: make the external url configurable if necessary
return fmt.Sprintf("http://%s:%d",
h.outboundIP,
h.listener.Addr().(*net.TCPAddr).Port)
}
func (h *Handler) Close() error {
if h == nil {
return nil
}
var retErr error
if h.server != nil {
err := h.server.Close()
if err != nil {
retErr = err
}
h.server = nil
}
if h.listener != nil {
err := h.listener.Close()
if errors.Is(err, net.ErrClosed) {
err = nil
}
if err != nil {
retErr = err
}
h.listener = nil
}
if h.db != nil {
err := h.db.Close()
if err != nil {
retErr = err
}
h.db = nil
}
return retErr
}
// GET /_apis/artifactcache/cache
func (h *Handler) find(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
keys := strings.Split(r.URL.Query().Get("keys"), ",")
// cache keys are case insensitive
for i, key := range keys {
keys[i] = strings.ToLower(key)
}
version := r.URL.Query().Get("version")
cache, err := h.findCache(keys, version)
if err != nil {
h.responseJSON(w, r, 500, err)
return
}
if cache == nil {
h.responseJSON(w, r, 204)
return
}
if ok, err := h.storage.Exist(cache.ID); err != nil {
h.responseJSON(w, r, 500, err)
return
} else if !ok {
_ = h.db.Delete(cache.ID, cache)
h.responseJSON(w, r, 204)
return
}
h.responseJSON(w, r, 200, map[string]any{
"result": "hit",
"archiveLocation": fmt.Sprintf("%s%s/artifacts/%d", h.ExternalURL(), urlBase, cache.ID),
"cacheKey": cache.Key,
})
}
// POST /_apis/artifactcache/caches
func (h *Handler) reserve(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
api := &Request{}
if err := json.NewDecoder(r.Body).Decode(api); err != nil {
h.responseJSON(w, r, 400, err)
return
}
// cache keys are case insensitive
api.Key = strings.ToLower(api.Key)
cache := api.ToCache()
cache.FillKeyVersionHash()
if err := h.db.FindOne(cache, bolthold.Where("KeyVersionHash").Eq(cache.KeyVersionHash)); err != nil {
if !errors.Is(err, bolthold.ErrNotFound) {
h.responseJSON(w, r, 500, err)
return
}
} else {
h.responseJSON(w, r, 400, fmt.Errorf("already exists"))
return
}
now := time.Now().Unix()
cache.CreatedAt = now
cache.UsedAt = now
if err := h.db.Insert(bolthold.NextSequence(), cache); err != nil {
h.responseJSON(w, r, 500, err)
return
}
// write back id to db
if err := h.db.Update(cache.ID, cache); err != nil {
h.responseJSON(w, r, 500, err)
return
}
h.responseJSON(w, r, 200, map[string]any{
"cacheId": cache.ID,
})
}
// PATCH /_apis/artifactcache/caches/:id
func (h *Handler) upload(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
id, err := strconv.ParseInt(params.ByName("id"), 10, 64)
if err != nil {
h.responseJSON(w, r, 400, err)
return
}
cache := &Cache{}
if err := h.db.Get(id, cache); err != nil {
if errors.Is(err, bolthold.ErrNotFound) {
h.responseJSON(w, r, 400, fmt.Errorf("cache %d: not reserved", id))
return
}
h.responseJSON(w, r, 500, err)
return
}
if cache.Complete {
h.responseJSON(w, r, 400, fmt.Errorf("cache %v %q: already complete", cache.ID, cache.Key))
return
}
start, _, err := parseContentRange(r.Header.Get("Content-Range"))
if err != nil {
h.responseJSON(w, r, 400, err)
return
}
if err := h.storage.Write(cache.ID, start, r.Body); err != nil {
h.responseJSON(w, r, 500, err)
}
h.useCache(id)
h.responseJSON(w, r, 200)
}
// POST /_apis/artifactcache/caches/:id
func (h *Handler) commit(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
id, err := strconv.ParseInt(params.ByName("id"), 10, 64)
if err != nil {
h.responseJSON(w, r, 400, err)
return
}
cache := &Cache{}
if err := h.db.Get(id, cache); err != nil {
if errors.Is(err, bolthold.ErrNotFound) {
h.responseJSON(w, r, 400, fmt.Errorf("cache %d: not reserved", id))
return
}
h.responseJSON(w, r, 500, err)
return
}
if cache.Complete {
h.responseJSON(w, r, 400, fmt.Errorf("cache %v %q: already complete", cache.ID, cache.Key))
return
}
if err := h.storage.Commit(cache.ID, cache.Size); err != nil {
h.responseJSON(w, r, 500, err)
return
}
cache.Complete = true
if err := h.db.Update(cache.ID, cache); err != nil {
h.responseJSON(w, r, 500, err)
return
}
h.responseJSON(w, r, 200)
}
// GET /_apis/artifactcache/artifacts/:id
func (h *Handler) get(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
id, err := strconv.ParseInt(params.ByName("id"), 10, 64)
if err != nil {
h.responseJSON(w, r, 400, err)
return
}
h.useCache(id)
h.storage.Serve(w, r, uint64(id))
}
// POST /_apis/artifactcache/clean
func (h *Handler) clean(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
// TODO: force deleting cache entries is not supported yet
// see: https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#force-deleting-cache-entries
h.responseJSON(w, r, 200)
}
func (h *Handler) middleware(handler httprouter.Handle) httprouter.Handle {
return func(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
h.logger.Debugf("%s %s", r.Method, r.RequestURI)
handler(w, r, params)
go h.gcCache()
}
}
// if not found, return (nil, nil) instead of an error.
func (h *Handler) findCache(keys []string, version string) (*Cache, error) {
if len(keys) == 0 {
return nil, nil
}
key := keys[0] // the first key is for exact match.
cache := &Cache{
Key: key,
Version: version,
}
cache.FillKeyVersionHash()
if err := h.db.FindOne(cache, bolthold.Where("KeyVersionHash").Eq(cache.KeyVersionHash)); err != nil {
if !errors.Is(err, bolthold.ErrNotFound) {
return nil, err
}
} else if cache.Complete {
return cache, nil
}
stop := fmt.Errorf("stop")
for _, prefix := range keys[1:] {
found := false
if err := h.db.ForEach(bolthold.Where("Key").Ge(prefix).And("Version").Eq(version).SortBy("Key"), func(v *Cache) error {
if !strings.HasPrefix(v.Key, prefix) {
return stop
}
if v.Complete {
cache = v
found = true
return stop
}
return nil
}); err != nil {
if !errors.Is(err, stop) {
return nil, err
}
}
if found {
return cache, nil
}
}
return nil, nil
}
func (h *Handler) useCache(id int64) {
cache := &Cache{}
if err := h.db.Get(id, cache); err != nil {
return
}
cache.UsedAt = time.Now().Unix()
_ = h.db.Update(cache.ID, cache)
}
func (h *Handler) gcCache() {
if atomic.LoadInt32(&h.gcing) != 0 {
return
}
if !atomic.CompareAndSwapInt32(&h.gcing, 0, 1) {
return
}
defer atomic.StoreInt32(&h.gcing, 0)
if time.Since(h.gcAt) < time.Hour {
h.logger.Debugf("skip gc: %v", h.gcAt.String())
return
}
h.gcAt = time.Now()
h.logger.Debugf("gc: %v", h.gcAt.String())
const (
keepUsed = 30 * 24 * time.Hour
keepUnused = 7 * 24 * time.Hour
keepTemp = 5 * time.Minute
)
var caches []*Cache
if err := h.db.Find(&caches, bolthold.Where("UsedAt").Lt(time.Now().Add(-keepTemp).Unix())); err != nil {
h.logger.Warnf("find caches: %v", err)
} else {
for _, cache := range caches {
if cache.Complete {
continue
}
h.storage.Remove(cache.ID)
if err := h.db.Delete(cache.ID, cache); err != nil {
h.logger.Warnf("delete cache: %v", err)
continue
}
h.logger.Infof("deleted cache: %+v", cache)
}
}
caches = caches[:0]
if err := h.db.Find(&caches, bolthold.Where("UsedAt").Lt(time.Now().Add(-keepUnused).Unix())); err != nil {
h.logger.Warnf("find caches: %v", err)
} else {
for _, cache := range caches {
h.storage.Remove(cache.ID)
if err := h.db.Delete(cache.ID, cache); err != nil {
h.logger.Warnf("delete cache: %v", err)
continue
}
h.logger.Infof("deleted cache: %+v", cache)
}
}
caches = caches[:0]
if err := h.db.Find(&caches, bolthold.Where("CreatedAt").Lt(time.Now().Add(-keepUsed).Unix())); err != nil {
h.logger.Warnf("find caches: %v", err)
} else {
for _, cache := range caches {
h.storage.Remove(cache.ID)
if err := h.db.Delete(cache.ID, cache); err != nil {
h.logger.Warnf("delete cache: %v", err)
continue
}
h.logger.Infof("deleted cache: %+v", cache)
}
}
}
func (h *Handler) responseJSON(w http.ResponseWriter, r *http.Request, code int, v ...any) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
var data []byte
if len(v) == 0 || v[0] == nil {
data, _ = json.Marshal(struct{}{})
} else if err, ok := v[0].(error); ok {
h.logger.Errorf("%v %v: %v", r.Method, r.RequestURI, err)
data, _ = json.Marshal(map[string]any{
"error": err.Error(),
})
} else {
data, _ = json.Marshal(v[0])
}
w.WriteHeader(code)
_, _ = w.Write(data)
}
func parseContentRange(s string) (int64, int64, error) {
// support the format like "bytes 11-22/*" only
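// e.g. parseContentRange("bytes 0-99/*") returns (0, 99, nil)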
s, _, _ = strings.Cut(strings.TrimPrefix(s, "bytes "), "/")
s1, s2, _ := strings.Cut(s, "-")
start, err := strconv.ParseInt(s1, 10, 64)
if err != nil {
return 0, 0, fmt.Errorf("parse %q: %w", s, err)
}
stop, err := strconv.ParseInt(s2, 10, 64)
if err != nil {
return 0, 0, fmt.Errorf("parse %q: %w", s, err)
}
return start, stop, nil
}


@ -0,0 +1,469 @@
package artifactcache
import (
"bytes"
"crypto/rand"
"encoding/json"
"fmt"
"io"
"net/http"
"path/filepath"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.etcd.io/bbolt"
)
func TestHandler(t *testing.T) {
dir := filepath.Join(t.TempDir(), "artifactcache")
handler, err := StartHandler(dir, "", 0, nil)
require.NoError(t, err)
base := fmt.Sprintf("%s%s", handler.ExternalURL(), urlBase)
defer func() {
t.Run("inpect db", func(t *testing.T) {
require.NoError(t, handler.db.Bolt().View(func(tx *bbolt.Tx) error {
return tx.Bucket([]byte("Cache")).ForEach(func(k, v []byte) error {
t.Logf("%s: %s", k, v)
return nil
})
}))
})
t.Run("close", func(t *testing.T) {
require.NoError(t, handler.Close())
assert.Nil(t, handler.server)
assert.Nil(t, handler.listener)
assert.Nil(t, handler.db)
_, err := http.Post(fmt.Sprintf("%s/caches/%d", base, 1), "", nil)
assert.Error(t, err)
})
}()
t.Run("get not exist", func(t *testing.T) {
key := strings.ToLower(t.Name())
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, key, version))
require.NoError(t, err)
require.Equal(t, 204, resp.StatusCode)
})
t.Run("reserve and upload", func(t *testing.T) {
key := strings.ToLower(t.Name())
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
content := make([]byte, 100)
_, err := rand.Read(content)
require.NoError(t, err)
uploadCacheNormally(t, base, key, version, content)
})
t.Run("clean", func(t *testing.T) {
resp, err := http.Post(fmt.Sprintf("%s/clean", base), "", nil)
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
})
t.Run("reserve with bad request", func(t *testing.T) {
body := []byte(`invalid json`)
require.NoError(t, err)
resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
require.NoError(t, err)
assert.Equal(t, 400, resp.StatusCode)
})
t.Run("duplicate reserve", func(t *testing.T) {
key := strings.ToLower(t.Name())
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
{
body, err := json.Marshal(&Request{
Key: key,
Version: version,
Size: 100,
})
require.NoError(t, err)
resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
got := struct {
CacheID uint64 `json:"cacheId"`
}{}
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
}
{
body, err := json.Marshal(&Request{
Key: key,
Version: version,
Size: 100,
})
require.NoError(t, err)
resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
require.NoError(t, err)
assert.Equal(t, 400, resp.StatusCode)
}
})
t.Run("upload with bad id", func(t *testing.T) {
req, err := http.NewRequest(http.MethodPatch,
fmt.Sprintf("%s/caches/invalid_id", base), bytes.NewReader(nil))
require.NoError(t, err)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Content-Range", "bytes 0-99/*")
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
assert.Equal(t, 400, resp.StatusCode)
})
t.Run("upload without reserve", func(t *testing.T) {
req, err := http.NewRequest(http.MethodPatch,
fmt.Sprintf("%s/caches/%d", base, 1000), bytes.NewReader(nil))
require.NoError(t, err)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Content-Range", "bytes 0-99/*")
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
assert.Equal(t, 400, resp.StatusCode)
})
t.Run("upload with complete", func(t *testing.T) {
key := strings.ToLower(t.Name())
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
var id uint64
content := make([]byte, 100)
_, err := rand.Read(content)
require.NoError(t, err)
{
body, err := json.Marshal(&Request{
Key: key,
Version: version,
Size: 100,
})
require.NoError(t, err)
resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
got := struct {
CacheID uint64 `json:"cacheId"`
}{}
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
id = got.CacheID
}
{
req, err := http.NewRequest(http.MethodPatch,
fmt.Sprintf("%s/caches/%d", base, id), bytes.NewReader(content))
require.NoError(t, err)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Content-Range", "bytes 0-99/*")
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
}
{
resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil)
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
}
{
req, err := http.NewRequest(http.MethodPatch,
fmt.Sprintf("%s/caches/%d", base, id), bytes.NewReader(content))
require.NoError(t, err)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Content-Range", "bytes 0-99/*")
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
assert.Equal(t, 400, resp.StatusCode)
}
})
t.Run("upload with invalid range", func(t *testing.T) {
key := strings.ToLower(t.Name())
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
var id uint64
content := make([]byte, 100)
_, err := rand.Read(content)
require.NoError(t, err)
{
body, err := json.Marshal(&Request{
Key: key,
Version: version,
Size: 100,
})
require.NoError(t, err)
resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
got := struct {
CacheID uint64 `json:"cacheId"`
}{}
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
id = got.CacheID
}
{
req, err := http.NewRequest(http.MethodPatch,
fmt.Sprintf("%s/caches/%d", base, id), bytes.NewReader(content))
require.NoError(t, err)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Content-Range", "bytes xx-99/*")
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
assert.Equal(t, 400, resp.StatusCode)
}
})
t.Run("commit with bad id", func(t *testing.T) {
{
resp, err := http.Post(fmt.Sprintf("%s/caches/invalid_id", base), "", nil)
require.NoError(t, err)
assert.Equal(t, 400, resp.StatusCode)
}
})
t.Run("commit with not exist id", func(t *testing.T) {
{
resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, 100), "", nil)
require.NoError(t, err)
assert.Equal(t, 400, resp.StatusCode)
}
})
t.Run("duplicate commit", func(t *testing.T) {
key := strings.ToLower(t.Name())
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
var id uint64
content := make([]byte, 100)
_, err := rand.Read(content)
require.NoError(t, err)
{
body, err := json.Marshal(&Request{
Key: key,
Version: version,
Size: 100,
})
require.NoError(t, err)
resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
got := struct {
CacheID uint64 `json:"cacheId"`
}{}
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
id = got.CacheID
}
{
req, err := http.NewRequest(http.MethodPatch,
fmt.Sprintf("%s/caches/%d", base, id), bytes.NewReader(content))
require.NoError(t, err)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Content-Range", "bytes 0-99/*")
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
}
{
resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil)
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
}
{
resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil)
require.NoError(t, err)
assert.Equal(t, 400, resp.StatusCode)
}
})
t.Run("commit early", func(t *testing.T) {
key := strings.ToLower(t.Name())
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
var id uint64
content := make([]byte, 100)
_, err := rand.Read(content)
require.NoError(t, err)
{
body, err := json.Marshal(&Request{
Key: key,
Version: version,
Size: 100,
})
require.NoError(t, err)
resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
got := struct {
CacheID uint64 `json:"cacheId"`
}{}
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
id = got.CacheID
}
{
req, err := http.NewRequest(http.MethodPatch,
fmt.Sprintf("%s/caches/%d", base, id), bytes.NewReader(content[:50]))
require.NoError(t, err)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Content-Range", "bytes 0-59/*")
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
}
{
resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil)
require.NoError(t, err)
assert.Equal(t, 500, resp.StatusCode)
}
})
t.Run("get with bad id", func(t *testing.T) {
resp, err := http.Get(fmt.Sprintf("%s/artifacts/invalid_id", base))
require.NoError(t, err)
require.Equal(t, 400, resp.StatusCode)
})
t.Run("get with not exist id", func(t *testing.T) {
resp, err := http.Get(fmt.Sprintf("%s/artifacts/%d", base, 100))
require.NoError(t, err)
require.Equal(t, 404, resp.StatusCode)
})
t.Run("get with not exist id", func(t *testing.T) {
resp, err := http.Get(fmt.Sprintf("%s/artifacts/%d", base, 100))
require.NoError(t, err)
require.Equal(t, 404, resp.StatusCode)
})
t.Run("get with multiple keys", func(t *testing.T) {
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
key := strings.ToLower(t.Name())
keys := [3]string{
key + "_a",
key + "_a_b",
key + "_a_b_c",
}
contents := [3][]byte{
make([]byte, 100),
make([]byte, 200),
make([]byte, 300),
}
for i := range contents {
_, err := rand.Read(contents[i])
require.NoError(t, err)
uploadCacheNormally(t, base, keys[i], version, contents[i])
}
reqKeys := strings.Join([]string{
key + "_a_b_x",
key + "_a_b",
key + "_a",
}, ",")
var archiveLocation string
{
resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKeys, version))
require.NoError(t, err)
require.Equal(t, 200, resp.StatusCode)
got := struct {
Result string `json:"result"`
ArchiveLocation string `json:"archiveLocation"`
CacheKey string `json:"cacheKey"`
}{}
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
assert.Equal(t, "hit", got.Result)
assert.Equal(t, keys[1], got.CacheKey)
archiveLocation = got.ArchiveLocation
}
{
resp, err := http.Get(archiveLocation) //nolint:gosec
require.NoError(t, err)
require.Equal(t, 200, resp.StatusCode)
got, err := io.ReadAll(resp.Body)
require.NoError(t, err)
assert.Equal(t, contents[1], got)
}
})
t.Run("case insensitive", func(t *testing.T) {
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
key := strings.ToLower(t.Name())
content := make([]byte, 100)
_, err := rand.Read(content)
require.NoError(t, err)
uploadCacheNormally(t, base, key+"_ABC", version, content)
{
reqKey := key + "_aBc"
resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKey, version))
require.NoError(t, err)
require.Equal(t, 200, resp.StatusCode)
got := struct {
Result string `json:"result"`
ArchiveLocation string `json:"archiveLocation"`
CacheKey string `json:"cacheKey"`
}{}
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
assert.Equal(t, "hit", got.Result)
assert.Equal(t, key+"_abc", got.CacheKey)
}
})
}
func uploadCacheNormally(t *testing.T, base, key, version string, content []byte) {
var id uint64
{
body, err := json.Marshal(&Request{
Key: key,
Version: version,
Size: int64(len(content)),
})
require.NoError(t, err)
resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
got := struct {
CacheID uint64 `json:"cacheId"`
}{}
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
id = got.CacheID
}
{
req, err := http.NewRequest(http.MethodPatch,
fmt.Sprintf("%s/caches/%d", base, id), bytes.NewReader(content))
require.NoError(t, err)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Content-Range", "bytes 0-99/*")
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
}
{
resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil)
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
}
var archiveLocation string
{
resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, key, version))
require.NoError(t, err)
require.Equal(t, 200, resp.StatusCode)
got := struct {
Result string `json:"result"`
ArchiveLocation string `json:"archiveLocation"`
CacheKey string `json:"cacheKey"`
}{}
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
assert.Equal(t, "hit", got.Result)
assert.Equal(t, strings.ToLower(key), got.CacheKey)
archiveLocation = got.ArchiveLocation
}
{
resp, err := http.Get(archiveLocation) //nolint:gosec
require.NoError(t, err)
require.Equal(t, 200, resp.StatusCode)
got, err := io.ReadAll(resp.Body)
require.NoError(t, err)
assert.Equal(t, content, got)
}
}


@ -0,0 +1,38 @@
package artifactcache
import (
"crypto/sha256"
"fmt"
)
type Request struct {
Key string `json:"key"`
Version string `json:"version"`
Size int64 `json:"cacheSize"`
}
func (c *Request) ToCache() *Cache {
if c == nil {
return nil
}
return &Cache{
Key: c.Key,
Version: c.Version,
Size: c.Size,
}
}
type Cache struct {
ID uint64 `json:"id" boltholdKey:"ID"`
Key string `json:"key" boltholdIndex:"Key"`
Version string `json:"version" boltholdIndex:"Version"`
KeyVersionHash string `json:"keyVersionHash" boltholdUnique:"KeyVersionHash"`
Size int64 `json:"cacheSize"`
Complete bool `json:"complete"`
UsedAt int64 `json:"usedAt" boltholdIndex:"UsedAt"`
CreatedAt int64 `json:"createdAt" boltholdIndex:"CreatedAt"`
}
func (c *Cache) FillKeyVersionHash() {
c.KeyVersionHash = fmt.Sprintf("%x", sha256.Sum256([]byte(fmt.Sprintf("%s:%s", c.Key, c.Version))))
}
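
In other words, a cache entry is identified by the (key, version) pair: FillKeyVersionHash stores sha256("key:version") in KeyVersionHash, the unique bolthold index that the find and reserve handlers look up.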


@ -0,0 +1,126 @@
package artifactcache
import (
"fmt"
"io"
"net/http"
"os"
"path/filepath"
)
type Storage struct {
rootDir string
}
func NewStorage(rootDir string) (*Storage, error) {
if err := os.MkdirAll(rootDir, 0o755); err != nil {
return nil, err
}
return &Storage{
rootDir: rootDir,
}, nil
}
func (s *Storage) Exist(id uint64) (bool, error) {
name := s.filename(id)
if _, err := os.Stat(name); os.IsNotExist(err) {
return false, nil
} else if err != nil {
return false, err
}
return true, nil
}
func (s *Storage) Write(id uint64, offset int64, reader io.Reader) error {
name := s.tempName(id, offset)
if err := os.MkdirAll(filepath.Dir(name), 0o755); err != nil {
return err
}
file, err := os.Create(name)
if err != nil {
return err
}
defer file.Close()
_, err = io.Copy(file, reader)
return err
}
func (s *Storage) Commit(id uint64, size int64) error {
defer func() {
_ = os.RemoveAll(s.tempDir(id))
}()
name := s.filename(id)
tempNames, err := s.tempNames(id)
if err != nil {
return err
}
if err := os.MkdirAll(filepath.Dir(name), 0o755); err != nil {
return err
}
file, err := os.Create(name)
if err != nil {
return err
}
defer file.Close()
var written int64
for _, v := range tempNames {
f, err := os.Open(v)
if err != nil {
return err
}
n, err := io.Copy(file, f)
_ = f.Close()
if err != nil {
return err
}
written += n
}
if written != size {
_ = file.Close()
_ = os.Remove(name)
return fmt.Errorf("broken file: %v != %v", written, size)
}
return nil
}
func (s *Storage) Serve(w http.ResponseWriter, r *http.Request, id uint64) {
name := s.filename(id)
http.ServeFile(w, r, name)
}
func (s *Storage) Remove(id uint64) {
_ = os.Remove(s.filename(id))
_ = os.RemoveAll(s.tempDir(id))
}
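// Note: filename below shards cache files across two-hex-digit subdirectories (id % 0xff) so no single directory grows too large.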
func (s *Storage) filename(id uint64) string {
return filepath.Join(s.rootDir, fmt.Sprintf("%02x", id%0xff), fmt.Sprint(id))
}
func (s *Storage) tempDir(id uint64) string {
return filepath.Join(s.rootDir, "tmp", fmt.Sprint(id))
}
func (s *Storage) tempName(id uint64, offset int64) string {
return filepath.Join(s.tempDir(id), fmt.Sprintf("%016x", offset))
}
func (s *Storage) tempNames(id uint64) ([]string, error) {
dir := s.tempDir(id)
files, err := os.ReadDir(dir)
if err != nil {
return nil, err
}
var names []string
for _, v := range files {
if !v.IsDir() {
names = append(names, filepath.Join(dir, v.Name()))
}
}
return names, nil
}


@ -0,0 +1,30 @@
# Copied from https://github.com/actions/cache#example-cache-workflow
name: Caching Primes
on: push
jobs:
build:
runs-on: ubuntu-latest
steps:
- run: env
- uses: actions/checkout@v3
- name: Cache Primes
id: cache-primes
uses: actions/cache@v3
with:
path: prime-numbers
key: ${{ runner.os }}-primes-${{ github.run_id }}
restore-keys: |
${{ runner.os }}-primes
${{ runner.os }}
- name: Generate Prime Numbers
if: steps.cache-primes.outputs.cache-hit != 'true'
run: cat /proc/sys/kernel/random/uuid > prime-numbers
- name: Use Prime Numbers
run: cat prime-numbers


@ -2,20 +2,74 @@ package common
import (
"net"
log "github.com/sirupsen/logrus"
"sort"
"strings"
)
-// https://stackoverflow.com/a/37382208
-// Get preferred outbound ip of this machine
+// GetOutboundIP returns an outbound IP address of this machine.
+// It tries to access the internet and returns the local IP address of the connection.
+// If the machine cannot access the internet, it returns a preferred IP address from network interfaces.
+// It returns nil if no IP address is found.
func GetOutboundIP() net.IP {
+// See https://stackoverflow.com/a/37382208
conn, err := net.Dial("udp", "8.8.8.8:80")
-if err != nil {
-log.Fatal(err)
-}
-defer conn.Close()
-localAddr := conn.LocalAddr().(*net.UDPAddr)
-return localAddr.IP
+if err == nil {
+defer conn.Close()
+return conn.LocalAddr().(*net.UDPAddr).IP
+}
// So the machine cannot access the internet. Pick an IP address from network interfaces.
if ifs, err := net.Interfaces(); err == nil {
type IP struct {
net.IP
net.Interface
}
var ips []IP
for _, i := range ifs {
if addrs, err := i.Addrs(); err == nil {
for _, addr := range addrs {
var ip net.IP
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
}
if ip.IsGlobalUnicast() {
ips = append(ips, IP{ip, i})
}
}
}
}
if len(ips) > 1 {
sort.Slice(ips, func(i, j int) bool {
ifi := ips[i].Interface
ifj := ips[j].Interface
// ethernet is preferred
if vi, vj := strings.HasPrefix(ifi.Name, "e"), strings.HasPrefix(ifj.Name, "e"); vi != vj {
return vi
}
ipi := ips[i].IP
ipj := ips[j].IP
// IPv4 is preferred
if vi, vj := ipi.To4() != nil, ipj.To4() != nil; vi != vj {
return vi
}
// en0 is preferred to en1
if ifi.Name != ifj.Name {
return ifi.Name < ifj.Name
}
// fallback
return ipi.String() < ipj.String()
})
return ips[0].IP
}
}
return nil
}


@ -210,9 +210,6 @@ type containerReference struct {
}
func GetDockerClient(ctx context.Context) (cli client.APIClient, err error) {
-// TODO: this should maybe need to be a global option, not hidden in here?
-// though i'm not sure how that works out when there's another Executor :D
-// I really would like something that works on OSX native for eg
dockerHost := os.Getenv("DOCKER_HOST")
if strings.HasPrefix(dockerHost, "ssh://") {
@ -369,6 +366,12 @@ func (cr *containerReference) mergeContainerConfigs(ctx context.Context, config
return nil, nil, fmt.Errorf("Cannot parse container options: '%s': '%w'", input.Options, err)
}
if len(copts.netMode.Value()) == 0 {
if err = copts.netMode.Set("host"); err != nil {
return nil, nil, fmt.Errorf("Cannot parse networkmode=host. This is an internal error and should not happen: '%w'", err)
}
}
containerConfig, err := parse(flags, copts, "")
if err != nil {
return nil, nil, fmt.Errorf("Cannot process container options: '%s': '%w'", input.Options, err)


@ -10,4 +10,6 @@ type ExecutionsEnvironment interface {
DefaultPathVariable() string
JoinPathVariable(...string) string
GetRunnerContext(ctx context.Context) map[string]interface{}
// On Windows, PATH and Path are the same key
IsEnvironmentCaseInsensitive() bool
}


@ -425,3 +425,7 @@ func (e *HostEnvironment) ReplaceLogWriter(stdout io.Writer, stderr io.Writer) (
e.StdOut = stdout
return org, org
}
func (*HostEnvironment) IsEnvironmentCaseInsensitive() bool {
return runtime.GOOS == "windows"
}


@ -71,3 +71,7 @@ func (*LinuxContainerEnvironmentExtensions) GetRunnerContext(ctx context.Context
"tool_cache": "/opt/hostedtoolcache",
}
}
func (*LinuxContainerEnvironmentExtensions) IsEnvironmentCaseInsensitive() bool {
return false
}


@ -36,6 +36,9 @@ type GithubContext struct {
RetentionDays string `json:"retention_days"`
RunnerPerflog string `json:"runner_perflog"`
RunnerTrackingID string `json:"runner_tracking_id"`
ServerURL string `json:"server_url"`
APIURL string `json:"api_url"`
GraphQLURL string `json:"graphql_url"`
}
func asString(v interface{}) string {


@ -58,9 +58,8 @@ func (w *Workflow) On() []string {
func (w *Workflow) OnEvent(event string) interface{} {
if w.RawOn.Kind == yaml.MappingNode {
var val map[string]interface{}
err := w.RawOn.Decode(&val)
if err != nil {
log.Fatal(err)
if !decodeNode(w.RawOn, &val) {
return nil
}
return val[event]
}
@ -109,16 +108,14 @@ func (w *Workflow) WorkflowDispatchConfig() *WorkflowDispatch {
}
var val map[string]yaml.Node
err := w.RawOn.Decode(&val)
if err != nil {
log.Fatal(err)
if !decodeNode(w.RawOn, &val) {
return nil
}
var config WorkflowDispatch
node := val["workflow_dispatch"]
err = node.Decode(&config)
if err != nil {
log.Fatal(err)
if !decodeNode(node, &config) {
return nil
}
return &config
@ -147,20 +144,19 @@ type WorkflowCallResult struct {
func (w *Workflow) WorkflowCallConfig() *WorkflowCall {
if w.RawOn.Kind != yaml.MappingNode {
-return nil
+// The callers expect for "on: workflow_call" and "on: [ workflow_call ]" a non nil return value
+return &WorkflowCall{}
}
var val map[string]yaml.Node
err := w.RawOn.Decode(&val)
if err != nil {
log.Fatal(err)
if !decodeNode(w.RawOn, &val) {
return &WorkflowCall{}
}
var config WorkflowCall
node := val["workflow_call"]
err = node.Decode(&config)
if err != nil {
log.Fatal(err)
if !decodeNode(node, &config) {
return &WorkflowCall{}
}
return &config
@ -243,9 +239,8 @@ func (j *Job) InheritSecrets() bool {
}
var val string
err := j.RawSecrets.Decode(&val)
if err != nil {
log.Fatal(err)
if !decodeNode(j.RawSecrets, &val) {
return false
}
return val == "inherit"
@ -257,9 +252,8 @@ func (j *Job) Secrets() map[string]string {
}
var val map[string]string
err := j.RawSecrets.Decode(&val)
if err != nil {
log.Fatal(err)
if !decodeNode(j.RawSecrets, &val) {
return nil
}
return val


@ -335,13 +335,13 @@ func evalDockerArgs(ctx context.Context, step step, action *model.Action, cmd *[
inputs[k] = eval.Interpolate(ctx, v)
}
}
-mergeIntoMap(step.getEnv(), inputs)
+mergeIntoMap(step, step.getEnv(), inputs)
stepEE := rc.NewStepExpressionEvaluator(ctx, step)
for i, v := range *cmd {
(*cmd)[i] = stepEE.Interpolate(ctx, v)
}
-mergeIntoMap(step.getEnv(), action.Runs.Env)
+mergeIntoMap(step, step.getEnv(), action.Runs.Env)
ee := rc.NewStepExpressionEvaluator(ctx, step)
for k, v := range *step.getEnv() {


@ -105,13 +105,15 @@ func execAsComposite(step actionStep) common.Executor {
rc.Masks = append(rc.Masks, compositeRC.Masks...)
rc.ExtraPath = compositeRC.ExtraPath
// compositeRC.Env is dirty, contains INPUT_ and merged step env, only rely on compositeRC.GlobalEnv
-for k, v := range compositeRC.GlobalEnv {
-rc.Env[k] = v
-if rc.GlobalEnv == nil {
-rc.GlobalEnv = map[string]string{}
-}
-rc.GlobalEnv[k] = v
-}
+mergeIntoMap := mergeIntoMapCaseSensitive
+if rc.JobContainer.IsEnvironmentCaseInsensitive() {
+mergeIntoMap = mergeIntoMapCaseInsensitive
+}
+if rc.GlobalEnv == nil {
+rc.GlobalEnv = map[string]string{}
+}
+mergeIntoMap(rc.GlobalEnv, compositeRC.GlobalEnv)
+mergeIntoMap(rc.Env, compositeRC.GlobalEnv)
return err
}


@ -88,12 +88,18 @@ func (rc *RunContext) setEnv(ctx context.Context, kvPairs map[string]string, arg
if rc.Env == nil {
rc.Env = make(map[string]string)
}
-rc.Env[name] = arg
// for composite action GITHUB_ENV and set-env passing
if rc.GlobalEnv == nil {
rc.GlobalEnv = map[string]string{}
}
-rc.GlobalEnv[name] = arg
+newenv := map[string]string{
+name: arg,
+}
+mergeIntoMap := mergeIntoMapCaseSensitive
+if rc.JobContainer != nil && rc.JobContainer.IsEnvironmentCaseInsensitive() {
+mergeIntoMap = mergeIntoMapCaseInsensitive
+}
+mergeIntoMap(rc.Env, newenv)
+mergeIntoMap(rc.GlobalEnv, newenv)
}
func (rc *RunContext) setOutput(ctx context.Context, kvPairs map[string]string, arg string) {
logger := common.Logger(ctx)


@ -19,9 +19,7 @@ import (
"time"
"github.com/docker/docker/errdefs"
"github.com/mitchellh/go-homedir"
"github.com/opencontainers/selinux/go-selinux"
log "github.com/sirupsen/logrus"
"github.com/nektos/act/pkg/common"
"github.com/nektos/act/pkg/container"
@ -92,6 +90,24 @@ func (rc *RunContext) jobContainerName() string {
return createSimpleContainerName(rc.Config.ContainerNamePrefix, "WORKFLOW-"+rc.Run.Workflow.Name, "JOB-"+rc.Name)
}
func getDockerDaemonSocketMountPath(daemonPath string) string {
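// For example: "unix:///var/run/docker.sock" mounts "/var/run/docker.sock",
// while "npipe:////./pipe/docker_engine" (Docker Desktop on Windows) falls
// back to the Linux VM's default "/var/run/docker.sock".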
if protoIndex := strings.Index(daemonPath, "://"); protoIndex != -1 {
scheme := daemonPath[:protoIndex]
if strings.EqualFold(scheme, "npipe") {
// linux container mount on windows, use the default socket path of the VM / wsl2
return "/var/run/docker.sock"
} else if strings.EqualFold(scheme, "unix") {
return daemonPath[protoIndex+3:]
} else if strings.IndexFunc(scheme, func(r rune) bool {
return (r < 'a' || r > 'z') && (r < 'A' || r > 'Z')
}) == -1 {
// unknown protocol, use the default
return "/var/run/docker.sock"
}
}
return daemonPath
}
// Returns the binds and mounts for the container, resolving paths as appropriate
func (rc *RunContext) GetBindsAndMounts() ([]string, map[string]string) {
name := rc.jobContainerName()
@ -100,8 +116,10 @@ func (rc *RunContext) GetBindsAndMounts() ([]string, map[string]string) {
rc.Config.ContainerDaemonSocket = "/var/run/docker.sock"
}
-binds := []string{
-fmt.Sprintf("%s:%s", rc.Config.ContainerDaemonSocket, "/var/run/docker.sock"),
-}
+binds := []string{}
+if rc.Config.ContainerDaemonSocket != "-" {
+daemonPath := getDockerDaemonSocketMountPath(rc.Config.ContainerDaemonSocket)
+binds = append(binds, fmt.Sprintf("%s:%s", daemonPath, "/var/run/docker.sock"))
+}
ext := container.LinuxContainerEnvironmentExtensions{}
@ -377,6 +395,15 @@ func (rc *RunContext) execJobContainer(cmd []string, env map[string]string, user
func (rc *RunContext) ApplyExtraPath(ctx context.Context, env *map[string]string) {
if rc.ExtraPath != nil && len(rc.ExtraPath) > 0 {
path := rc.JobContainer.GetPathVariableName()
if rc.JobContainer.IsEnvironmentCaseInsensitive() {
// On Windows systems, both Path and PATH could be in the map
for k := range *env {
if strings.EqualFold(path, k) {
path = k
break
}
}
}
if (*env)[path] == "" {
cenv := map[string]string{}
var cpath string
@ -471,10 +498,11 @@ func (rc *RunContext) ActionCacheDir() string {
var xdgCache string
var ok bool
if xdgCache, ok = os.LookupEnv("XDG_CACHE_HOME"); !ok || xdgCache == "" {
-if home, err := homedir.Dir(); err == nil {
+if home, err := os.UserHomeDir(); err == nil {
xdgCache = filepath.Join(home, ".cache")
} else if xdgCache, err = filepath.Abs("."); err != nil {
-log.Fatal(err)
+// It's almost impossible to get here, so the temp dir is a good fallback
+xdgCache = os.TempDir()
}
}
return filepath.Join(xdgCache, "act")
@ -796,6 +824,27 @@ func (rc *RunContext) getGithubContext(ctx context.Context) *model.GithubContext
ghc.SetRefTypeAndName()
// defaults
ghc.ServerURL = "https://github.com"
ghc.APIURL = "https://api.github.com"
ghc.GraphQLURL = "https://api.github.com/graphql"
// per GHES
if rc.Config.GitHubInstance != "github.com" {
ghc.ServerURL = fmt.Sprintf("https://%s", rc.Config.GitHubInstance)
ghc.APIURL = fmt.Sprintf("https://%s/api/v3", rc.Config.GitHubInstance)
ghc.GraphQLURL = fmt.Sprintf("https://%s/api/graphql", rc.Config.GitHubInstance)
}
// allow to be overridden by user
if rc.Config.Env["GITHUB_SERVER_URL"] != "" {
ghc.ServerURL = rc.Config.Env["GITHUB_SERVER_URL"]
}
if rc.Config.Env["GITHUB_API_URL"] != "" {
ghc.APIURL = rc.Config.Env["GITHUB_API_URL"]
}
if rc.Config.Env["GITHUB_GRAPHQL_URL"] != "" {
ghc.GraphQLURL = rc.Config.Env["GITHUB_GRAPHQL_URL"]
}
return ghc
}
@ -869,16 +918,9 @@ func (rc *RunContext) withGithubEnv(ctx context.Context, github *model.GithubCon
env["RUNNER_TRACKING_ID"] = github.RunnerTrackingID
env["GITHUB_BASE_REF"] = github.BaseRef
env["GITHUB_HEAD_REF"] = github.HeadRef
defaultServerURL := "https://github.com"
defaultAPIURL := "https://api.github.com"
defaultGraphqlURL := "https://api.github.com/graphql"
if rc.Config.GitHubInstance != "github.com" {
defaultServerURL = fmt.Sprintf("https://%s", rc.Config.GitHubInstance)
defaultAPIURL = fmt.Sprintf("https://%s/api/v3", rc.Config.GitHubInstance)
defaultGraphqlURL = fmt.Sprintf("https://%s/api/graphql", rc.Config.GitHubInstance)
}
env["GITHUB_SERVER_URL"] = github.ServerURL
env["GITHUB_API_URL"] = github.APIURL
env["GITHUB_GRAPHQL_URL"] = github.GraphQLURL
{ // Adapt to Gitea
instance := rc.Config.GitHubInstance
@ -886,21 +928,9 @@ func (rc *RunContext) withGithubEnv(ctx context.Context, github *model.GithubCon
!strings.HasPrefix(instance, "https://") {
instance = "https://" + instance
}
-defaultServerURL = instance
-defaultAPIURL = instance + "/api/v1" // the version of Gitea is v1
-defaultGraphqlURL = "" // Gitea doesn't support graphql
-}
-if env["GITHUB_SERVER_URL"] == "" {
-env["GITHUB_SERVER_URL"] = defaultServerURL
-}
-if env["GITHUB_API_URL"] == "" {
-env["GITHUB_API_URL"] = defaultAPIURL
-}
-if env["GITHUB_GRAPHQL_URL"] == "" {
-env["GITHUB_GRAPHQL_URL"] = defaultGraphqlURL
-}
+env["GITHUB_SERVER_URL"] = instance
+env["GITHUB_API_URL"] = instance + "/api/v1" // the version of Gitea is v1
+env["GITHUB_GRAPHQL_URL"] = "" // Gitea doesn't support graphql
}
if rc.Config.ArtifactServerPath != "" {


@ -5,13 +5,12 @@ import (
"encoding/json"
"fmt"
"os"
"time"
log "github.com/sirupsen/logrus"
"github.com/nektos/act/pkg/common"
"github.com/nektos/act/pkg/container"
"github.com/nektos/act/pkg/model"
log "github.com/sirupsen/logrus"
)
// Runner provides capabilities to run GitHub actions
@ -21,40 +20,41 @@ type Runner interface {
// Config contains the config for a new runner
type Config struct {
Actor string // the user that triggered the event
Workdir string // path to working directory
BindWorkdir bool // bind the workdir to the job container
EventName string // name of event to run
EventPath string // path to JSON file to use for event.json in containers
DefaultBranch string // name of the main branch for this repository
ReuseContainers bool // reuse containers to maintain state
ForcePull bool // force pulling of the image, even if already present
ForceRebuild bool // force rebuilding local docker image action
LogOutput bool // log the output from docker run
JSONLogger bool // use json or text logger
Env map[string]string // env for containers
Inputs map[string]string // manually passed action inputs
Secrets map[string]string // list of secrets
Token string // GitHub token
InsecureSecrets bool // switch hiding output when printing to terminal
Platforms map[string]string // list of platforms
Privileged bool // use privileged mode
UsernsMode string // user namespace to use
ContainerArchitecture string // Desired OS/architecture platform for running containers
ContainerDaemonSocket string // Path to Docker daemon socket
ContainerOptions string // Options for the job container
UseGitIgnore bool // controls if paths in .gitignore should not be copied into container, default true
GitHubInstance string // GitHub instance to use, default "github.com"
ContainerCapAdd []string // list of kernel capabilities to add to the containers
ContainerCapDrop []string // list of kernel capabilities to remove from the containers
AutoRemove bool // controls if the container is automatically removed upon workflow completion
ArtifactServerPath string // the path where the artifact server stores uploads
ArtifactServerAddr string // the address the artifact server binds to
ArtifactServerPort string // the port the artifact server binds to
NoSkipCheckout bool // do not skip actions/checkout
RemoteName string // remote name in local git repo config
ReplaceGheActionWithGithubCom []string // Use actions from GitHub Enterprise instance to GitHub
ReplaceGheActionTokenWithGithubCom string // Token of private action repo on GitHub.
Matrix map[string]map[string]bool // Matrix config to run
PresetGitHubContext *model.GithubContext // the preset github context, overrides some fields like DefaultBranch, Env, Secrets etc.
EventJSON string // the content of JSON file to use for event.json in containers, overrides EventPath
@ -67,6 +67,7 @@ type Config struct {
Vars map[string]string // the list of variables set at the repository, environment, or organization levels.
}
// GetToken: Adapt to Gitea
func (c Config) GetToken() string {
token := c.Secrets["GITHUB_TOKEN"]
if c.Secrets["GITEA_TOKEN"] != "" {
@ -137,11 +138,15 @@ func (runner *runnerImpl) NewPlanExecutor(plan *model.Plan) common.Executor {
log.Errorf("Error while evaluating matrix: %v", err)
}
}
-matrixes, err := job.GetMatrixes()
-if err != nil {
+var matrixes []map[string]interface{}
+if m, err := job.GetMatrixes(); err != nil {
log.Errorf("Error while get job's matrix: %v", err)
+// fall back to empty matrixes
+} else {
+matrixes = selectMatrixes(m, runner.config.Matrix)
}
+log.Debugf("Final matrix after applying user inclusions '%v'", matrixes)
maxParallel := 4
if job.Strategy != nil {
maxParallel = job.Strategy.MaxParallel
@ -196,6 +201,25 @@ func handleFailure(plan *model.Plan) common.Executor {
}
}
func selectMatrixes(originalMatrixes []map[string]interface{}, targetMatrixValues map[string]map[string]bool) []map[string]interface{} {
matrixes := make([]map[string]interface{}, 0)
for _, original := range originalMatrixes {
flag := true
for key, val := range original {
if allowedVals, ok := targetMatrixValues[key]; ok {
valToString := fmt.Sprintf("%v", val)
if _, ok := allowedVals[valToString]; !ok {
flag = false
}
}
}
if flag {
matrixes = append(matrixes, original)
}
}
return matrixes
}
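
For illustration, a hedged sketch of the filtering (names and values hypothetical): a row survives only if every key that appears in the user's selection has an allowed stringified value; keys absent from the selection are unconstrained.

original := []map[string]interface{}{
	{"os": "ubuntu-18.04", "node": 4},
	{"os": "ubuntu-18.04", "node": 8},
}
wanted := map[string]map[string]bool{"node": {"8": true}} // e.g. built from the new Matrix config
filtered := selectMatrixes(original, wanted)
// filtered == [{"os": "ubuntu-18.04", "node": 8}]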
func (runner *runnerImpl) newRunContext(ctx context.Context, run *model.Run, matrix map[string]interface{}) *RunContext {
rc := &RunContext{
Config: runner.config,


@ -186,6 +186,7 @@ func (j *TestJobFileInfo) runTest(ctx context.Context, t *testing.T, cfg *Config
Inputs: cfg.Inputs,
GitHubInstance: "github.com",
ContainerArchitecture: cfg.ContainerArchitecture,
Matrix: cfg.Matrix,
}
runner, err := New(runnerConfig)
@ -293,7 +294,6 @@ func TestRunEvent(t *testing.T) {
{workdir, "workflow_dispatch-scalar-composite-action", "workflow_dispatch", "", platforms, secrets},
{workdir, "job-needs-context-contains-result", "push", "", platforms, secrets},
{"../model/testdata", "strategy", "push", "", platforms, secrets}, // TODO: move all testdata into pkg so we can validate it with planner and runner
// {"testdata", "issue-228", "push", "", platforms, }, // TODO [igni]: Remove this once everything passes
{"../model/testdata", "container-volumes", "push", "", platforms, secrets},
{workdir, "path-handling", "push", "", platforms, secrets},
{workdir, "do-not-leak-step-env-in-composite", "push", "", platforms, secrets},
@ -621,3 +621,30 @@ func TestRunEventPullRequest(t *testing.T) {
tjfi.runTest(context.Background(), t, &Config{EventPath: filepath.Join(workdir, workflowPath, "event.json")})
}
func TestRunMatrixWithUserDefinedInclusions(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test")
}
workflowPath := "matrix-with-user-inclusions"
tjfi := TestJobFileInfo{
workdir: workdir,
workflowPath: workflowPath,
eventName: "push",
errorMessage: "",
platforms: platforms,
}
matrix := map[string]map[string]bool{
"node": {
"8": true,
"8.x": true,
},
"os": {
"ubuntu-18.04": true,
},
}
tjfi.runTest(context.Background(), t, &Config{Matrix: matrix})
}


@ -187,7 +187,7 @@ func setupEnv(ctx context.Context, step step) error {
mergeEnv(ctx, step)
// merge step env last, since it should not be overwritten
-mergeIntoMap(step.getEnv(), step.getStepModel().GetEnv())
+mergeIntoMap(step, step.getEnv(), step.getStepModel().GetEnv())
exprEval := rc.NewExpressionEvaluator(ctx)
for k, v := range *step.getEnv() {
@ -216,9 +216,9 @@ func mergeEnv(ctx context.Context, step step) {
c := job.Container()
if c != nil {
-mergeIntoMap(env, rc.GetEnv(), c.Env)
+mergeIntoMap(step, env, rc.GetEnv(), c.Env)
} else {
-mergeIntoMap(env, rc.GetEnv())
+mergeIntoMap(step, env, rc.GetEnv())
}
rc.withGithubEnv(ctx, step.getGithubContext(ctx), *env)
@ -258,10 +258,38 @@ func isContinueOnError(ctx context.Context, expr string, step step, stage stepSt
return continueOnError, nil
}
-func mergeIntoMap(target *map[string]string, maps ...map[string]string) {
+func mergeIntoMap(step step, target *map[string]string, maps ...map[string]string) {
if rc := step.getRunContext(); rc != nil && rc.JobContainer != nil && rc.JobContainer.IsEnvironmentCaseInsensitive() {
mergeIntoMapCaseInsensitive(*target, maps...)
} else {
mergeIntoMapCaseSensitive(*target, maps...)
}
}
func mergeIntoMapCaseSensitive(target map[string]string, maps ...map[string]string) {
for _, m := range maps {
for k, v := range m {
-(*target)[k] = v
+target[k] = v
}
}
}
func mergeIntoMapCaseInsensitive(target map[string]string, maps ...map[string]string) {
foldKeys := make(map[string]string, len(target))
for k := range target {
foldKeys[strings.ToLower(k)] = k
}
toKey := func(s string) string {
foldKey := strings.ToLower(s)
if k, ok := foldKeys[foldKey]; ok {
return k
}
foldKeys[strings.ToLower(foldKey)] = s
return s
}
for _, m := range maps {
for k, v := range m {
target[toKey(k)] = v
}
}
}
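
A small usage sketch (values illustrative): when the target already contains a key differing only by case, the merge reuses the existing casing instead of adding a duplicate entry.

target := map[string]string{"Path": `C:\Windows`}
mergeIntoMapCaseInsensitive(target, map[string]string{"PATH": `C:\Windows;C:\tools`})
// target now holds a single entry: "Path" -> "C:\Windows;C:\tools"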


@ -13,10 +13,11 @@ import (
)
type stepRun struct {
Step *model.Step
RunContext *RunContext
cmd []string
env map[string]string
WorkingDirectory string
}
func (sr *stepRun) pre() common.Executor {
@ -27,12 +28,11 @@ func (sr *stepRun) pre() common.Executor {
func (sr *stepRun) main() common.Executor {
sr.env = map[string]string{}
return runStepExecutor(sr, stepStageMain, common.NewPipelineExecutor(
sr.setupShellCommandExecutor(),
func(ctx context.Context) error {
sr.getRunContext().ApplyExtraPath(ctx, &sr.env)
return sr.getRunContext().JobContainer.Exec(sr.cmd, sr.env, "", sr.Step.WorkingDirectory)(ctx)
return sr.getRunContext().JobContainer.Exec(sr.cmd, sr.env, "", sr.WorkingDirectory)(ctx)
},
))
}
@ -167,16 +167,20 @@ func (sr *stepRun) setupShell(ctx context.Context) {
func (sr *stepRun) setupWorkingDirectory(ctx context.Context) {
rc := sr.RunContext
step := sr.Step
workingdirectory := ""
if step.WorkingDirectory == "" {
step.WorkingDirectory = rc.Run.Job().Defaults.Run.WorkingDirectory
workingdirectory = rc.Run.Job().Defaults.Run.WorkingDirectory
} else {
workingdirectory = step.WorkingDirectory
}
// jobs can receive context values, so we interpolate
step.WorkingDirectory = rc.NewExpressionEvaluator(ctx).Interpolate(ctx, step.WorkingDirectory)
workingdirectory = rc.NewExpressionEvaluator(ctx).Interpolate(ctx, workingdirectory)
// but top level keys in workflow file like `defaults` or `env` can't
if step.WorkingDirectory == "" {
step.WorkingDirectory = rc.Run.Workflow.Defaults.Run.WorkingDirectory
if workingdirectory == "" {
workingdirectory = rc.Run.Workflow.Defaults.Run.WorkingDirectory
}
sr.WorkingDirectory = workingdirectory
}


@ -63,7 +63,9 @@ func TestMergeIntoMap(t *testing.T) {
for _, tt := range table {
t.Run(tt.name, func(t *testing.T) {
mergeIntoMap(&tt.target, tt.maps...)
mergeIntoMapCaseSensitive(tt.target, tt.maps...)
assert.Equal(t, tt.expected, tt.target)
mergeIntoMapCaseInsensitive(tt.target, tt.maps...)
assert.Equal(t, tt.expected, tt.target)
})
}


@ -0,0 +1,10 @@
name: reusable
on:
- workflow_call
jobs:
reusable_workflow_job:
runs-on: ubuntu-latest
steps:
- run: echo Test


@ -0,0 +1,9 @@
name: reusable
on: workflow_call
jobs:
reusable_workflow_job:
runs-on: ubuntu-latest
steps:
- run: echo Test


@ -1,14 +0,0 @@
name: issue-228
on:
- push
jobs:
kind:
runs-on: ubuntu-latest
steps:
- run: apt-get update -y && apt-get install git -y # setup git credentials will fail otherwise
- name: Setup git credentials
uses: fusion-engineering/setup-git-credentials@v2
with:
credentials: https://test@github.com/

View file

@ -0,0 +1,34 @@
name: matrix-with-user-inclusions
on: push
jobs:
build:
name: PHP ${{ matrix.os }} ${{ matrix.node}}
runs-on: ubuntu-latest
steps:
- run: |
echo ${NODE_VERSION} | grep 8
echo ${OS_VERSION} | grep ubuntu-18.04
env:
NODE_VERSION: ${{ matrix.node }}
OS_VERSION: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-18.04, macos-latest]
node: [4, 6, 8, 10]
exclude:
- os: macos-latest
node: 4
include:
- os: ubuntu-16.04
node: 10
test:
runs-on: ubuntu-latest
strategy:
matrix:
node: [8.x, 10.x, 12.x, 13.x]
steps:
- run: echo ${NODE_VERSION} | grep 8.x
env:
NODE_VERSION: ${{ matrix.node }}

View file

@ -19,6 +19,12 @@ jobs:
number_required: 1
secrets: inherit
reusable-workflow-with-on-string-notation:
uses: ./.github/workflows/local-reusable-workflow-no-inputs-string.yml
reusable-workflow-with-on-array-notation:
uses: ./.github/workflows/local-reusable-workflow-no-inputs-array.yml
output-test:
runs-on: ubuntu-latest
needs:

View file

@ -0,0 +1,7 @@
runs:
using: composite
steps:
- run: |
echo $env:GITHUB_ENV
echo "kEy=n/a" > $env:GITHUB_ENV
shell: pwsh

View file

@ -25,3 +25,20 @@ jobs:
echo "Unexpected value for `$env:key2: $env:key2"
exit 1
}
- run: |
echo $env:GITHUB_ENV
echo "KEY=test" > $env:GITHUB_ENV
echo "Key=expected" > $env:GITHUB_ENV
- name: Assert GITHUB_ENV is merged case insensitive
run: exit 1
if: env.KEY != 'expected' || env.Key != 'expected' || env.key != 'expected'
- name: Assert step env is merged case insensitive
run: exit 1
if: env.KEY != 'n/a' || env.Key != 'n/a' || env.key != 'n/a'
env:
KeY: 'n/a'
- uses: actions/checkout@v3
- uses: ./windows-add-env
- name: Assert composite env is merged case insensitive
run: exit 1
if: env.KEY != 'n/a' || env.Key != 'n/a' || env.key != 'n/a'

View file

@ -11,6 +11,9 @@ jobs:
mkdir build
echo '@echo off' > build/test.cmd
echo 'echo Hi' >> build/test.cmd
mkdir build2
echo '@echo off' > build2/test2.cmd
echo 'echo test2' >> build2/test2.cmd
- run: |
echo '${{ tojson(runner) }}'
ls
@ -23,3 +26,9 @@ jobs:
- run: |
echo $env:PATH
test
- run: |
echo "PATH=$env:PATH;${{ github.workspace }}\build2" > $env:GITHUB_ENV
- run: |
echo $env:PATH
test
test2

View file

@ -22,3 +22,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- run: '[[ "$(pwd)" == "/tmp" ]]'
workdir-from-matrix:
runs-on: ubuntu-latest
strategy:
max-parallel: 1
matrix:
work_dir: ["/tmp", "/root"]
steps:
- run: '[[ "$(pwd)" == "${{ matrix.work_dir }}" ]]'
working-directory: ${{ matrix.work_dir }}

27
cmd/dir.go Normal file
View file

@ -0,0 +1,27 @@
package cmd
import (
"os"
"path/filepath"
log "github.com/sirupsen/logrus"
)
var (
UserHomeDir string
CacheHomeDir string
)
func init() {
home, err := os.UserHomeDir()
if err != nil {
log.Fatal(err)
}
UserHomeDir = home
if v := os.Getenv("XDG_CACHE_HOME"); v != "" {
CacheHomeDir = v
} else {
CacheHomeDir = filepath.Join(UserHomeDir, ".cache")
}
}
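This is the usual XDG Base Directory lookup: honour $XDG_CACHE_HOME when set, otherwise fall back to ~/.cache. A standalone sketch of equivalent logic, returning an error instead of calling log.Fatal:
	package main
	import (
		"fmt"
		"os"
		"path/filepath"
	)
	func cacheHome() (string, error) {
		if v := os.Getenv("XDG_CACHE_HOME"); v != "" {
			return v, nil
		}
		home, err := os.UserHomeDir()
		if err != nil {
			return "", err
		}
		return filepath.Join(home, ".cache"), nil
	}
	func main() {
		dir, err := cacheHome()
		if err != nil {
			panic(err)
		}
		fmt.Println(filepath.Join(dir, "act")) // e.g. /home/user/.cache/act
	}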

View file

@ -42,11 +42,16 @@ type Input struct {
artifactServerPath string
artifactServerAddr string
artifactServerPort string
noCacheServer bool
cacheServerPath string
cacheServerAddr string
cacheServerPort uint16
jsonLogger bool
noSkipCheckout bool
remoteName string
replaceGheActionWithGithubCom []string
replaceGheActionTokenWithGithubCom string
matrix []string
}
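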
func (i *Input) resolve(path string) string {

View file

@ -11,7 +11,6 @@ import (
"strings"
"time"
"github.com/mitchellh/go-homedir"
log "github.com/sirupsen/logrus"
)
@ -133,16 +132,7 @@ func saveNoticesEtag(etag string) {
}
func etagPath() string {
var xdgCache string
var ok bool
if xdgCache, ok = os.LookupEnv("XDG_CACHE_HOME"); !ok || xdgCache == "" {
if home, err := homedir.Dir(); err == nil {
xdgCache = filepath.Join(home, ".cache")
} else if xdgCache, err = filepath.Abs("."); err != nil {
log.Fatal(err)
}
}
dir := filepath.Join(xdgCache, "act")
dir := filepath.Join(CacheHomeDir, "act")
if err := os.MkdirAll(dir, 0o777); err != nil {
log.Fatal(err)
}

View file

@ -15,11 +15,12 @@ import (
"github.com/adrg/xdg"
"github.com/andreaskoch/go-fswatch"
"github.com/joho/godotenv"
"github.com/mitchellh/go-homedir"
gitignore "github.com/sabhiram/go-gitignore"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"gopkg.in/yaml.v3"
"github.com/nektos/act/pkg/artifactcache"
"github.com/nektos/act/pkg/artifacts"
"github.com/nektos/act/pkg/common"
"github.com/nektos/act/pkg/container"
@ -66,6 +67,7 @@ func Execute(ctx context.Context, version string) {
rootCmd.Flags().BoolVar(&input.autoRemove, "rm", false, "automatically remove container(s)/volume(s) after a workflow(s) failure")
rootCmd.Flags().StringArrayVarP(&input.replaceGheActionWithGithubCom, "replace-ghe-action-with-github-com", "", []string{}, "If you are using GitHub Enterprise Server and allow specified actions from GitHub (github.com), you can list those actions here. (e.g. --replace-ghe-action-with-github-com=github/super-linter)")
rootCmd.Flags().StringVar(&input.replaceGheActionTokenWithGithubCom, "replace-ghe-action-token-with-github-com", "", "If you are using replace-ghe-action-with-github-com and you want to use private actions on GitHub, you have to set a personal access token")
rootCmd.Flags().StringArrayVarP(&input.matrix, "matrix", "", []string{}, "specify which matrix configuration to include (e.g. --matrix java:13)")
rootCmd.PersistentFlags().StringVarP(&input.actor, "actor", "a", "nektos/act", "user that triggered the event")
rootCmd.PersistentFlags().StringVarP(&input.workflowsPath, "workflows", "W", "./.github/workflows/", "path to workflow file(s)")
rootCmd.PersistentFlags().BoolVarP(&input.noWorkflowRecurse, "no-recurse", "", false, "Flag to disable running workflows from subdirectories of specified path in '--workflows'/'-W' flag")
@ -79,13 +81,17 @@ func Execute(ctx context.Context, version string) {
rootCmd.PersistentFlags().StringVarP(&input.envfile, "env-file", "", ".env", "environment file to read and use as env in the containers")
rootCmd.PersistentFlags().StringVarP(&input.inputfile, "input-file", "", ".input", "input file to read and use as action input")
rootCmd.PersistentFlags().StringVarP(&input.containerArchitecture, "container-architecture", "", "", "Architecture which should be used to run containers, e.g.: linux/amd64. If not specified, will use host default architecture. Requires Docker server API Version 1.41+. Ignored on earlier Docker server platforms.")
rootCmd.PersistentFlags().StringVarP(&input.containerDaemonSocket, "container-daemon-socket", "", "/var/run/docker.sock", "Path to Docker daemon socket which will be mounted to containers")
rootCmd.PersistentFlags().StringVarP(&input.containerDaemonSocket, "container-daemon-socket", "", "", "URI to Docker Engine socket (e.g.: unix://~/.docker/run/docker.sock or - to disable bind mounting the socket)")
rootCmd.PersistentFlags().StringVarP(&input.containerOptions, "container-options", "", "", "Custom docker container options for the job container without an options property in the job definition")
rootCmd.PersistentFlags().StringVarP(&input.githubInstance, "github-instance", "", "github.com", "GitHub instance to use. Don't use this if you are not using GitHub Enterprise Server.")
rootCmd.PersistentFlags().StringVarP(&input.artifactServerPath, "artifact-server-path", "", "", "Defines the path where the artifact server stores uploads and retrieves downloads from. If not specified the artifact server will not start.")
rootCmd.PersistentFlags().StringVarP(&input.artifactServerAddr, "artifact-server-addr", "", common.GetOutboundIP().String(), "Defines the address to which the artifact server binds.")
rootCmd.PersistentFlags().StringVarP(&input.artifactServerPort, "artifact-server-port", "", "34567", "Defines the port where the artifact server listens.")
rootCmd.PersistentFlags().BoolVarP(&input.noSkipCheckout, "no-skip-checkout", "", false, "Do not skip actions/checkout")
rootCmd.PersistentFlags().BoolVarP(&input.noCacheServer, "no-cache-server", "", false, "Disable cache server")
rootCmd.PersistentFlags().StringVarP(&input.cacheServerPath, "cache-server-path", "", filepath.Join(CacheHomeDir, "actcache"), "Defines the path where the cache server stores caches.")
rootCmd.PersistentFlags().StringVarP(&input.cacheServerAddr, "cache-server-addr", "", common.GetOutboundIP().String(), "Defines the address to which the cache server binds.")
rootCmd.PersistentFlags().Uint16VarP(&input.cacheServerPort, "cache-server-port", "", 0, "Defines the port where the cache server listens. 0 means a randomly available port.")
rootCmd.SetArgs(args())
if err := rootCmd.Execute(); err != nil {
@ -94,11 +100,6 @@ func Execute(ctx context.Context, version string) {
}
func configLocations() []string {
home, err := homedir.Dir()
if err != nil {
log.Fatal(err)
}
configFileName := ".actrc"
// reference: https://specifications.freedesktop.org/basedir-spec/latest/ar01s03.html
@ -111,12 +112,39 @@ func configLocations() []string {
}
return []string{
filepath.Join(home, configFileName),
filepath.Join(UserHomeDir, configFileName),
actrcXdg,
filepath.Join(".", configFileName),
}
}
var commonSocketPaths = []string{
"/var/run/docker.sock",
"/var/run/podman/podman.sock",
"$HOME/.colima/docker.sock",
"$XDG_RUNTIME_DIR/docker.sock",
`\\.\pipe\docker_engine`,
"$HOME/.docker/run/docker.sock",
}
// returns the socket path, or false if none was found
func socketLocation() (string, bool) {
if dockerHost, exists := os.LookupEnv("DOCKER_HOST"); exists {
return dockerHost, true
}
for _, p := range commonSocketPaths {
if _, err := os.Lstat(os.ExpandEnv(p)); err == nil {
if strings.HasPrefix(p, `\\.\`) {
return "npipe://" + filepath.ToSlash(os.ExpandEnv(p)), true
}
return "unix://" + filepath.ToSlash(os.ExpandEnv(p)), true
}
}
return "", false
}
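A caller-side sketch (hypothetical, assuming the logrus import used in this file): prefer an explicit DOCKER_HOST, otherwise take whatever the probe finds:
	if host, ok := socketLocation(); ok {
		os.Setenv("DOCKER_HOST", host) // e.g. unix:///var/run/docker.sock
	} else {
		log.Warn("no Docker socket found; job containers will not start")
	}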
func args() []string {
actrc := configLocations()
@ -130,15 +158,6 @@ func args() []string {
}
func bugReport(ctx context.Context, version string) error {
var commonSocketPaths = []string{
"/var/run/docker.sock",
"/var/run/podman/podman.sock",
"$HOME/.colima/docker.sock",
"$XDG_RUNTIME_DIR/docker.sock",
`\\.\pipe\docker_engine`,
"$HOME/.docker/run/docker.sock",
}
sprintf := func(key, val string) string {
return fmt.Sprintf("%-24s%s\n", key, val)
}
@ -149,19 +168,20 @@ func bugReport(ctx context.Context, version string) error {
report += sprintf("NumCPU:", fmt.Sprint(runtime.NumCPU()))
var dockerHost string
if dockerHost = os.Getenv("DOCKER_HOST"); dockerHost == "" {
dockerHost = "DOCKER_HOST environment variable is unset/empty."
var exists bool
if dockerHost, exists = os.LookupEnv("DOCKER_HOST"); !exists {
dockerHost = "DOCKER_HOST environment variable is not set"
} else if dockerHost == "" {
dockerHost = "DOCKER_HOST environment variable is empty."
}
report += sprintf("Docker host:", dockerHost)
report += fmt.Sprintln("Sockets found:")
for _, p := range commonSocketPaths {
if strings.HasPrefix(p, `$`) {
v := strings.Split(p, `/`)[0]
p = strings.Replace(p, v, os.Getenv(strings.TrimPrefix(v, `$`)), 1)
}
if _, err := os.Stat(p); err != nil {
if _, err := os.Lstat(os.ExpandEnv(p)); err != nil {
continue
} else if _, err := os.Stat(os.ExpandEnv(p)); err != nil {
report += fmt.Sprintf("\t%s(broken)\n", p)
} else {
report += fmt.Sprintf("\t%s\n", p)
}
@ -281,9 +301,26 @@ func parseEnvs(env []string, envs map[string]string) bool {
return false
}
func readYamlFile(file string) (map[string]string, error) {
content, err := os.ReadFile(file)
if err != nil {
return nil, err
}
ret := map[string]string{}
if err = yaml.Unmarshal(content, &ret); err != nil {
return nil, err
}
return ret, nil
}
func readEnvs(path string, envs map[string]string) bool {
if _, err := os.Stat(path); err == nil {
env, err := godotenv.Read(path)
var env map[string]string
if ext := filepath.Ext(path); ext == ".yml" || ext == ".yaml" {
env, err = readYamlFile(path)
} else {
env, err = godotenv.Read(path)
}
if err != nil {
log.Fatalf("Error loading from %s: %v", path, err)
}
@ -295,6 +332,36 @@ func readEnvs(path string, envs map[string]string) bool {
return false
}
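readEnvs now dispatches on the file extension: .yml/.yaml files are parsed as a flat string-to-string map, everything else still goes through godotenv. A self-contained sketch of the YAML branch (hypothetical file contents; nested YAML values would fail to unmarshal into map[string]string):
	package main
	import (
		"fmt"
		"gopkg.in/yaml.v3"
	)
	func main() {
		raw := []byte("FOO: bar\nNODE_VERSION: \"16\"\n") // e.g. contents of .input.yml
		vars := map[string]string{}
		if err := yaml.Unmarshal(raw, &vars); err != nil {
			panic(err)
		}
		fmt.Println(vars["FOO"], vars["NODE_VERSION"]) // bar 16
	}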
func parseMatrix(matrix []string) map[string]map[string]bool {
// each matrix entry should be of the form string:string
r := regexp.MustCompile(":")
matrixes := make(map[string]map[string]bool)
for _, m := range matrix {
matrix := r.Split(m, 2)
if len(matrix) < 2 {
log.Fatalf("Invalid matrix format. Failed to parse %s", m)
} else {
if _, ok := matrixes[matrix[0]]; !ok {
matrixes[matrix[0]] = make(map[string]bool)
}
matrixes[matrix[0]][matrix[1]] = true
}
}
return matrixes
}
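Each --matrix flag is split on the first colon into a dimension and one allowed value; repeated flags accumulate into a set per dimension. A usage sketch (hypothetical invocation):
	// act --matrix os:ubuntu-latest --matrix node:16 --matrix node:18
	matrixes := parseMatrix([]string{"os:ubuntu-latest", "node:16", "node:18"})
	// matrixes == map[string]map[string]bool{
	// 	"os":   {"ubuntu-latest": true},
	// 	"node": {"16": true, "18": true},
	// }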
func isDockerHostURI(daemonPath string) bool {
if protoIndex := strings.Index(daemonPath, "://"); protoIndex != -1 {
scheme := daemonPath[:protoIndex]
if strings.IndexFunc(scheme, func(r rune) bool {
return (r < 'a' || r > 'z') && (r < 'A' || r > 'Z')
}) == -1 {
return true
}
}
return false
}
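The scheme check accepts any purely alphabetic scheme followed by ://, so bare file paths are rejected. Some illustrative cases (sketch, assuming the function above):
	isDockerHostURI("unix:///var/run/docker.sock")    // true
	isDockerHostURI("npipe:////./pipe/docker_engine") // true
	isDockerHostURI("tcp://127.0.0.1:2376")           // true
	isDockerHostURI("/var/run/docker.sock")           // false: no scheme
	isDockerHostURI("-")                              // false: sentinel for "don't mount"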
//nolint:gocyclo
func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []string) error {
return func(cmd *cobra.Command, args []string) error {
@ -306,13 +373,35 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str
return bugReport(ctx, cmd.Version)
}
// Prefer DOCKER_HOST, don't override it
socketPath, hasDockerHost := os.LookupEnv("DOCKER_HOST")
if !hasDockerHost {
// a - in containerDaemonSocket means don't mount, preserve this value
// otherwise if input.containerDaemonSocket is a filepath don't use it as socketPath
skipMount := input.containerDaemonSocket == "-" || !isDockerHostURI(input.containerDaemonSocket)
if input.containerDaemonSocket != "" && !skipMount {
socketPath = input.containerDaemonSocket
} else {
socket, found := socketLocation()
if !found {
log.Errorln("daemon Docker Engine socket not found and containerDaemonSocket option was not set")
} else {
socketPath = socket
}
if !skipMount {
input.containerDaemonSocket = socketPath
}
}
os.Setenv("DOCKER_HOST", socketPath)
}
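// Resolution order (sketch of the branch above): an exported DOCKER_HOST
// always wins and is never overridden; otherwise a containerDaemonSocket
// given as a URI is used directly; anything else ("-" or a bare file
// path) falls back to probing socketLocation() for a DOCKER_HOST value.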
if runtime.GOOS == "darwin" && runtime.GOARCH == "arm64" && input.containerArchitecture == "" {
l := log.New()
l.SetFormatter(&log.TextFormatter{
DisableQuote: true,
DisableTimestamp: true,
})
l.Warnf(" \U000026A0 You are using Apple M1 chip and you have not specified container architecture, you might encounter issues while running act. If so, try running it with '--container-architecture linux/amd64'. \U000026A0 \n")
l.Warnf(" \U000026A0 You are using Apple M-series chip and you have not specified container architecture, you might encounter issues while running act. If so, try running it with '--container-architecture linux/amd64'. \U000026A0 \n")
}
log.Debugf("Loading environment from %s", input.Envfile())
@ -329,6 +418,9 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str
secrets := newSecrets(input.secrets)
_ = readEnvs(input.Secretfile(), secrets)
matrixes := parseMatrix(input.matrix)
log.Debugf("Evaluated matrix inclusions: %v", matrixes)
planner, err := model.NewWorkflowPlanner(input.WorkflowsPath(), input.noWorkflowRecurse)
if err != nil {
return err
@ -508,6 +600,7 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str
RemoteName: input.remoteName,
ReplaceGheActionWithGithubCom: input.replaceGheActionWithGithubCom,
ReplaceGheActionTokenWithGithubCom: input.replaceGheActionTokenWithGithubCom,
Matrix: matrixes,
}
r, err := runner.New(config)
if err != nil {
@ -516,6 +609,17 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str
cancel := artifacts.Serve(ctx, input.artifactServerPath, input.artifactServerAddr, input.artifactServerPort)
const cacheURLKey = "ACTIONS_CACHE_URL"
var cacheHandler *artifactcache.Handler
if !input.noCacheServer && envs[cacheURLKey] == "" {
var err error
cacheHandler, err = artifactcache.StartHandler(input.cacheServerPath, input.cacheServerAddr, input.cacheServerPort, common.Logger(ctx))
if err != nil {
return err
}
envs[cacheURLKey] = cacheHandler.ExternalURL() + "/"
}
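// The cache server only starts when it has not been disabled with
// --no-cache-server and ACTIONS_CACHE_URL is not already set; on success
// its external URL (with a trailing slash) is exposed to workflows
// through ACTIONS_CACHE_URL.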
ctx = common.WithDryrun(ctx, input.dryrun)
if watch, err := cmd.Flags().GetBool("watch"); err != nil {
return err
@ -529,6 +633,7 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str
executor := r.NewPlanExecutor(plan).Finally(func(ctx context.Context) error {
cancel()
_ = cacheHandler.Close()
return nil
})
err = executor(ctx)
@ -583,45 +688,47 @@ func defaultImageSurvey(actrc string) error {
}
func watchAndRun(ctx context.Context, fn common.Executor) error {
recurse := true
checkIntervalInSeconds := 2
dir, err := os.Getwd()
if err != nil {
return err
}
var ignore *gitignore.GitIgnore
if _, err := os.Stat(filepath.Join(dir, ".gitignore")); !os.IsNotExist(err) {
ignore, _ = gitignore.CompileIgnoreFile(filepath.Join(dir, ".gitignore"))
} else {
ignore = &gitignore.GitIgnore{}
ignoreFile := filepath.Join(dir, ".gitignore")
ignore := &gitignore.GitIgnore{}
if info, err := os.Stat(ignoreFile); err == nil && !info.IsDir() {
ignore, err = gitignore.CompileIgnoreFile(ignoreFile)
if err != nil {
return fmt.Errorf("compile %q: %w", ignoreFile, err)
}
}
folderWatcher := fswatch.NewFolderWatcher(
dir,
recurse,
true,
ignore.MatchesPath,
checkIntervalInSeconds,
2, // 2 seconds
)
folderWatcher.Start()
defer folderWatcher.Stop()
go func() {
for folderWatcher.IsRunning() {
if err = fn(ctx); err != nil {
break
}
log.Debugf("Watching %s for changes", dir)
for changes := range folderWatcher.ChangeDetails() {
log.Debugf("%s", changes.String())
if err = fn(ctx); err != nil {
break
}
log.Debugf("Watching %s for changes", dir)
// run once before watching
if err := fn(ctx); err != nil {
return err
}
for folderWatcher.IsRunning() {
log.Debugf("Watching %s for changes", dir)
select {
case <-ctx.Done():
return nil
case changes := <-folderWatcher.ChangeDetails():
log.Debugf("%s", changes.String())
if err := fn(ctx); err != nil {
return err
}
}
}()
<-ctx.Done()
folderWatcher.Stop()
return err
}
return nil
}
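// Net effect of the rewrite above: the old version drove fn from a
// goroutine, waited on ctx.Done() and returned the last error through a
// shared variable; the new version runs fn once before watching, re-runs
// it serially on each change event, and returns nil on cancellation.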