commit 957282b8bc85320778470e042fa6391246d11bee Author: Casey Lee Date: Sat Jan 12 20:45:25 2019 -0800 Initial commit with support for GitHub actions diff --git a/.github/actions/check/Dockerfile b/.github/actions/check/Dockerfile new file mode 100644 index 00000000..4f299faf --- /dev/null +++ b/.github/actions/check/Dockerfile @@ -0,0 +1,10 @@ +FROM golang:1.11.4-stretch + +RUN go get -u honnef.co/go/tools/cmd/staticcheck +RUN go get -u golang.org/x/lint/golint +RUN go get -u github.com/fzipp/gocyclo + +COPY "entrypoint.sh" "/entrypoint.sh" +RUN chmod +x /entrypoint.sh + +ENTRYPOINT ["/entrypoint.sh"] diff --git a/.github/actions/check/entrypoint.sh b/.github/actions/check/entrypoint.sh new file mode 100644 index 00000000..24896610 --- /dev/null +++ b/.github/actions/check/entrypoint.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +#GOPATH=/go +#PATH=${GOPATH}/bin:/usr/local/go/bin:${PATH} + +go vet ./... +golint -set_exit_status ./... +staticcheck ./... +gocyclo -over 10 . +go test -cover ./... \ No newline at end of file diff --git a/.github/main.workflow b/.github/main.workflow new file mode 100644 index 00000000..b6d13a91 --- /dev/null +++ b/.github/main.workflow @@ -0,0 +1,21 @@ +workflow "check-and-release" { + on = "push" + resolves = ["release"] +} + +action "check" { + uses = "./.github/actions/check" +} + + action "branch-filter" { + needs = ["check"] + uses = "actions/bin/filter@master" + args = "tag v*" + } + + action "release" { + needs = ["branch-filter"] + uses = "docker://goreleaser/goreleaser:v0.97" + args = "release" + secrets = ["GITHUB_TOKEN"] + } \ No newline at end of file diff --git a/actions/log.go b/actions/log.go new file mode 100644 index 00000000..122da7f9 --- /dev/null +++ b/actions/log.go @@ -0,0 +1,109 @@ +package actions + +import ( + "bytes" + "fmt" + "io" + "os" + "strings" + + "github.com/sirupsen/logrus" + "golang.org/x/crypto/ssh/terminal" +) + +type actionLogFormatter struct { +} + +var formatter *actionLogFormatter + +func init() { + formatter = new(actionLogFormatter) +} + +const ( + //nocolor = 0 + red = 31 + green = 32 + yellow = 33 + blue = 36 + gray = 37 +) + +func newActionLogger(actionName string, dryrun bool) *logrus.Entry { + logger := logrus.New() + logger.SetFormatter(formatter) + logger.SetLevel(logrus.GetLevel()) + rtn := logger.WithFields(logrus.Fields{"action_name": actionName, "dryrun": dryrun}) + return rtn +} + +func (f *actionLogFormatter) Format(entry *logrus.Entry) ([]byte, error) { + b := &bytes.Buffer{} + + if f.isColored(entry) { + f.printColored(b, entry) + } else { + f.print(b, entry) + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *actionLogFormatter) printColored(b *bytes.Buffer, entry *logrus.Entry) { + var levelColor int + switch entry.Level { + case logrus.DebugLevel, logrus.TraceLevel: + levelColor = gray + case logrus.WarnLevel: + levelColor = yellow + case logrus.ErrorLevel, logrus.FatalLevel, logrus.PanicLevel: + levelColor = red + default: + levelColor = blue + } + + entry.Message = strings.TrimSuffix(entry.Message, "\n") + actionName := entry.Data["action_name"] + + if entry.Data["dryrun"] == true { + fmt.Fprintf(b, "\x1b[%dm*DRYRUN* \x1b[%dm[%s] \x1b[0m%s", green, levelColor, actionName, entry.Message) + } else { + fmt.Fprintf(b, "\x1b[%dm[%s] \x1b[0m%s", levelColor, actionName, entry.Message) + } +} + +func (f *actionLogFormatter) print(b *bytes.Buffer, entry *logrus.Entry) { + entry.Message = strings.TrimSuffix(entry.Message, "\n") + actionName := entry.Data["action_name"] + + if entry.Data["dryrun"] == true { 
+ fmt.Fprintf(b, "*DRYRUN* [%s] %s", actionName, entry.Message) + } else { + fmt.Fprintf(b, "[%s] %s", actionName, entry.Message) + } +} + +func (f *actionLogFormatter) isColored(entry *logrus.Entry) bool { + + isColored := checkIfTerminal(entry.Logger.Out) + + if force, ok := os.LookupEnv("CLICOLOR_FORCE"); ok && force != "0" { + isColored = true + } else if ok && force == "0" { + isColored = false + } else if os.Getenv("CLICOLOR") == "0" { + isColored = false + } + + return isColored +} + +func checkIfTerminal(w io.Writer) bool { + switch v := w.(type) { + case *os.File: + return terminal.IsTerminal(int(v.Fd())) + default: + return false + } +} diff --git a/actions/parser.go b/actions/parser.go new file mode 100644 index 00000000..6fae1dd9 --- /dev/null +++ b/actions/parser.go @@ -0,0 +1,76 @@ +package actions + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" + log "github.com/sirupsen/logrus" +) + +// ParseWorkflows will read in the set of actions from the workflow file +func ParseWorkflows(workingDir string, workflowPath string) (Workflows, error) { + workingDir, err := filepath.Abs(workingDir) + if err != nil { + return nil, err + } + log.Debugf("Setting working dir to %s", workingDir) + + if !filepath.IsAbs(workflowPath) { + workflowPath = filepath.Join(workingDir, workflowPath) + } + log.Debugf("Loading workflow config from %s", workflowPath) + workflowReader, err := os.Open(workflowPath) + if err != nil { + return nil, err + } + + buf := new(bytes.Buffer) + buf.ReadFrom(workflowReader) + + workflows := new(workflowsFile) + workflows.WorkingDir = workingDir + workflows.WorkflowPath = workflowPath + + astFile, err := hcl.ParseBytes(buf.Bytes()) + if err != nil { + return nil, err + } + rootNode := ast.Walk(astFile.Node, cleanWorkflowsAST) + err = hcl.DecodeObject(workflows, rootNode) + if err != nil { + return nil, err + } + + workflows.TempDir, err = ioutil.TempDir("/tmp", "act-") + if err != nil { + return nil, err + } + + // TODO: add validation logic + // - check for circular dependencies + // - check for valid local path refs + // - check for valid dependencies + + return workflows, nil +} + +func cleanWorkflowsAST(node ast.Node) (ast.Node, bool) { + if objectItem, ok := node.(*ast.ObjectItem); ok { + key := objectItem.Keys[0].Token.Value() + + // handle condition where value is a string but should be a list + switch key { + case "resolves", "needs", "args": + if literalType, ok := objectItem.Val.(*ast.LiteralType); ok { + listType := new(ast.ListType) + listType.Add(literalType) + objectItem.Val = listType + } + } + } + return node, true +} diff --git a/actions/runner.go b/actions/runner.go new file mode 100644 index 00000000..472ba5a7 --- /dev/null +++ b/actions/runner.go @@ -0,0 +1,412 @@ +package actions + +import ( + "archive/tar" + "bytes" + "context" + "fmt" + "io" + "math/rand" + "net/http" + "net/url" + "os" + "path/filepath" + "regexp" + "sort" + + "github.com/howeyc/gopass" + "github.com/nektos/act/common" + "github.com/nektos/act/container" + log "github.com/sirupsen/logrus" +) + +var secretCache map[string]string + +func (w *workflowsFile) ListEvents() []string { + log.Debugf("Listing all events") + events := make([]string, 0) + for _, w := range w.Workflow { + events = append(events, w.On) + } + + // sort the list based on depth of dependencies + sort.Slice(events, func(i, j int) bool { + return events[i] < events[j] + }) + + return events +} + +func (w *workflowsFile) 
GraphEvent(eventName string) ([][]string, error) { + log.Debugf("Listing actions for event '%s'", eventName) + workflow, _, err := w.getWorkflow(eventName) + if err != nil { + return nil, err + } + return w.newExecutionGraph(workflow.Resolves...), nil +} + +func (w *workflowsFile) RunAction(ctx context.Context, dryrun bool, actionName string) error { + log.Debugf("Running action '%s'", actionName) + return w.newActionExecutor(ctx, dryrun, "", actionName)() +} + +func (w *workflowsFile) RunEvent(ctx context.Context, dryrun bool, eventName string) error { + log.Debugf("Running event '%s'", eventName) + workflow, _, err := w.getWorkflow(eventName) + if err != nil { + return err + } + + log.Debugf("Running actions %s -> %s", eventName, workflow.Resolves) + return w.newActionExecutor(ctx, dryrun, eventName, workflow.Resolves...)() +} + +func (w *workflowsFile) getWorkflow(eventName string) (*workflowDef, string, error) { + for wName, w := range w.Workflow { + if w.On == eventName { + return &w, wName, nil + } + } + return nil, "", fmt.Errorf("unsupported event: %v", eventName) +} + +func (w *workflowsFile) getAction(actionName string) (*actionDef, error) { + if a, ok := w.Action[actionName]; ok { + return &a, nil + } + return nil, fmt.Errorf("unsupported action: %v", actionName) +} + +func (w *workflowsFile) Close() { + os.RemoveAll(w.TempDir) +} + +// return a pipeline that is run in series. pipeline is a list of steps to run in parallel +func (w *workflowsFile) newExecutionGraph(actionNames ...string) [][]string { + // first, build a list of all the necessary actions to run, and their dependencies + actionDependencies := make(map[string][]string) + for len(actionNames) > 0 { + newActionNames := make([]string, 0) + for _, aName := range actionNames { + // make sure we haven't visited this action yet + if _, ok := actionDependencies[aName]; !ok { + actionDependencies[aName] = w.Action[aName].Needs + newActionNames = append(newActionNames, w.Action[aName].Needs...) + } + } + actionNames = newActionNames + } + + // next, build an execution graph + graph := make([][]string, 0) + for len(actionDependencies) > 0 { + stage := make([]string, 0) + for aName, aDeps := range actionDependencies { + // make sure all deps are in the graph already + if listInLists(aDeps, graph...) { + stage = append(stage, aName) + delete(actionDependencies, aName) + } + } + if len(stage) == 0 { + log.Fatalf("Unable to build dependency graph!") + } + graph = append(graph, stage) + } + + return graph +} + +// return true iff all strings in srcList exist in at least one of the searchLists +func listInLists(srcList []string, searchLists ...[]string) bool { + for _, src := range srcList { + found := false + for _, searchList := range searchLists { + for _, search := range searchList { + if src == search { + found = true + } + } + } + if !found { + return false + } + } + return true +} + +func (w *workflowsFile) newActionExecutor(ctx context.Context, dryrun bool, eventName string, actionNames ...string) common.Executor { + graph := w.newExecutionGraph(actionNames...) 
+ + pipeline := make([]common.Executor, 0) + for _, actions := range graph { + stage := make([]common.Executor, 0) + for _, actionName := range actions { + action, err := w.getAction(actionName) + if err != nil { + return common.NewErrorExecutor(err) + } + actionExecutor := action.asExecutor(ctx, dryrun, w.WorkingDir, w.TempDir, actionName, w.setupEnvironment(eventName, actionName, dryrun)) + stage = append(stage, actionExecutor) + } + pipeline = append(pipeline, common.NewParallelExecutor(stage...)) + } + + return common.NewPipelineExecutor(pipeline...) +} + +func (action *actionDef) asExecutor(ctx context.Context, dryrun bool, workingDir string, tempDir string, actionName string, env []string) common.Executor { + logger := newActionLogger(actionName, dryrun) + log.Debugf("Using '%s' for action '%s'", action.Uses, actionName) + + in := container.DockerExecutorInput{ + Ctx: ctx, + Logger: logger, + Dryrun: dryrun, + } + + var image string + executors := make([]common.Executor, 0) + if imageRef, ok := parseImageReference(action.Uses); ok { + executors = append(executors, container.NewDockerPullExecutor(container.NewDockerPullExecutorInput{ + DockerExecutorInput: in, + Image: imageRef, + })) + image = imageRef + } else if contextDir, imageTag, ok := parseImageLocal(workingDir, action.Uses); ok { + executors = append(executors, container.NewDockerBuildExecutor(container.NewDockerBuildExecutorInput{ + DockerExecutorInput: in, + ContextDir: contextDir, + ImageTag: imageTag, + })) + image = imageTag + } else if cloneURL, ref, path, ok := parseImageGithub(action.Uses); ok { + cloneDir := filepath.Join(os.TempDir(), "act", action.Uses) + executors = append(executors, common.NewGitCloneExecutor(common.NewGitCloneExecutorInput{ + URL: cloneURL, + Ref: ref, + Dir: cloneDir, + Logger: logger, + Dryrun: dryrun, + })) + + contextDir := filepath.Join(cloneDir, path) + imageTag := fmt.Sprintf("%s:%s", filepath.Base(cloneURL.Path), ref) + + executors = append(executors, container.NewDockerBuildExecutor(container.NewDockerBuildExecutorInput{ + DockerExecutorInput: in, + ContextDir: contextDir, + ImageTag: imageTag, + })) + image = imageTag + } else { + return common.NewErrorExecutor(fmt.Errorf("unable to determine executor type for image '%s'", action.Uses)) + } + + ghReader, err := action.createGithubTarball() + if err != nil { + return common.NewErrorExecutor(err) + } + randSuffix := randString(6) + containerName := regexp.MustCompile("[^a-zA-Z0-9]").ReplaceAllString(actionName, "-") + if len(containerName)+len(randSuffix)+1 > 30 { + containerName = containerName[:(30 - (len(randSuffix) + 1))] + } + executors = append(executors, container.NewDockerRunExecutor(container.NewDockerRunExecutorInput{ + DockerExecutorInput: in, + Cmd: action.Args, + Image: image, + WorkingDir: "/github/workspace", + Env: env, + Name: fmt.Sprintf("%s-%s", containerName, randSuffix), + Binds: []string{ + fmt.Sprintf("%s:%s", workingDir, "/github/workspace"), + fmt.Sprintf("%s:%s", tempDir, "/github/home"), + fmt.Sprintf("%s:%s", "/var/run/docker.sock", "/var/run/docker.sock"), + }, + Content: map[string]io.Reader{"/github": ghReader}, + })) + + return common.NewPipelineExecutor(executors...) 
+} + +const letterBytes = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" + +func randString(slen int) string { + b := make([]byte, slen) + for i := range b { + b[i] = letterBytes[rand.Int63()%int64(len(letterBytes))] + } + return string(b) +} + +func (action *actionDef) createGithubTarball() (io.Reader, error) { + var buf bytes.Buffer + tw := tar.NewWriter(&buf) + var files = []struct { + Name string + Mode int64 + Body string + }{ + {"workflow/event.json", 0644, "{}"}, + } + for _, file := range files { + hdr := &tar.Header{ + Name: file.Name, + Mode: file.Mode, + Size: int64(len(file.Body)), + } + if err := tw.WriteHeader(hdr); err != nil { + return nil, err + } + if _, err := tw.Write([]byte(file.Body)); err != nil { + return nil, err + } + } + if err := tw.Close(); err != nil { + return nil, err + } + + return &buf, nil + +} + +func (w *workflowsFile) setupEnvironment(eventName string, actionName string, dryrun bool) []string { + env := make([]string, 0) + repoPath := w.WorkingDir + + _, workflowName, _ := w.getWorkflow(eventName) + + env = append(env, fmt.Sprintf("HOME=/github/home")) + env = append(env, fmt.Sprintf("GITHUB_ACTOR=nektos/act")) + env = append(env, fmt.Sprintf("GITHUB_EVENT_PATH=/github/workflow/event.json")) + env = append(env, fmt.Sprintf("GITHUB_WORKSPACE=/github/workspace")) + env = append(env, fmt.Sprintf("GITHUB_WORKFLOW=%s", workflowName)) + env = append(env, fmt.Sprintf("GITHUB_EVENT_NAME=%s", eventName)) + env = append(env, fmt.Sprintf("GITHUB_ACTION=%s", actionName)) + + _, rev, err := common.FindGitRevision(repoPath) + if err != nil { + log.Warningf("unable to get git revision: %v", err) + } else { + env = append(env, fmt.Sprintf("GITHUB_SHA=%s", rev)) + } + + repo, err := common.FindGithubRepo(repoPath) + if err != nil { + log.Warningf("unable to get git repo: %v", err) + } else { + env = append(env, fmt.Sprintf("GITHUB_REPOSITORY=%s", repo)) + } + + branch, err := common.FindGitBranch(repoPath) + if err != nil { + log.Warningf("unable to get git branch: %v", err) + } else { + env = append(env, fmt.Sprintf("GITHUB_REF=refs/heads/%s", branch)) + } + + action, err := w.getAction(actionName) + if err == nil && !dryrun { + action.applyEnvironmentSecrets(&env) + } + + return env +} + +func (action *actionDef) applyEnvironmentSecrets(env *[]string) { + if action != nil { + for envKey, envValue := range action.Env { + *env = append(*env, fmt.Sprintf("%s=%s", envKey, envValue)) + } + + for _, secret := range action.Secrets { + if secretVal, ok := os.LookupEnv(secret); ok { + *env = append(*env, fmt.Sprintf("%s=%s", secret, secretVal)) + } else { + if secretCache == nil { + secretCache = make(map[string]string) + } + + if secretCache[secret] == "" { + fmt.Printf("Provide value for '%s': ", secret) + val, err := gopass.GetPasswdMasked() + if err != nil { + log.Fatal("abort") + } + + secretCache[secret] = string(val) + } + *env = append(*env, fmt.Sprintf("%s=%s", secret, secretCache[secret])) + } + } + } +} + +// imageURL is the directory where a `Dockerfile` should exist +func parseImageLocal(workingDir string, contextDir string) (contextDirOut string, tag string, ok bool) { + if !filepath.IsAbs(contextDir) { + contextDir = filepath.Join(workingDir, contextDir) + } + if _, err := os.Stat(filepath.Join(contextDir, "Dockerfile")); os.IsNotExist(err) { + log.Debugf("Ignoring missing Dockerfile '%s/Dockerfile'", contextDir) + return "", "", false + } + + sha, _, err := common.FindGitRevision(contextDir) + if err != nil { + log.Warnf("Unable to determine git revision: %v", err) + 
sha = "latest" + } + return contextDir, fmt.Sprintf("%s:%s", filepath.Base(contextDir), sha), true +} + +// imageURL is the URL for a docker repo +func parseImageReference(image string) (ref string, ok bool) { + imageURL, err := url.Parse(image) + if err != nil { + log.Debugf("Unable to parse image as url: %v", err) + return "", false + } + if imageURL.Scheme != "docker" { + log.Debugf("Ignoring non-docker ref '%s'", imageURL.String()) + return "", false + } + + return fmt.Sprintf("%s%s", imageURL.Host, imageURL.Path), true +} + +// imageURL is the directory where a `Dockerfile` should exist +func parseImageGithub(image string) (cloneURL *url.URL, ref string, path string, ok bool) { + re := regexp.MustCompile("^([^/@]+)/([^/@]+)(/([^@]*))?(@(.*))?$") + matches := re.FindStringSubmatch(image) + + if matches == nil { + return nil, "", "", false + } + + cloneURL, err := url.Parse(fmt.Sprintf("https://github.com/%s/%s", matches[1], matches[2])) + if err != nil { + log.Debugf("Unable to parse as URL: %v", err) + return nil, "", "", false + } + + resp, err := http.Head(cloneURL.String()) + if resp.StatusCode >= 400 || err != nil { + log.Debugf("Unable to HEAD URL %s status=%v err=%v", cloneURL.String(), resp.StatusCode, err) + return nil, "", "", false + } + + ref = matches[6] + if ref == "" { + ref = "master" + } + + path = matches[4] + if path == "" { + path = "." + } + + return cloneURL, ref, path, true +} diff --git a/actions/runner_test.go b/actions/runner_test.go new file mode 100644 index 00000000..cb620acc --- /dev/null +++ b/actions/runner_test.go @@ -0,0 +1,89 @@ +package actions + +import ( + "fmt" + "path/filepath" + "testing" + + "github.com/nektos/act/common" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +func TestParseImageReference(t *testing.T) { + log.SetLevel(log.DebugLevel) + tables := []struct { + refIn string + refOut string + ok bool + }{ + {"docker://myhost.com/foo/bar", "myhost.com/foo/bar", true}, + {"docker://ubuntu", "ubuntu", true}, + {"docker://ubuntu:18.04", "ubuntu:18.04", true}, + {"docker://cibuilds/hugo:0.53", "cibuilds/hugo:0.53", true}, + {"http://google.com:8080", "", false}, + {"./foo", "", false}, + } + + for _, table := range tables { + refOut, ok := parseImageReference(table.refIn) + assert.Equal(t, table.refOut, refOut) + assert.Equal(t, table.ok, ok) + } + +} + +func TestParseImageLocal(t *testing.T) { + log.SetLevel(log.DebugLevel) + tables := []struct { + pathIn string + contextDir string + refTag string + ok bool + }{ + {"docker://myhost.com/foo/bar", "", "", false}, + {"http://google.com:8080", "", "", false}, + {"example/action1", "/example/action1", "action1:", true}, + } + + revision, _, err := common.FindGitRevision(".") + assert.Nil(t, err) + basedir, err := filepath.Abs("..") + assert.Nil(t, err) + for _, table := range tables { + contextDir, refTag, ok := parseImageLocal(basedir, table.pathIn) + assert.Equal(t, table.ok, ok, "ok match for %s", table.pathIn) + if ok { + assert.Equal(t, fmt.Sprintf("%s%s", basedir, table.contextDir), contextDir, "context dir doesn't match for %s", table.pathIn) + assert.Equal(t, fmt.Sprintf("%s%s", table.refTag, revision), refTag) + } + } + +} +func TestParseImageGithub(t *testing.T) { + log.SetLevel(log.DebugLevel) + tables := []struct { + image string + cloneURL string + ref string + path string + ok bool + }{ + {"nektos/act", "https://github.com/nektos/act", "master", ".", true}, + {"nektos/act/foo", "https://github.com/nektos/act", "master", "foo", true}, + 
{"nektos/act@xxxxx", "https://github.com/nektos/act", "xxxxx", ".", true}, + {"nektos/act/bar/baz@zzzzz", "https://github.com/nektos/act", "zzzzz", "bar/baz", true}, + {"nektos/zzzzundefinedzzzz", "", "", "", false}, + } + + for _, table := range tables { + cloneURL, ref, path, ok := parseImageGithub(table.image) + assert.Equal(t, table.ok, ok, "ok match for %s", table.image) + if ok { + assert.Equal(t, table.cloneURL, cloneURL.String()) + assert.Equal(t, table.ref, ref) + assert.Equal(t, table.path, path) + } + } + +} diff --git a/actions/types.go b/actions/types.go new file mode 100644 index 00000000..334503dc --- /dev/null +++ b/actions/types.go @@ -0,0 +1,56 @@ +package actions + +import ( + "context" +) + +// Workflows provides capabilities to work with the workflow file +type Workflows interface { + EventGrapher + EventLister + ActionRunner + EventRunner + Close() +} + +// EventGrapher to list the actions +type EventGrapher interface { + GraphEvent(eventName string) ([][]string, error) +} + +// EventLister to list the events +type EventLister interface { + ListEvents() []string +} + +// ActionRunner to run an action +type ActionRunner interface { + RunAction(ctx context.Context, dryrun bool, action string) error +} + +// EventRunner to run an event +type EventRunner interface { + RunEvent(ctx context.Context, dryrun bool, event string) error +} + +type workflowDef struct { + On string + Resolves []string +} + +type actionDef struct { + Needs []string + Uses string + Runs string + Args []string + Env map[string]string + Secrets []string +} + +type workflowsFile struct { + TempDir string + WorkingDir string + WorkflowPath string + Workflow map[string]workflowDef + Action map[string]actionDef +} diff --git a/cmd/root.go b/cmd/root.go new file mode 100644 index 00000000..15824b3f --- /dev/null +++ b/cmd/root.go @@ -0,0 +1,106 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + "github.com/nektos/act/actions" + "github.com/nektos/act/common" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +var verbose bool +var workflowPath string +var workingDir string +var list bool +var actionName string +var dryrun bool + +// Execute is the entry point to running the CLI +func Execute(ctx context.Context, version string) { + var rootCmd = &cobra.Command{ + Use: "act [event name to run]", + Short: "Run Github actions locally by specifying the event name (e.g. 
`push`) or an action name directly.", + Args: cobra.MaximumNArgs(1), + RunE: newRunAction(ctx), + Version: version, + SilenceUsage: true, + } + rootCmd.Flags().BoolVarP(&list, "list", "l", false, "list actions") + rootCmd.Flags().StringVarP(&actionName, "action", "a", "", "run action") + rootCmd.PersistentFlags().BoolVarP(&dryrun, "dryrun", "n", false, "dryrun mode") + rootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "verbose output") + rootCmd.PersistentFlags().StringVarP(&workflowPath, "file", "f", "./.github/main.workflow", "path to workflow file") + rootCmd.PersistentFlags().StringVarP(&workingDir, "directory", "C", ".", "working directory") + if err := rootCmd.Execute(); err != nil { + os.Exit(1) + } + +} + +func newRunAction(ctx context.Context) func(*cobra.Command, []string) error { + return func(cmd *cobra.Command, args []string) error { + if verbose { + log.SetLevel(log.DebugLevel) + } + + workflows, err := actions.ParseWorkflows(workingDir, workflowPath) + if err != nil { + return err + } + + defer workflows.Close() + + if list { + return listEvents(workflows) + } + + if actionName != "" { + return workflows.RunAction(ctx, dryrun, actionName) + } + + if len(args) == 0 { + return workflows.RunEvent(ctx, dryrun, "push") + } + return workflows.RunEvent(ctx, dryrun, args[0]) + } +} + +func listEvents(workflows actions.Workflows) error { + eventNames := workflows.ListEvents() + for _, eventName := range eventNames { + graph, err := workflows.GraphEvent(eventName) + if err != nil { + return err + } + + drawings := make([]*common.Drawing, 0) + eventPen := common.NewPen(common.StyleDoubleLine, 91 /*34*/) + + drawings = append(drawings, eventPen.DrawBoxes(fmt.Sprintf("EVENT: %s", eventName))) + + actionPen := common.NewPen(common.StyleSingleLine, 96) + arrowPen := common.NewPen(common.StyleNoLine, 97) + drawings = append(drawings, arrowPen.DrawArrow()) + for i, stage := range graph { + if i > 0 { + drawings = append(drawings, arrowPen.DrawArrow()) + } + drawings = append(drawings, actionPen.DrawBoxes(stage...)) + } + + maxWidth := 0 + for _, d := range drawings { + if d.GetWidth() > maxWidth { + maxWidth = d.GetWidth() + } + } + + for _, d := range drawings { + d.Draw(os.Stdout, maxWidth) + } + } + return nil +} diff --git a/common/draw.go b/common/draw.go new file mode 100644 index 00000000..0d64e865 --- /dev/null +++ b/common/draw.go @@ -0,0 +1,144 @@ +package common + +import ( + "fmt" + "io" + "os" + "strings" +) + +// Style is a specific style +type Style int + +// Styles +const ( + StyleDoubleLine = iota + StyleSingleLine + StyleDashedLine + StyleNoLine +) + +// NewPen creates a new pen +func NewPen(style Style, color int) *Pen { + bgcolor := 49 + if os.Getenv("CLICOLOR") == "0" { + color = 0 + bgcolor = 0 + } + return &Pen{ + style: style, + color: color, + bgcolor: bgcolor, + } +} + +type styleDef struct { + cornerTL string + cornerTR string + cornerBL string + cornerBR string + lineH string + lineV string +} + +var styleDefs = []styleDef{ + {"\u2554", "\u2557", "\u255a", "\u255d", "\u2550", "\u2551"}, + //{"\u250c", "\u2510", "\u2514", "\u2518", "\u2500", "\u2502"}, + {"\u256d", "\u256e", "\u2570", "\u256f", "\u2500", "\u2502"}, + {"\u250c", "\u2510", "\u2514", "\u2518", "\u254c", "\u254e"}, + {" ", " ", " ", " ", " ", " "}, +} + +// Pen struct +type Pen struct { + style Style + color int + bgcolor int +} + +// Drawing struct +type Drawing struct { + buf *strings.Builder + width int +} + +func (p *Pen) drawTopBars(buf io.Writer, labels ...string) { + style := 
styleDefs[p.style] + for _, label := range labels { + bar := strings.Repeat(style.lineH, len(label)+2) + fmt.Fprintf(buf, " ") + fmt.Fprintf(buf, "\x1b[%d;%dm", p.color, p.bgcolor) + fmt.Fprintf(buf, "%s%s%s", style.cornerTL, bar, style.cornerTR) + fmt.Fprintf(buf, "\x1b[%dm", 0) + } + fmt.Fprintf(buf, "\n") +} +func (p *Pen) drawBottomBars(buf io.Writer, labels ...string) { + style := styleDefs[p.style] + for _, label := range labels { + bar := strings.Repeat(style.lineH, len(label)+2) + fmt.Fprintf(buf, " ") + fmt.Fprintf(buf, "\x1b[%d;%dm", p.color, p.bgcolor) + fmt.Fprintf(buf, "%s%s%s", style.cornerBL, bar, style.cornerBR) + fmt.Fprintf(buf, "\x1b[%dm", 0) + } + fmt.Fprintf(buf, "\n") +} +func (p *Pen) drawLabels(buf io.Writer, labels ...string) { + style := styleDefs[p.style] + for _, label := range labels { + fmt.Fprintf(buf, " ") + fmt.Fprintf(buf, "\x1b[%d;%dm", p.color, p.bgcolor) + fmt.Fprintf(buf, "%s %s %s", style.lineV, label, style.lineV) + fmt.Fprintf(buf, "\x1b[%dm", 0) + } + fmt.Fprintf(buf, "\n") +} + +// DrawArrow between boxes +func (p *Pen) DrawArrow() *Drawing { + drawing := &Drawing{ + buf: new(strings.Builder), + width: 1, + } + fmt.Fprintf(drawing.buf, "\x1b[%dm", p.color) + fmt.Fprintf(drawing.buf, "\u2b07") + fmt.Fprintf(drawing.buf, "\x1b[%dm", 0) + return drawing +} + +// DrawBoxes to draw boxes +func (p *Pen) DrawBoxes(labels ...string) *Drawing { + width := 0 + for _, l := range labels { + width += len(l) + 2 + 2 + 1 + } + drawing := &Drawing{ + buf: new(strings.Builder), + width: width, + } + p.drawTopBars(drawing.buf, labels...) + p.drawLabels(drawing.buf, labels...) + p.drawBottomBars(drawing.buf, labels...) + + return drawing +} + +// Draw to writer +func (d *Drawing) Draw(writer io.Writer, centerOnWidth int) { + padSize := (centerOnWidth - d.GetWidth()) / 2 + if padSize < 0 { + padSize = 0 + } + for _, l := range strings.Split(d.buf.String(), "\n") { + if len(l) > 0 { + padding := strings.Repeat(" ", padSize) + fmt.Fprintf(writer, "%s%s\n", padding, l) + } + } +} + +// GetWidth of drawing +func (d *Drawing) GetWidth() int { + return d.width +} diff --git a/common/executor.go b/common/executor.go new file mode 100644 index 00000000..18c65285 --- /dev/null +++ b/common/executor.go @@ -0,0 +1,100 @@ +package common + +import ( + "fmt" + + log "github.com/sirupsen/logrus" +) + +// Warning that implements `error` but safe to ignore +type Warning struct { + Message string +} + +// Error the contract for error +func (w Warning) Error() string { + return w.Message +} + +// Warningf create a warning +func Warningf(format string, args ...interface{}) Warning { + w := Warning{ + Message: fmt.Sprintf(format, args...), + } + return w +} + +// Executor define contract for the steps of a workflow +type Executor func() error + +// Conditional define contract for the conditional predicate +type Conditional func() bool + +// NewPipelineExecutor creates a new executor from a series of other executors +func NewPipelineExecutor(executors ...Executor) Executor { + return func() error { + for _, executor := range executors { + if executor == nil { + continue + } + err := executor() + if err != nil { + switch err.(type) { + case Warning: + log.Warning(err.Error()) + return nil + default: + log.Debugf("%+v", err) + return err + } + } + } + return nil + } +} + +// NewConditionalExecutor creates a new executor based on conditions +func NewConditionalExecutor(conditional Conditional, trueExecutor Executor, falseExecutor Executor) Executor { + return func() error { + if 
conditional() { + if trueExecutor != nil { + return trueExecutor() + } + } else { + if falseExecutor != nil { + return falseExecutor() + } + } + return nil + } +} + +func executeWithChan(executor Executor, errChan chan error) { + errChan <- executor() +} + +// NewErrorExecutor creates a new executor that always errors out +func NewErrorExecutor(err error) Executor { + return func() error { + return err + } +} + +// NewParallelExecutor creates a new executor from a parallel of other executors +func NewParallelExecutor(executors ...Executor) Executor { + return func() error { + errChan := make(chan error) + + for _, executor := range executors { + go executeWithChan(executor, errChan) + } + + for i := 0; i < len(executors); i++ { + err := <-errChan + if err != nil { + return err + } + } + return nil + } +} diff --git a/common/executor_test.go b/common/executor_test.go new file mode 100644 index 00000000..73b0ea68 --- /dev/null +++ b/common/executor_test.go @@ -0,0 +1,84 @@ +package common + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewWorkflow(t *testing.T) { + assert := assert.New(t) + + // empty + emptyWorkflow := NewPipelineExecutor() + assert.Nil(emptyWorkflow()) + + // error case + errorWorkflow := NewErrorExecutor(fmt.Errorf("test error")) + assert.NotNil(errorWorkflow()) + + // multiple success case + runcount := 0 + successWorkflow := NewPipelineExecutor( + func() error { + runcount = runcount + 1 + return nil + }, + func() error { + runcount = runcount + 1 + return nil + }) + assert.Nil(successWorkflow()) + assert.Equal(2, runcount) +} + +func TestNewConditionalExecutor(t *testing.T) { + assert := assert.New(t) + + trueCount := 0 + falseCount := 0 + + err := NewConditionalExecutor(func() bool { + return false + }, func() error { + trueCount++ + return nil + }, func() error { + falseCount++ + return nil + })() + + assert.Nil(err) + assert.Equal(0, trueCount) + assert.Equal(1, falseCount) + + err = NewConditionalExecutor(func() bool { + return true + }, func() error { + trueCount++ + return nil + }, func() error { + falseCount++ + return nil + })() + + assert.Nil(err) + assert.Equal(1, trueCount) + assert.Equal(1, falseCount) +} + +func TestNewParallelExecutor(t *testing.T) { + assert := assert.New(t) + + count := 0 + emptyWorkflow := NewPipelineExecutor(func() error { + count++ + return nil + }) + + err := NewParallelExecutor(emptyWorkflow, emptyWorkflow)() + assert.Equal(2, count) + + assert.Nil(err) +} diff --git a/common/file.go b/common/file.go new file mode 100644 index 00000000..f2fbfd58 --- /dev/null +++ b/common/file.go @@ -0,0 +1,79 @@ +package common + +import ( + "fmt" + "io" + "os" +) + +// CopyFile copy file +func CopyFile(source string, dest string) (err error) { + sourcefile, err := os.Open(source) + if err != nil { + return err + } + + defer sourcefile.Close() + + destfile, err := os.Create(dest) + if err != nil { + return err + } + + defer destfile.Close() + + _, err = io.Copy(destfile, sourcefile) + if err == nil { + sourceinfo, err := os.Stat(source) + if err != nil { + _ = os.Chmod(dest, sourceinfo.Mode()) + } + + } + + return +} + +// CopyDir recursive copy of directory +func CopyDir(source string, dest string) (err error) { + + // get properties of source dir + sourceinfo, err := os.Stat(source) + if err != nil { + return err + } + + // create dest dir + + err = os.MkdirAll(dest, sourceinfo.Mode()) + if err != nil { + return err + } + + directory, _ := os.Open(source) + + objects, err := directory.Readdir(-1) + + for 
_, obj := range objects { + + sourcefilepointer := source + "/" + obj.Name() + + destinationfilepointer := dest + "/" + obj.Name() + + if obj.IsDir() { + // create sub-directories - recursively + err = CopyDir(sourcefilepointer, destinationfilepointer) + if err != nil { + fmt.Println(err) + } + } else { + // perform copy + err = CopyFile(sourcefilepointer, destinationfilepointer) + if err != nil { + fmt.Println(err) + } + } + + } + return +} diff --git a/common/git.go b/common/git.go new file mode 100644 index 00000000..33063f9d --- /dev/null +++ b/common/git.go @@ -0,0 +1,219 @@ +package common + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io/ioutil" + "net/url" + "os" + "path" + "path/filepath" + "regexp" + "strings" + "sync" + + "github.com/go-ini/ini" + log "github.com/sirupsen/logrus" + git "gopkg.in/src-d/go-git.v4" + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/yaml.v2" +) + +var cloneLock sync.Mutex + +// FindGitRevision get the current git revision +func FindGitRevision(file string) (shortSha string, sha string, err error) { + gitDir, err := findGitDirectory(file) + if err != nil { + return "", "", err + } + + head, err := findGitHead(file) + if err != nil { + return "", "", err + } + // load commitid ref + refBuf, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gitDir, head)) + if err != nil { + return "", "", err + } + return string(string(refBuf)[:7]), string(refBuf), nil +} + +// FindGitBranch get the current git branch +func FindGitBranch(file string) (string, error) { + head, err := findGitHead(file) + if err != nil { + return "", err + } + + // get branch name + branch := strings.TrimPrefix(head, "refs/heads/") + log.Debugf("Found branch: %s", branch) + return branch, nil +} + +func findGitHead(file string) (string, error) { + gitDir, err := findGitDirectory(file) + if err != nil { + return "", err + } + log.Debugf("Loading revision from git directory '%s'", gitDir) + + // load HEAD ref + headFile, err := os.Open(fmt.Sprintf("%s/HEAD", gitDir)) + if err != nil { + return "", err + } + defer func() { + headFile.Close() + }() + + headBuffer := new(bytes.Buffer) + headBuffer.ReadFrom(bufio.NewReader(headFile)) + head := make(map[string]string) + yaml.Unmarshal(headBuffer.Bytes(), head) + + log.Debugf("HEAD points to '%s'", head["ref"]) + + return head["ref"], nil +} + +// FindGithubRepo get the repo +func FindGithubRepo(file string) (string, error) { + url, err := findGitRemoteURL(file) + if err != nil { + return "", err + } + _, slug, err := findGitSlug(url) + return slug, err +} + +func findGitRemoteURL(file string) (string, error) { + gitDir, err := findGitDirectory(file) + if err != nil { + return "", err + } + log.Debugf("Loading slug from git directory '%s'", gitDir) + + gitconfig, err := ini.InsensitiveLoad(fmt.Sprintf("%s/config", gitDir)) + if err != nil { + return "", err + } + remote, err := gitconfig.GetSection("remote \"origin\"") + if err != nil { + return "", err + } + urlKey, err := remote.GetKey("url") + if err != nil { + return "", err + } + url := urlKey.String() + return url, nil +} + +func findGitSlug(url string) (string, string, error) { + codeCommitHTTPRegex := regexp.MustCompile(`^http(s?)://git-codecommit\.(.+)\.amazonaws.com/v1/repos/(.+)$`) + codeCommitSSHRegex := regexp.MustCompile(`ssh://git-codecommit\.(.+)\.amazonaws.com/v1/repos/(.+)$`) + httpRegex := regexp.MustCompile("^http(s?)://.*github.com.*/(.+)/(.+).git$") + sshRegex := regexp.MustCompile("github.com:(.+)/(.+).git$") + + if matches := codeCommitHTTPRegex.FindStringSubmatch(url); 
matches != nil { + return "CodeCommit", matches[3], nil + } else if matches := codeCommitSSHRegex.FindStringSubmatch(url); matches != nil { + return "CodeCommit", matches[2], nil + } else if matches := httpRegex.FindStringSubmatch(url); matches != nil { + return "GitHub", fmt.Sprintf("%s/%s", matches[2], matches[3]), nil + } else if matches := sshRegex.FindStringSubmatch(url); matches != nil { + return "GitHub", fmt.Sprintf("%s/%s", matches[1], matches[2]), nil + } + return "", url, nil +} + +func findGitDirectory(fromFile string) (string, error) { + absPath, err := filepath.Abs(fromFile) + if err != nil { + return "", err + } + + log.Debugf("Searching for git directory in %s", absPath) + fi, err := os.Stat(absPath) + if err != nil { + return "", err + } + + var dir string + if fi.Mode().IsDir() { + dir = absPath + } else { + dir = path.Dir(absPath) + } + + gitPath := path.Join(dir, ".git") + fi, err = os.Stat(gitPath) + if err == nil && fi.Mode().IsDir() { + return gitPath, nil + } else if dir == "/" || dir == "C:\\" || dir == "c:\\" { + return "", errors.New("unable to find git repo") + } + + return findGitDirectory(filepath.Dir(dir)) + +} + +// NewGitCloneExecutorInput the input for the NewGitCloneExecutor +type NewGitCloneExecutorInput struct { + URL *url.URL + Ref string + Dir string + Logger *log.Entry + Dryrun bool +} + +// NewGitCloneExecutor creates an executor to clone git repos +func NewGitCloneExecutor(input NewGitCloneExecutorInput) Executor { + return func() error { + input.Logger.Infof("git clone '%s'", input.URL.String()) + input.Logger.Debugf(" cloning %s to %s", input.URL.String(), input.Dir) + + if input.Dryrun { + return nil + } + + cloneLock.Lock() + defer cloneLock.Unlock() + + r, err := git.PlainOpen(input.Dir) + if err != nil { + r, err = git.PlainClone(input.Dir, false, &git.CloneOptions{ + URL: input.URL.String(), + Progress: input.Logger.WriterLevel(log.DebugLevel), + }) + if err != nil { + return err + } + } + + w, err := r.Worktree() + if err != nil { + return err + } + + w.Pull(&git.PullOptions{}) + input.Logger.Debugf("Cloned %s to %s", input.URL.String(), input.Dir) + + err = w.Checkout(&git.CheckoutOptions{ + //Branch: plumbing.NewHash(ref), + Hash: plumbing.NewHash(input.Ref), + }) + if err != nil { + input.Logger.Error(err) + return err + } + + input.Logger.Debugf("Checked out %s", input.Ref) + return nil + } +} diff --git a/common/git_test.go b/common/git_test.go new file mode 100644 index 00000000..e5f4e9ea --- /dev/null +++ b/common/git_test.go @@ -0,0 +1,75 @@ +package common + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "syscall" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestFindGitSlug(t *testing.T) { + assert := assert.New(t) + + var slugTests = []struct { + url string // input + provider string // expected result + slug string // expected result + }{ + {"https://git-codecommit.us-east-1.amazonaws.com/v1/repos/my-repo-name", "CodeCommit", "my-repo-name"}, + {"ssh://git-codecommit.us-west-2.amazonaws.com/v1/repos/my-repo", "CodeCommit", "my-repo"}, + {"git@github.com:nektos/act.git", "GitHub", "nektos/act"}, + {"https://github.com/nektos/act.git", "GitHub", "nektos/act"}, + {"http://github.com/nektos/act.git", "GitHub", "nektos/act"}, + {"http://myotherrepo.com/act.git", "", "http://myotherrepo.com/act.git"}, + } + + for _, tt := range slugTests { + provider, slug, err := findGitSlug(tt.url) + + assert.Nil(err) + assert.Equal(tt.provider, provider) + assert.Equal(tt.slug, slug) + } + +} + +func 
TestFindGitRemoteURL(t *testing.T) { + assert := assert.New(t) + + basedir, err := ioutil.TempDir("", "act-test") + defer os.RemoveAll(basedir) + + assert.Nil(err) + + err = gitCmd("init", basedir) + assert.Nil(err) + + remoteURL := "https://git-codecommit.us-east-1.amazonaws.com/v1/repos/my-repo-name" + err = gitCmd("config", "-f", fmt.Sprintf("%s/.git/config", basedir), "--add", "remote.origin.url", remoteURL) + assert.Nil(err) + + u, err := findGitRemoteURL(basedir) + assert.Nil(err) + assert.Equal(remoteURL, u) +} + +func gitCmd(args ...string) error { + var stdout bytes.Buffer + cmd := exec.Command("git", args...) + cmd.Stdout = &stdout + cmd.Stderr = ioutil.Discard + + err := cmd.Run() + if exitError, ok := err.(*exec.ExitError); ok { + if waitStatus, ok := exitError.Sys().(syscall.WaitStatus); ok { + return fmt.Errorf("Exit error %d", waitStatus.ExitStatus()) + } + return exitError + } + return nil +} diff --git a/container/docker_build.go b/container/docker_build.go new file mode 100644 index 00000000..0976b592 --- /dev/null +++ b/container/docker_build.go @@ -0,0 +1,106 @@ +package container + +import ( + "io" + "os" + "path/filepath" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/builder/dockerignore" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/fileutils" + "github.com/nektos/act/common" + log "github.com/sirupsen/logrus" +) + +// NewDockerBuildExecutorInput the input for the NewDockerBuildExecutor function +type NewDockerBuildExecutorInput struct { + DockerExecutorInput + ContextDir string + ImageTag string +} + +// NewDockerBuildExecutor function to create a run executor for the container +func NewDockerBuildExecutor(input NewDockerBuildExecutorInput) common.Executor { + return func() error { + input.Logger.Infof("docker build -t %s %s", input.ImageTag, input.ContextDir) + if input.Dryrun { + return nil + } + + cli, err := client.NewClientWithOpts() + if err != nil { + return err + } + + input.Logger.Debugf("Building image from '%v'", input.ContextDir) + + tags := []string{input.ImageTag} + options := types.ImageBuildOptions{ + Tags: tags, + } + + buildContext, err := createBuildContext(input.ContextDir, "Dockerfile") + if err != nil { + return err + } + + defer buildContext.Close() + + input.Logger.Debugf("Creating image from context dir '%s' with tag '%s'", input.ContextDir, input.ImageTag) + resp, err := cli.ImageBuild(input.Ctx, buildContext, options) + input.logDockerResponse(resp.Body, err != nil) + if err != nil { + return err + } + return nil + } + +} +func createBuildContext(contextDir string, relDockerfile string) (io.ReadCloser, error) { + log.Debugf("Creating archive for build context dir '%s' with relative dockerfile '%s'", contextDir, relDockerfile) + + // And canonicalize dockerfile name to a platform-independent one + relDockerfile = archive.CanonicalTarNameForPath(relDockerfile) + + f, err := os.Open(filepath.Join(contextDir, ".dockerignore")) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + defer f.Close() + + var excludes []string + if err == nil { + excludes, err = dockerignore.ReadAll(f) + if err != nil { + return nil, err + } + } + + // If .dockerignore mentions .dockerignore or the Dockerfile + // then make sure we send both files over to the daemon + // because Dockerfile is, obviously, needed no matter what, and + // .dockerignore is needed to know if either one needs to be + // removed. 
The daemon will remove them for us, if needed, after it + // parses the Dockerfile. Ignore errors here, as they will have been + // caught by validateContextDirectory above. + var includes = []string{"."} + keepThem1, _ := fileutils.Matches(".dockerignore", excludes) + keepThem2, _ := fileutils.Matches(relDockerfile, excludes) + if keepThem1 || keepThem2 { + includes = append(includes, ".dockerignore", relDockerfile) + } + + compression := archive.Uncompressed + buildCtx, err := archive.TarWithOptions(contextDir, &archive.TarOptions{ + Compression: compression, + ExcludePatterns: excludes, + IncludeFiles: includes, + }) + if err != nil { + return nil, err + } + + return buildCtx, nil +} diff --git a/container/docker_common.go b/container/docker_common.go new file mode 100644 index 00000000..10b0e3e9 --- /dev/null +++ b/container/docker_common.go @@ -0,0 +1,104 @@ +package container + +import ( + "bufio" + "context" + "encoding/json" + "fmt" + "io" + "os" + + "github.com/sirupsen/logrus" +) + +// DockerExecutorInput common input params +type DockerExecutorInput struct { + Ctx context.Context + Logger *logrus.Entry + Dryrun bool +} + +type dockerMessage struct { + ID string `json:"id"` + Stream string `json:"stream"` + Error string `json:"error"` + ErrorDetail struct { + Message string + } + Status string `json:"status"` + Progress string `json:"progress"` +} + +func (i *DockerExecutorInput) logDockerOutput(dockerResponse io.Reader) error { + scanner := bufio.NewScanner(dockerResponse) + if i.Logger == nil { + return nil + } + for scanner.Scan() { + i.Logger.Infof(scanner.Text()) + } + return nil +} + +func (i *DockerExecutorInput) streamDockerOutput(dockerResponse io.Reader) error { + out := os.Stdout + go func() { + <-i.Ctx.Done() + fmt.Println() + }() + + _, err := io.Copy(out, dockerResponse) + return err +} + +func (i *DockerExecutorInput) writeLog(isError bool, format string, args ...interface{}) { + if i.Logger == nil { + return + } + if isError { + i.Logger.Errorf(format, args...) + } else { + i.Logger.Debugf(format, args...) 
+ } + +} + +func (i *DockerExecutorInput) logDockerResponse(dockerResponse io.ReadCloser, isError bool) error { + if dockerResponse == nil { + return nil + } + defer dockerResponse.Close() + + scanner := bufio.NewScanner(dockerResponse) + msg := dockerMessage{} + for scanner.Scan() { + line := scanner.Bytes() + msg.ID = "" + msg.Stream = "" + msg.Error = "" + msg.ErrorDetail.Message = "" + msg.Status = "" + msg.Progress = "" + if err := json.Unmarshal(line, &msg); err == nil { + if msg.Error != "" { + return fmt.Errorf("%s", msg.Error) + } + + if msg.Status != "" { + if msg.Progress != "" { + i.writeLog(isError, "%s :: %s :: %s\n", msg.Status, msg.ID, msg.Progress) + } else { + i.writeLog(isError, "%s :: %s\n", msg.Status, msg.ID) + } + } else if msg.Stream != "" { + i.writeLog(isError, msg.Stream) + } else { + i.writeLog(false, "Unable to handle line: %s", string(line)) + } + } else { + i.writeLog(false, "Unable to unmarshal line [%s] ==> %v", string(line), err) + } + } + + return nil +} diff --git a/container/docker_pull.go b/container/docker_pull.go new file mode 100644 index 00000000..5cedcd43 --- /dev/null +++ b/container/docker_pull.go @@ -0,0 +1,55 @@ +package container + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/nektos/act/common" +) + +// NewDockerPullExecutorInput the input for the NewDockerPullExecutor function +type NewDockerPullExecutorInput struct { + DockerExecutorInput + Image string +} + +// NewDockerPullExecutor function to create a run executor for the container +func NewDockerPullExecutor(input NewDockerPullExecutorInput) common.Executor { + return func() error { + input.Logger.Infof("docker pull %v", input.Image) + + if input.Dryrun { + return nil + } + + imageRef := cleanImage(input.Image) + input.Logger.Debugf("pulling image '%v'", imageRef) + + cli, err := client.NewClientWithOpts() + if err != nil { + return err + } + + reader, err := cli.ImagePull(input.Ctx, imageRef, types.ImagePullOptions{}) + input.logDockerResponse(reader, err != nil) + if err != nil { + return err + } + return nil + + } + +} + +func cleanImage(image string) string { + imageParts := len(strings.Split(image, "/")) + if imageParts == 1 { + image = fmt.Sprintf("docker.io/library/%s", image) + } else if imageParts == 2 { + image = fmt.Sprintf("docker.io/%s", image) + } + + return image +} diff --git a/container/docker_pull_test.go b/container/docker_pull_test.go new file mode 100644 index 00000000..b8bffb85 --- /dev/null +++ b/container/docker_pull_test.go @@ -0,0 +1,29 @@ +package container + +import ( + "testing" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +func init() { + log.SetLevel(log.DebugLevel) +} + +func TestCleanImage(t *testing.T) { + tables := []struct { + imageIn string + imageOut string + }{ + {"myhost.com/foo/bar", "myhost.com/foo/bar"}, + {"ubuntu", "docker.io/library/ubuntu"}, + {"ubuntu:18.04", "docker.io/library/ubuntu:18.04"}, + {"cibuilds/hugo:0.53", "docker.io/cibuilds/hugo:0.53"}, + } + + for _, table := range tables { + imageOut := cleanImage(table.imageIn) + assert.Equal(t, table.imageOut, imageOut) + } +} diff --git a/container/docker_run.go b/container/docker_run.go new file mode 100644 index 00000000..689b47aa --- /dev/null +++ b/container/docker_run.go @@ -0,0 +1,184 @@ +package container + +import ( + "context" + "fmt" + "io" + "os" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + 
"github.com/docker/docker/client" + "github.com/nektos/act/common" + log "github.com/sirupsen/logrus" + "golang.org/x/crypto/ssh/terminal" +) + +// NewDockerRunExecutorInput the input for the NewDockerRunExecutor function +type NewDockerRunExecutorInput struct { + DockerExecutorInput + Image string + Entrypoint string + Cmd []string + WorkingDir string + Env []string + Binds []string + Content map[string]io.Reader + Volumes []string + Name string +} + +// NewDockerRunExecutor function to create a run executor for the container +func NewDockerRunExecutor(input NewDockerRunExecutorInput) common.Executor { + return func() error { + + input.Logger.Infof("docker run %s %s", input.Image, input.Cmd) + if input.Dryrun { + return nil + } + + cli, err := client.NewClientWithOpts() + if err != nil { + return err + } + + containerID, err := createContainer(input, cli) + if err != nil { + return err + } + defer removeContainer(input, cli, containerID) + + err = copyContentToContainer(input, cli, containerID) + if err != nil { + return err + } + + err = attachContainer(input, cli, containerID) + if err != nil { + return err + } + + err = startContainer(input, cli, containerID) + if err != nil { + return err + } + + return waitContainer(input, cli, containerID) + + } + +} + +func createContainer(input NewDockerRunExecutorInput, cli *client.Client) (string, error) { + isTerminal := terminal.IsTerminal(int(os.Stdout.Fd())) + + cmd := input.Cmd + if len(input.Cmd) == 1 { + cmd = strings.Split(cmd[0], " ") + } + + config := &container.Config{ + Image: input.Image, + Cmd: cmd, + WorkingDir: input.WorkingDir, + Env: input.Env, + Tty: isTerminal, + } + + if len(input.Volumes) > 0 { + config.Volumes = make(map[string]struct{}) + for _, vol := range input.Volumes { + config.Volumes[vol] = struct{}{} + } + } + + if input.Entrypoint != "" { + config.Entrypoint = []string{input.Entrypoint} + } + resp, err := cli.ContainerCreate(input.Ctx, config, &container.HostConfig{ + Binds: input.Binds, + }, nil, input.Name) + if err != nil { + return "", err + } + input.Logger.Debugf("Created container name=%s id=%v from image %v", input.Name, resp.ID, input.Image) + log.Debugf("ENV ==> %v", input.Env) + + return resp.ID, nil +} + +func removeContainer(input NewDockerRunExecutorInput, cli *client.Client, containerID string) { + err := cli.ContainerRemove(context.Background(), containerID, types.ContainerRemoveOptions{ + RemoveVolumes: true, + Force: true, + }) + if err != nil { + input.Logger.Errorf("%v", err) + } + + input.Logger.Debugf("Removed container: %v", containerID) +} + +func copyContentToContainer(input NewDockerRunExecutorInput, cli *client.Client, containerID string) error { + for dstPath, srcReader := range input.Content { + input.Logger.Debugf("Extracting content to '%s'", dstPath) + err := cli.CopyToContainer(input.Ctx, containerID, dstPath, srcReader, types.CopyToContainerOptions{}) + if err != nil { + return err + } + } + return nil +} + +func attachContainer(input NewDockerRunExecutorInput, cli *client.Client, containerID string) error { + out, err := cli.ContainerAttach(input.Ctx, containerID, types.ContainerAttachOptions{ + Stream: true, + Stdout: true, + Stderr: true, + }) + if err != nil { + return err + } + isTerminal := terminal.IsTerminal(int(os.Stdout.Fd())) + if !isTerminal || os.Getenv("NORAW") != "" { + go input.logDockerOutput(out.Reader) + } else { + go input.streamDockerOutput(out.Reader) + } + return nil +} + +func startContainer(input NewDockerRunExecutorInput, cli *client.Client, containerID 
string) error { + input.Logger.Debugf("STARTING image=%s entrypoint=%s cmd=%v", input.Image, input.Entrypoint, input.Cmd) + + if err := cli.ContainerStart(input.Ctx, containerID, types.ContainerStartOptions{}); err != nil { + return err + } + + input.Logger.Debugf("Started container: %v", containerID) + return nil +} + +func waitContainer(input NewDockerRunExecutorInput, cli *client.Client, containerID string) error { + statusCh, errCh := cli.ContainerWait(input.Ctx, containerID, container.WaitConditionNotRunning) + var statusCode int64 + select { + case err := <-errCh: + if err != nil { + return err + } + case status := <-statusCh: + statusCode = status.StatusCode + } + + input.Logger.Debugf("Return status: %v", statusCode) + + if statusCode == 0 { + return nil + } else if statusCode == 78 { + return fmt.Errorf("exiting with `NEUTRAL`: 78") + } + + return fmt.Errorf("exit with `FAILURE`: %v", statusCode) +} diff --git a/example/.github/main.workflow b/example/.github/main.workflow new file mode 100644 index 00000000..caad8a58 --- /dev/null +++ b/example/.github/main.workflow @@ -0,0 +1,21 @@ +workflow "build-and-deploy" { + on = "push" + resolves = ["deploy"] +} + +action "build" { + uses = "./action1" + args = "echo 'build'" +} + +action "test" { + uses = "docker://ubuntu:18.04" + args = "echo 'test'" + needs = ["build"] +} + +action "deploy" { + uses = "./action2" + args = "echo 'deploy'" + needs = ["test"] +} \ No newline at end of file diff --git a/example/action1/Dockerfile b/example/action1/Dockerfile new file mode 100644 index 00000000..311a4a0b --- /dev/null +++ b/example/action1/Dockerfile @@ -0,0 +1 @@ +FROM ubuntu:18.04 \ No newline at end of file diff --git a/example/action2/Dockerfile b/example/action2/Dockerfile new file mode 100644 index 00000000..1154c849 --- /dev/null +++ b/example/action2/Dockerfile @@ -0,0 +1 @@ +FROM alpine:3.8 \ No newline at end of file diff --git a/main.go b/main.go new file mode 100644 index 00000000..2169056d --- /dev/null +++ b/main.go @@ -0,0 +1,34 @@ +package main + +import ( + "context" + "os" + "os/signal" + + "github.com/nektos/act/cmd" +) + +var version string + +func main() { + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + + // trap Ctrl+C and call cancel on the context + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt) + defer func() { + signal.Stop(c) + cancel() + }() + go func() { + select { + case <-c: + cancel() + case <-ctx.Done(): + } + }() + + // run the command + cmd.Execute(ctx, version) +}
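
The pieces above compose as follows: common.NewPipelineExecutor runs its executors in series, common.NewParallelExecutor runs them concurrently, and newActionExecutor in actions/runner.go uses the two together to turn the dependency graph from newExecutionGraph into one runnable pipeline (each graph stage becomes a parallel executor, the stages run in order). A minimal sketch of that composition follows; it is a hypothetical standalone program, not part of this commit, and the check/lint/release steps are placeholder functions standing in for the Docker build, pull, and run executors that act would use in practice.

    package main

    import (
    	"fmt"

    	"github.com/nektos/act/common"
    )

    func main() {
    	// Hypothetical stand-in steps; in act these would be container executors.
    	check := func() error { fmt.Println("check"); return nil }
    	lint := func() error { fmt.Println("lint"); return nil }
    	release := func() error { fmt.Println("release"); return nil }

    	// Stage 1 runs check and lint concurrently; stage 2 runs release only
    	// after the whole first stage has finished without error.
    	pipeline := common.NewPipelineExecutor(
    		common.NewParallelExecutor(check, lint),
    		release,
    	)

    	if err := pipeline(); err != nil {
    		fmt.Printf("pipeline failed: %v\n", err)
    	}
    }

With a .github/main.workflow file in place, cmd/root.go wires the same kind of pipeline from the parsed HCL: running act with no arguments runs the push event, act -l lists events and draws their execution graphs, act -a <action> runs a single action, and act -n performs a dry run.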