diff --git a/act/runner/action.go b/act/runner/action.go
index 8ca5f510..98b880d1 100644
--- a/act/runner/action.go
+++ b/act/runner/action.go
@@ -428,7 +428,7 @@ func newStepContainer(ctx context.Context, step step, image string, cmd, entrypo
 	envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_ARCH", container.RunnerArch(ctx)))
 	envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_TEMP", "/tmp"))
 
-	binds, mounts, validVolumes := rc.GetBindsAndMounts()
+	binds, mounts, validVolumes := rc.GetBindsAndMounts(ctx)
 
 	networkMode := fmt.Sprintf("container:%s", rc.jobContainerName())
 	if rc.IsHostEnv(ctx) {
 		networkMode = "default"
diff --git a/act/runner/job_executor.go b/act/runner/job_executor.go
index 848e56f8..844d2fed 100644
--- a/act/runner/job_executor.go
+++ b/act/runner/job_executor.go
@@ -130,12 +130,8 @@ func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executo
 			logger.Errorf("Error while stop job container %s: %v", rc.jobContainerName(), err)
 		}
 
-		if !rc.IsHostEnv(ctx) && rc.Config.ContainerNetworkMode == "" {
-			// clean network in docker mode only
-			// if the value of `ContainerNetworkMode` is empty string,
-			// it means that the network to which containers are connecting is created by `act_runner`,
-			// so, we should remove the network at last.
-			networkName, _ := rc.networkName()
+		if !rc.IsHostEnv(ctx) && rc.getNetworkCreated(ctx) {
+			networkName := rc.getNetworkName(ctx)
 			logger.Debugf("Cleaning up network %s for job %s", networkName, rc.jobContainerName())
 			if err := container.NewDockerNetworkRemoveExecutor(networkName)(ctx); err != nil {
 				logger.Errorf("Error while cleaning network %s: %v", networkName, err)
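The cleanup above no longer re-derives ownership from the config; it asks the RunContext whether the network was created by the runner. The ownership rule itself can be stated as a standalone predicate. A minimal sketch, with illustrative names only (the patch keeps this state on RunContext instead):

package main

import "fmt"

// networkOwned mirrors the decision in ensureNetworkName: a network is
// runner-owned (created before the job, removed after it) when the job
// declares services or when no ContainerNetworkMode is configured.
func networkOwned(containerNetworkMode string, serviceCount int) bool {
	return serviceCount > 0 || containerNetworkMode == ""
}

func main() {
	fmt.Println(networkOwned("", 0))     // true: runner creates and removes the network
	fmt.Println(networkOwned("host", 0)) // false: reuse the configured network untouched
	fmt.Println(networkOwned("host", 2)) // true: service containers always get a dedicated network
}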
diff --git a/act/runner/run_context.go b/act/runner/run_context.go
index 15efc312..84c03188 100644
--- a/act/runner/run_context.go
+++ b/act/runner/run_context.go
@@ -52,6 +52,9 @@ type RunContext struct {
 	Masks               []string
 	cleanUpJobContainer common.Executor
 	caller              *caller // job calling this RunContext (reusable workflows)
+	randomName          string
+	networkName         string
+	networkCreated      bool
 }
 
 func (rc *RunContext) AddMask(mask string) {
@@ -97,14 +100,6 @@ func (rc *RunContext) jobContainerName() string {
 	return createSimpleContainerName(rc.Config.ContainerNamePrefix, "WORKFLOW-"+common.Sha256(rc.Run.Workflow.Name), "JOB-"+rc.Name)
}
 
-// networkName return the name of the network which will be created by `act` automatically for job,
-func (rc *RunContext) networkName() (string, bool) {
-	if len(rc.Run.Job().Services) > 0 || rc.Config.ContainerNetworkMode == "" {
-		return fmt.Sprintf("%s-%s-network", rc.jobContainerName(), rc.Run.JobID), true
-	}
-	return string(rc.Config.ContainerNetworkMode), false
-}
-
 func getDockerDaemonSocketMountPath(daemonPath string) string {
 	if protoIndex := strings.Index(daemonPath, "://"); protoIndex != -1 {
 		scheme := daemonPath[:protoIndex]
@@ -123,10 +118,25 @@ func getDockerDaemonSocketMountPath(daemonPath string) string {
 	return daemonPath
 }
 
-// Returns the binds and mounts for the container, resolving paths as appopriate
-func (rc *RunContext) GetBindsAndMounts() ([]string, map[string]string, []string) {
-	name := rc.jobContainerName()
+func (rc *RunContext) getInternalVolumeNames(ctx context.Context) []string {
+	return []string{
+		rc.getInternalVolumeWorkdir(ctx),
+		rc.getInternalVolumeEnv(ctx),
+	}
+}
+
+// getInternalVolumeWorkdir returns the name of the volume backing the job workdir.
+func (rc *RunContext) getInternalVolumeWorkdir(ctx context.Context) string {
+	rc.ensureRandomName(ctx)
+	return rc.randomName
+}
+
+// getInternalVolumeEnv returns the name of the volume backing the act state directory.
+func (rc *RunContext) getInternalVolumeEnv(ctx context.Context) string {
+	rc.ensureRandomName(ctx)
+	return fmt.Sprintf("%s-env", rc.randomName)
+}
+
+// Returns the binds and mounts for the container, resolving paths as appropriate
+func (rc *RunContext) GetBindsAndMounts(ctx context.Context) ([]string, map[string]string, []string) {
 	if rc.Config.ContainerDaemonSocket == "" {
 		rc.Config.ContainerDaemonSocket = "/var/run/docker.sock"
 	}
@@ -140,7 +150,7 @@ func (rc *RunContext) GetBindsAndMounts() ([]string, map[string]string, []string
 	ext := container.LinuxContainerEnvironmentExtensions{}
 
 	mounts := map[string]string{
-		name + "-env": ext.GetActPath(),
+		rc.getInternalVolumeEnv(ctx): ext.GetActPath(),
 	}
 
 	if job := rc.Run.Job(); job != nil {
@@ -168,14 +178,10 @@ func (rc *RunContext) GetBindsAndMounts() ([]string, map[string]string, []string
 		}
 		binds = append(binds, fmt.Sprintf("%s:%s%s", rc.Config.Workdir, ext.ToContainerPath(rc.Config.Workdir), bindModifiers))
 	} else {
-		mounts[name] = ext.ToContainerPath(rc.Config.Workdir)
+		mounts[rc.getInternalVolumeWorkdir(ctx)] = ext.ToContainerPath(rc.Config.Workdir)
 	}
 
-	validVolumes := []string{
-		name,
-		name + "-env",
-		getDockerDaemonSocketMountPath(rc.Config.ContainerDaemonSocket),
-	}
+	validVolumes := append(rc.getInternalVolumeNames(ctx), getDockerDaemonSocketMountPath(rc.Config.ContainerDaemonSocket))
 	validVolumes = append(validVolumes, rc.Config.ValidVolumes...)
 	return binds, mounts, validVolumes
 }
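Every internal resource name is now derived from the one random name, rather than from the workflow-derived container name. A small sketch of the derivation, with a hypothetical fixed value standing in for common.MustRandName(16):

package main

import "fmt"

func main() {
	randomName := "0123456789abcdef" // stand-in for the runtime value from common.MustRandName(16)

	workdirVolume := randomName                            // backs the job working directory
	envVolume := fmt.Sprintf("%s-env", randomName)         // backs the act state mount
	networkName := fmt.Sprintf("WORKFLOW-%s", randomName)  // runner-owned job network

	// The two volumes are also whitelisted in validVolumes by GetBindsAndMounts.
	fmt.Println(workdirVolume, envVolume, networkName)
}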
@@ -388,15 +394,44 @@ func (rc *RunContext) startHostEnvironment() common.Executor {
 	}
 }
 
-func (rc *RunContext) getNetworkName(_ context.Context) (networkName string, createAndDeleteNetwork bool) {
-	// specify the network to which the container will connect when `docker create` stage. (like execute command line: docker create --network <networkName> <image>)
-	networkName = string(rc.Config.ContainerNetworkMode)
-	if networkName == "" {
-		// if networkName is empty string, will create a new network for the containers.
-		// and it will be removed after at last.
-		networkName, createAndDeleteNetwork = rc.networkName()
+// ensureRandomName lazily assigns the run-scoped random name shared by all derived resources.
+func (rc *RunContext) ensureRandomName(ctx context.Context) {
+	if rc.randomName == "" {
+		logger := common.Logger(ctx)
+		if rc.Parent != nil {
+			// composite actions inherit their run context from the parent job
+			rootRunContext := rc
+			for rootRunContext.Parent != nil {
+				rootRunContext = rootRunContext.Parent
+			}
+			rootRunContext.ensureRandomName(ctx)
+			rc.randomName = rootRunContext.randomName
+			logger.Debugf("RunContext %s inherited random name %s from its parent", rc.Name, rc.randomName)
+		} else {
+			rc.randomName = common.MustRandName(16)
+			logger.Debugf("RunContext %s is assigned random name %s", rc.Name, rc.randomName)
+		}
+	}
+}
+
+// getNetworkCreated reports whether the job network is created (and later removed) by the runner.
+func (rc *RunContext) getNetworkCreated(ctx context.Context) bool {
+	rc.ensureNetworkName(ctx)
+	return rc.networkCreated
+}
+
+func (rc *RunContext) getNetworkName(ctx context.Context) string {
+	rc.ensureNetworkName(ctx)
+	return rc.networkName
+}
+
+// ensureNetworkName resolves the network name once: jobs with services, or without a
+// configured ContainerNetworkMode, get a dedicated random network owned by the runner.
+func (rc *RunContext) ensureNetworkName(ctx context.Context) {
+	if rc.networkName == "" {
+		rc.ensureRandomName(ctx)
+		rc.networkName = string(rc.Config.ContainerNetworkMode)
+		if len(rc.Run.Job().Services) > 0 || rc.networkName == "" {
+			rc.networkName = fmt.Sprintf("WORKFLOW-%s", rc.randomName)
+			rc.networkCreated = true
+		}
 	}
-	return networkName, createAndDeleteNetwork
 }
 
 var sanitizeNetworkAliasRegex = regexp.MustCompile("[^a-z0-9-]")
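How ensureRandomName behaves for nested contexts, as a trimmed-down sketch (the type and field names here are illustrative, not the real RunContext):

package main

import "fmt"

type runContext struct {
	name   string
	parent *runContext
}

// root walks up the parent chain, mirroring the loop in ensureRandomName, so
// every nested context resolves to the same naming root.
func root(rc *runContext) *runContext {
	for rc.parent != nil {
		rc = rc.parent
	}
	return rc
}

func main() {
	job := &runContext{name: "job"}
	composite := &runContext{name: "composite", parent: job}
	step := &runContext{name: "nested-step", parent: composite}
	fmt.Println(root(step).name) // "job": all nested contexts share the job's random name
}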
- logger.Infof("Cleaning up network for job %s, and network name is: %s", rc.JobName, networkName) - if err := container.NewDockerNetworkRemoveExecutor(networkName)(ctx); err != nil { + if rc.getNetworkCreated(ctx) { + logger.Infof("Cleaning up network for job %s, and network name is: %s", rc.JobName, rc.getNetworkName(ctx)) + if err := container.NewDockerNetworkRemoveExecutor(rc.getNetworkName(ctx))(ctx); err != nil { logger.Errorf("Error while cleaning network: %v", err) } } @@ -556,7 +586,7 @@ func (rc *RunContext) prepareJobContainer(ctx context.Context) error { Env: envList, ToolCache: rc.getToolCache(ctx), Mounts: mounts, - NetworkMode: networkName, + NetworkMode: rc.getNetworkName(ctx), NetworkAliases: []string{sanitizeNetworkAlias(ctx, rc.Name)}, Binds: binds, Stdout: logWriter, @@ -581,7 +611,6 @@ func (rc *RunContext) startJobContainer() common.Executor { if err := rc.prepareJobContainer(ctx); err != nil { return err } - networkName, _ := rc.getNetworkName(ctx) networkConfig := network.CreateOptions{ Driver: "bridge", Scope: "local", @@ -591,8 +620,8 @@ func (rc *RunContext) startJobContainer() common.Executor { rc.pullServicesImages(rc.Config.ForcePull), rc.JobContainer.Pull(rc.Config.ForcePull), rc.stopJobContainer(), - container.NewDockerNetworkCreateExecutor(networkName, &networkConfig).IfBool(!rc.IsHostEnv(ctx) && rc.Config.ContainerNetworkMode == ""), // if the value of `ContainerNetworkMode` is empty string, then will create a new network for containers. - rc.startServiceContainers(networkName), + container.NewDockerNetworkCreateExecutor(rc.getNetworkName(ctx), &networkConfig).IfBool(!rc.IsHostEnv(ctx) && rc.Config.ContainerNetworkMode == ""), // if the value of `ContainerNetworkMode` is empty string, then will create a new network for containers. 
diff --git a/act/runner/run_context_test.go b/act/runner/run_context_test.go
index 09506d47..296afd1b 100644
--- a/act/runner/run_context_test.go
+++ b/act/runner/run_context_test.go
@@ -219,7 +219,7 @@ func TestRunContext_GetBindsAndMounts(t *testing.T) {
 			config := testcase.rc.Config
 			config.Workdir = testcase.name
 			config.BindWorkdir = bindWorkDir
-			gotbind, gotmount, _ := rctemplate.GetBindsAndMounts()
+			gotbind, gotmount, _ := rctemplate.GetBindsAndMounts(t.Context())
 
 			// Name binds/mounts are either/or
 			if config.BindWorkdir {
@@ -229,7 +229,7 @@ func TestRunContext_GetBindsAndMounts(t *testing.T) {
 				}
 				assert.Contains(t, gotbind, fullBind)
 			} else {
-				mountkey := testcase.rc.jobContainerName()
+				mountkey := testcase.rc.getInternalVolumeWorkdir(t.Context())
 				assert.EqualValues(t, testcase.wantmount, gotmount[mountkey])
 			}
 		})
@@ -271,7 +271,7 @@ func TestRunContext_GetBindsAndMounts(t *testing.T) {
 		rc.Run.JobID = "job1"
 		rc.Run.Workflow.Jobs = map[string]*model.Job{"job1": job}
 
-		gotbind, gotmount, _ := rc.GetBindsAndMounts()
+		gotbind, gotmount, _ := rc.GetBindsAndMounts(t.Context())
 
 		if len(testcase.wantbind) > 0 {
 			assert.Contains(t, gotbind, testcase.wantbind)
@@ -650,6 +650,88 @@ func TestRunContext_CreateSimpleContainerName(t *testing.T) {
 	}
 }
 
+func TestRunContext_ensureNetworkName(t *testing.T) {
+	t.Run("CreateNetworkForServices", func(t *testing.T) {
+		yaml := `
+on:
+  push:
+
+jobs:
+  job:
+    runs-on: docker
+    container:
+      image: some:image
+    services:
+      service1:
+        image: service1:image
+    steps:
+      - run: echo ok
+`
+		workflow, err := model.ReadWorkflow(strings.NewReader(yaml), true)
+		require.NoError(t, err)
+
+		rc := &RunContext{
+			Config: &Config{
+				ContainerNetworkMode: "host",
+			},
+			Run: &model.Run{
+				JobID:    "job",
+				Workflow: workflow,
+			},
+		}
+
+		rc.ensureNetworkName(t.Context())
+		assert.True(t, rc.getNetworkCreated(t.Context()))
+		assert.True(t, strings.HasPrefix(rc.getNetworkName(t.Context()), "WORKFLOW-"), rc.getNetworkName(t.Context()))
+	})
+
+	yaml := `
+on:
+  push:
+
+jobs:
+  job:
+    runs-on: docker
+    container:
+      image: some:image
+    steps:
+      - run: echo ok
+`
+	workflow, err := model.ReadWorkflow(strings.NewReader(yaml), true)
+	require.NoError(t, err)
+
+	run := &model.Run{
+		JobID:    "job",
+		Workflow: workflow,
+	}
+
+	t.Run("CreateNetworkIfEmptyNetworkMode", func(t *testing.T) {
+		rc := &RunContext{
+			Config: &Config{
+				ContainerNetworkMode: "",
+			},
+			Run: run,
+		}
+
+		rc.ensureNetworkName(t.Context())
+		assert.True(t, rc.getNetworkCreated(t.Context()))
+		assert.True(t, strings.HasPrefix(rc.getNetworkName(t.Context()), "WORKFLOW-"), rc.getNetworkName(t.Context()))
+	})
+
+	t.Run("FixedNetworkIfSetByNetworkMode", func(t *testing.T) {
+		rc := &RunContext{
+			Config: &Config{
+				ContainerNetworkMode: "host",
+			},
+			Run: run,
+		}
+
+		rc.ensureNetworkName(t.Context())
+		assert.False(t, rc.getNetworkCreated(t.Context()))
+		assert.Equal(t, "host", rc.getNetworkName(t.Context()))
+	})
+}
+
 func TestRunContext_SanitizeNetworkAlias(t *testing.T) {
 	same := "same"
 	assert.Equal(t, same, sanitizeNetworkAlias(t.Context(), same))
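The tests lean on t.Context, which (since Go 1.24) returns a per-test context that is canceled as the test finishes. A minimal illustration of the scoping, independent of this patch:

package example

import "testing"

func TestContextScoped(t *testing.T) {
	ctx := t.Context() // canceled automatically during test teardown
	if err := ctx.Err(); err != nil {
		t.Fatalf("context should still be live while the test runs: %v", err)
	}
}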
Password: "containerpassword", - Entrypoint: []string{"tail", "-f", "/dev/null"}, - Cmd: nil, - WorkingDir: "/my/workdir", - Env: []string{}, - ToolCache: "/opt/hostedtoolcache", - Binds: []string{"/var/run/docker.sock:/var/run/docker.sock"}, - Mounts: map[string]string{ - "WORKFLOW-e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855_JOB": "/my/workdir", - "WORKFLOW-e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855_JOB-env": "/var/run/act", - }, - NetworkMode: "WORKFLOW-e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855_JOB-job-network", + Name: "WORKFLOW-e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855_JOB", + Image: "some:image", + Username: "containerusername", + Password: "containerpassword", + Entrypoint: []string{"tail", "-f", "/dev/null"}, + Cmd: nil, + WorkingDir: "/my/workdir", + Env: []string{}, + ToolCache: "/opt/hostedtoolcache", + Binds: []string{"/var/run/docker.sock:/var/run/docker.sock"}, Privileged: false, UsernsMode: "", Platform: "", @@ -735,11 +812,6 @@ jobs: PortBindings: nil, ConfigOptions: "", JobOptions: "", - ValidVolumes: []string{ - "WORKFLOW-e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855_JOB", - "WORKFLOW-e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855_JOB-env", - "/var/run/docker.sock", - }, }, { Name: "WORKFLOW-e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca49599-fe7f4c0058dbd2161ebe4aafa71cd83bd96ee19d3ca8043d5e4bc477a664a80c", @@ -752,8 +824,6 @@ jobs: Env: []string{}, ToolCache: "/opt/hostedtoolcache", Binds: []string{"/var/run/docker.sock:/var/run/docker.sock"}, - Mounts: map[string]string{}, - NetworkMode: "WORKFLOW-e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855_JOB-job-network", Privileged: false, UsernsMode: "", Platform: "", @@ -774,8 +844,6 @@ jobs: Env: []string{}, ToolCache: "/opt/hostedtoolcache", Binds: []string{"/var/run/docker.sock:/var/run/docker.sock"}, - Mounts: map[string]string{}, - NetworkMode: "WORKFLOW-e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855_JOB-job-network", Privileged: false, UsernsMode: "", Platform: "", @@ -808,7 +876,24 @@ jobs: require.NoError(t, rc.prepareJobContainer(ctx)) slices.SortFunc(containerInputs, func(a, b container.NewContainerInput) int { return cmp.Compare(a.Username, b.Username) }) + jobContainerInput := containerInputs[0] + require.Equal(t, "containerusername", jobContainerInput.Username) + require.NotEmpty(t, jobContainerInput.NetworkMode) + for source := range jobContainerInput.Mounts { + assert.Contains(t, jobContainerInput.ValidVolumes, source) + } for i := 0; i < len(containerInputs); i++ { + + assert.Equal(t, jobContainerInput.NetworkMode, containerInputs[i].NetworkMode, containerInputs[i].Username) + containerInputs[i].NetworkMode = "" + + if strings.HasPrefix(containerInputs[i].Username, "service") { + assert.Empty(t, containerInputs[i].Mounts) + assert.Empty(t, containerInputs[i].ValidVolumes) + } + containerInputs[i].Mounts = nil + containerInputs[i].ValidVolumes = nil + assert.EqualValues(t, testCase.inputs[i], containerInputs[i], containerInputs[i].Username) } }) @@ -855,3 +940,18 @@ func Test_waitForServiceContainer(t *testing.T) { m.AssertExpectations(t) }) } + +func TestRunContext_ensureRandomName(t *testing.T) { + parent := &RunContext{ + Name: "parentname", + } + rc := &RunContext{ + Name: "runname", + Parent: parent, + } + + parent.ensureRandomName(t.Context()) + assert.NotEmpty(t, parent.randomName) + rc.ensureRandomName(t.Context()) + assert.Equal(t, 
diff --git a/act/runner/step_docker.go b/act/runner/step_docker.go
index 055098a8..24cb467d 100644
--- a/act/runner/step_docker.go
+++ b/act/runner/step_docker.go
@@ -110,7 +110,7 @@ func (sd *stepDocker) newStepContainer(ctx context.Context, image string, cmd, e
 	envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_ARCH", container.RunnerArch(ctx)))
 	envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_TEMP", "/tmp"))
 
-	binds, mounts, validVolumes := rc.GetBindsAndMounts()
+	binds, mounts, validVolumes := rc.GetBindsAndMounts(ctx)
 
 	stepContainer := ContainerNewContainer(&container.NewContainerInput{
 		Cmd:        cmd,
 		Entrypoint: entrypoint,
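For intuition about the name source: one plausible shape for a MustRandName-style helper is sketched below; the actual common.MustRandName in this repository may differ (length semantics, alphabet, error handling):

package example

import (
	"crypto/rand"
	"encoding/hex"
)

// mustRandName returns n bytes of cryptographic randomness, hex-encoded
// (so the resulting string has 2n characters). "Must" helpers panic rather
// than return an error, which suits one-time, per-run initialization.
func mustRandName(n int) string {
	b := make([]byte, n)
	if _, err := rand.Read(b); err != nil {
		panic(err)
	}
	return hex.EncodeToString(b)
}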