From f392ec4caa14df49faa38bc5113a5160bb5366dc Mon Sep 17 00:00:00 2001
From: Earl Warren
Date: Thu, 14 Aug 2025 10:08:50 +0200
Subject: [PATCH 1/6] chore: refactor GetBindsAndMounts to have a context.Context arg

---
 act/runner/action.go           | 2 +-
 act/runner/run_context.go      | 4 ++--
 act/runner/run_context_test.go | 4 ++--
 act/runner/step_docker.go      | 2 +-
 4 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/act/runner/action.go b/act/runner/action.go
index 8ca5f510..98b880d1 100644
--- a/act/runner/action.go
+++ b/act/runner/action.go
@@ -428,7 +428,7 @@ func newStepContainer(ctx context.Context, step step, image string, cmd, entrypo
 	envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_ARCH", container.RunnerArch(ctx)))
 	envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_TEMP", "/tmp"))
 
-	binds, mounts, validVolumes := rc.GetBindsAndMounts()
+	binds, mounts, validVolumes := rc.GetBindsAndMounts(ctx)
 	networkMode := fmt.Sprintf("container:%s", rc.jobContainerName())
 	if rc.IsHostEnv(ctx) {
 		networkMode = "default"
diff --git a/act/runner/run_context.go b/act/runner/run_context.go
index 15efc312..a4c6a893 100644
--- a/act/runner/run_context.go
+++ b/act/runner/run_context.go
@@ -124,7 +124,7 @@ func getDockerDaemonSocketMountPath(daemonPath string) string {
 }
 
 // Returns the binds and mounts for the container, resolving paths as appopriate
-func (rc *RunContext) GetBindsAndMounts() ([]string, map[string]string, []string) {
+func (rc *RunContext) GetBindsAndMounts(ctx context.Context) ([]string, map[string]string, []string) {
 	name := rc.jobContainerName()
 
 	if rc.Config.ContainerDaemonSocket == "" {
diff --git a/act/runner/run_context_test.go b/act/runner/run_context_test.go
index 09506d47..0f70e12d 100644
--- a/act/runner/run_context_test.go
+++ b/act/runner/run_context_test.go
@@ -219,7 +219,7 @@ func TestRunContext_GetBindsAndMounts(t *testing.T) {
 			config := testcase.rc.Config
 			config.Workdir = testcase.name
 			config.BindWorkdir = bindWorkDir
-			gotbind, gotmount, _ := rctemplate.GetBindsAndMounts()
+			gotbind, gotmount, _ := rctemplate.GetBindsAndMounts(t.Context())
 
 			// Name binds/mounts are either/or
 			if config.BindWorkdir {
@@ -271,7 +271,7 @@ func TestRunContext_GetBindsAndMounts(t *testing.T) {
 			rc.Run.JobID = "job1"
 			rc.Run.Workflow.Jobs = map[string]*model.Job{"job1": job}
 
-			gotbind, gotmount, _ := rc.GetBindsAndMounts()
+			gotbind, gotmount, _ := rc.GetBindsAndMounts(t.Context())
 
 			if len(testcase.wantbind) > 0 {
 				assert.Contains(t, gotbind, testcase.wantbind)
diff --git a/act/runner/step_docker.go b/act/runner/step_docker.go
index 055098a8..24cb467d 100644
--- a/act/runner/step_docker.go
+++ b/act/runner/step_docker.go
@@ -110,7 +110,7 @@ func (sd *stepDocker) newStepContainer(ctx context.Context, image string, cmd, e
 	envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_ARCH", container.RunnerArch(ctx)))
 	envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_TEMP", "/tmp"))
 
-	binds, mounts, validVolumes := rc.GetBindsAndMounts()
+	binds, mounts, validVolumes := rc.GetBindsAndMounts(ctx)
 	stepContainer := ContainerNewContainer(&container.NewContainerInput{
 		Cmd: cmd,
 		Entrypoint: entrypoint,

From 8a8e01522d249399992a29cd4ecb9594cd69d416 Mon Sep 17 00:00:00 2001
From: Earl Warren
Date: Thu, 14 Aug 2025 10:21:08 +0200
Subject: [PATCH 2/6] chore: refactor network name creation

- networkName becomes ensureNetworkName and stores the results in the
  run context instead of returning them
- the getNetworkName and getNetworkCreated accessors are used instead
  of local variables
---
 act/runner/job_executor.go     |  8 +---
 act/runner/run_context.go      | 55 +++++++++++------------
 act/runner/run_context_test.go | 82 ++++++++++++++++++++++++++++++++++
 3 files changed, 110 insertions(+), 35 deletions(-)

diff --git a/act/runner/job_executor.go b/act/runner/job_executor.go
index 848e56f8..844d2fed 100644
--- a/act/runner/job_executor.go
+++ b/act/runner/job_executor.go
@@ -130,12 +130,8 @@ func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executo
 				logger.Errorf("Error while stop job container %s: %v", rc.jobContainerName(), err)
 			}
 
-			if !rc.IsHostEnv(ctx) && rc.Config.ContainerNetworkMode == "" {
-				// clean network in docker mode only
-				// if the value of `ContainerNetworkMode` is empty string,
-				// it means that the network to which containers are connecting is created by `act_runner`,
-				// so, we should remove the network at last.
-				networkName, _ := rc.networkName()
+			if !rc.IsHostEnv(ctx) && rc.getNetworkCreated(ctx) {
+				networkName := rc.getNetworkName(ctx)
 				logger.Debugf("Cleaning up network %s for job %s", networkName, rc.jobContainerName())
 				if err := container.NewDockerNetworkRemoveExecutor(networkName)(ctx); err != nil {
 					logger.Errorf("Error while cleaning network %s: %v", networkName, err)
diff --git a/act/runner/run_context.go b/act/runner/run_context.go
index a4c6a893..f4cacda8 100644
--- a/act/runner/run_context.go
+++ b/act/runner/run_context.go
@@ -52,6 +52,8 @@ type RunContext struct {
 	Masks               []string
 	cleanUpJobContainer common.Executor
 	caller              *caller // job calling this RunContext (reusable workflows)
+	networkName         string
+	networkCreated      bool
 }
 
 func (rc *RunContext) AddMask(mask string) {
@@ -97,14 +99,6 @@ func (rc *RunContext) jobContainerName() string {
 	return createSimpleContainerName(rc.Config.ContainerNamePrefix, "WORKFLOW-"+common.Sha256(rc.Run.Workflow.Name), "JOB-"+rc.Name)
 }
 
-// networkName return the name of the network which will be created by `act` automatically for job,
-func (rc *RunContext) networkName() (string, bool) {
-	if len(rc.Run.Job().Services) > 0 || rc.Config.ContainerNetworkMode == "" {
-		return fmt.Sprintf("%s-%s-network", rc.jobContainerName(), rc.Run.JobID), true
-	}
-	return string(rc.Config.ContainerNetworkMode), false
-}
-
 func getDockerDaemonSocketMountPath(daemonPath string) string {
 	if protoIndex := strings.Index(daemonPath, "://"); protoIndex != -1 {
 		scheme := daemonPath[:protoIndex]
@@ -388,15 +382,24 @@ func (rc *RunContext) startHostEnvironment() common.Executor {
 	}
 }
 
-func (rc *RunContext) getNetworkName(_ context.Context) (networkName string, createAndDeleteNetwork bool) {
-	// specify the network to which the container will connect when `docker create` stage. (like execute command line: docker create --network <networkName> <image>)
-	networkName = string(rc.Config.ContainerNetworkMode)
-	if networkName == "" {
-		// if networkName is empty string, will create a new network for the containers.
-		// and it will be removed after at last.
-		networkName, createAndDeleteNetwork = rc.networkName()
+func (rc *RunContext) getNetworkCreated(ctx context.Context) bool {
+	rc.ensureNetworkName(ctx)
+	return rc.networkCreated
+}
+
+func (rc *RunContext) getNetworkName(ctx context.Context) string {
+	rc.ensureNetworkName(ctx)
+	return rc.networkName
+}
+
+func (rc *RunContext) ensureNetworkName(ctx context.Context) {
+	if rc.networkName == "" {
+		rc.networkName = string(rc.Config.ContainerNetworkMode)
+		if len(rc.Run.Job().Services) > 0 || rc.networkName == "" {
+			rc.networkName = fmt.Sprintf("%s-%s-network", rc.jobContainerName(), rc.Run.JobID)
+			rc.networkCreated = true
+		}
 	}
-	return networkName, createAndDeleteNetwork
 }
 
 var sanitizeNetworkAliasRegex = regexp.MustCompile("[^a-z0-9-]")
@@ -445,7 +448,6 @@ func (rc *RunContext) prepareJobContainer(ctx context.Context) error {
 	ext := container.LinuxContainerEnvironmentExtensions{}
 	binds, mounts, validVolumes := rc.GetBindsAndMounts(ctx)
-	networkName, createAndDeleteNetwork := rc.getNetworkName(ctx)
 	// add service containers
 	for serviceID, spec := range rc.Run.Job().Services {
 		// interpolate env
@@ -498,7 +500,7 @@ func (rc *RunContext) prepareJobContainer(ctx context.Context) error {
 			Privileged:     rc.Config.Privileged,
 			UsernsMode:     rc.Config.UsernsMode,
 			Platform:       rc.Config.ContainerArchitecture,
-			NetworkMode:    networkName,
+			NetworkMode:    rc.getNetworkName(ctx),
 			NetworkAliases: []string{sanitizeNetworkAlias(ctx, serviceID)},
 			ExposedPorts:   exposedPorts,
 			PortBindings:   portBindings,
@@ -529,13 +531,9 @@ func (rc *RunContext) prepareJobContainer(ctx context.Context) error {
 					logger.Errorf("Error while cleaning services: %v", err)
 				}
 			}
-			if createAndDeleteNetwork {
-				// clean network if it has been created by act
-				// if using service containers
-				// it means that the network to which containers are connecting is created by `act_runner`,
-				// so, we should remove the network at last.
-				logger.Infof("Cleaning up network for job %s, and network name is: %s", rc.JobName, networkName)
-				if err := container.NewDockerNetworkRemoveExecutor(networkName)(ctx); err != nil {
+			if rc.getNetworkCreated(ctx) {
+				logger.Infof("Cleaning up network for job %s, and network name is: %s", rc.JobName, rc.getNetworkName(ctx))
+				if err := container.NewDockerNetworkRemoveExecutor(rc.getNetworkName(ctx))(ctx); err != nil {
 					logger.Errorf("Error while cleaning network: %v", err)
 				}
 			}
@@ -556,7 +554,7 @@ func (rc *RunContext) prepareJobContainer(ctx context.Context) error {
 		Env:            envList,
 		ToolCache:      rc.getToolCache(ctx),
 		Mounts:         mounts,
-		NetworkMode:    networkName,
+		NetworkMode:    rc.getNetworkName(ctx),
 		NetworkAliases: []string{sanitizeNetworkAlias(ctx, rc.Name)},
 		Binds:          binds,
 		Stdout:         logWriter,
@@ -581,7 +579,6 @@ func (rc *RunContext) startJobContainer() common.Executor {
 		if err := rc.prepareJobContainer(ctx); err != nil {
 			return err
 		}
-		networkName, _ := rc.getNetworkName(ctx)
 		networkConfig := network.CreateOptions{
 			Driver: "bridge",
 			Scope:  "local",
@@ -590,8 +587,8 @@ func (rc *RunContext) startJobContainer() common.Executor {
 			rc.pullServicesImages(rc.Config.ForcePull),
 			rc.JobContainer.Pull(rc.Config.ForcePull),
 			rc.stopJobContainer(),
-			container.NewDockerNetworkCreateExecutor(networkName, &networkConfig).IfBool(!rc.IsHostEnv(ctx) && rc.Config.ContainerNetworkMode == ""), // if the value of `ContainerNetworkMode` is empty string, then will create a new network for containers.
-			rc.startServiceContainers(networkName),
+			container.NewDockerNetworkCreateExecutor(rc.getNetworkName(ctx), &networkConfig).IfBool(!rc.IsHostEnv(ctx) && rc.Config.ContainerNetworkMode == ""), // if the value of `ContainerNetworkMode` is empty string, then will create a new network for containers.
+			rc.startServiceContainers(rc.getNetworkName(ctx)),
 			rc.JobContainer.Create(rc.Config.ContainerCapAdd, rc.Config.ContainerCapDrop),
 			rc.JobContainer.Start(false),
 			rc.JobContainer.Copy(rc.JobContainer.GetActPath()+"/", &container.FileEntry{
diff --git a/act/runner/run_context_test.go b/act/runner/run_context_test.go
index 0f70e12d..1b92a3a8 100644
--- a/act/runner/run_context_test.go
+++ b/act/runner/run_context_test.go
@@ -650,6 +650,88 @@ func TestRunContext_CreateSimpleContainerName(t *testing.T) {
 	}
 }
 
+func TestRunContext_ensureNetworkName(t *testing.T) {
+	t.Run("CreateNetworkForServices", func(t *testing.T) {
+		yaml := `
+on:
+  push:
+
+jobs:
+  job:
+    runs-on: docker
+    container:
+      image: some:image
+    services:
+      service1:
+        image: service1:image
+    steps:
+      - run: echo ok
+`
+		workflow, err := model.ReadWorkflow(strings.NewReader(yaml), true)
+		require.NoError(t, err)
+
+		rc := &RunContext{
+			Config: &Config{
+				ContainerNetworkMode: "host",
+			},
+			Run: &model.Run{
+				JobID:    "job",
+				Workflow: workflow,
+			},
+		}
+
+		rc.ensureNetworkName(t.Context())
+		assert.True(t, rc.getNetworkCreated(t.Context()))
+		assert.True(t, strings.HasPrefix(rc.getNetworkName(t.Context()), "WORKFLOW-"), rc.getNetworkName(t.Context()))
+	})
+
+	yaml := `
+on:
+  push:
+
+jobs:
+  job:
+    runs-on: docker
+    container:
+      image: some:image
+    steps:
+      - run: echo ok
+`
+	workflow, err := model.ReadWorkflow(strings.NewReader(yaml), true)
+	require.NoError(t, err)
+
+	run := &model.Run{
+		JobID:    "job",
+		Workflow: workflow,
+	}
+
+	t.Run("CreateNetworkIfEmptyNetworkMode", func(t *testing.T) {
+		rc := &RunContext{
+			Config: &Config{
+				ContainerNetworkMode: "",
+			},
+			Run: run,
+		}
+
+		rc.ensureNetworkName(t.Context())
+		assert.True(t, rc.getNetworkCreated(t.Context()))
+		assert.True(t, strings.HasPrefix(rc.getNetworkName(t.Context()), "WORKFLOW-"), rc.getNetworkName(t.Context()))
+	})
+
+	t.Run("FixedNetworkIfSetByNetworkMode", func(t *testing.T) {
+		rc := &RunContext{
+			Config: &Config{
+				ContainerNetworkMode: "host",
+			},
+			Run: run,
+		}
+
+		rc.ensureNetworkName(t.Context())
+		assert.False(t, rc.getNetworkCreated(t.Context()))
+		assert.Equal(t, "host", rc.getNetworkName(t.Context()))
+	})
+}
+
 func TestRunContext_SanitizeNetworkAlias(t *testing.T) {
 	same := "same"
 	assert.Equal(t, same, sanitizeNetworkAlias(t.Context(), same))

From 023427115f737fe314b874769f003bfb4052c531 Mon Sep 17 00:00:00 2001
From: Earl Warren
Date: Thu, 14 Aug 2025 10:30:46 +0200
Subject: [PATCH 3/6] chore: refactor internal volume name accessors

- getInternalVolumeWorkdir is used instead of rc.jobContainerName()
- getInternalVolumeEnv is used instead of rc.jobContainerName() + "-env"
- getInternalVolumeNames is used when all internal volumes are needed
---
 act/runner/run_context.go      | 29 +++++++++++++++++++----------
 act/runner/run_context_test.go |  2 +-
 2 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/act/runner/run_context.go b/act/runner/run_context.go
index f4cacda8..c7760de3 100644
--- a/act/runner/run_context.go
+++ b/act/runner/run_context.go
@@ -117,10 +117,23 @@ func getDockerDaemonSocketMountPath(daemonPath string) string {
 	return daemonPath
 }
 
+func (rc *RunContext) getInternalVolumeNames(ctx context.Context) []string {
+	return []string{
+		rc.getInternalVolumeWorkdir(ctx),
+		rc.getInternalVolumeEnv(ctx),
+	}
+}
+
+func (rc *RunContext) getInternalVolumeWorkdir(ctx context.Context) string {
+	return rc.jobContainerName()
+}
+
+func (rc *RunContext) getInternalVolumeEnv(ctx context.Context) string {
+	return fmt.Sprintf("%s-env", rc.jobContainerName())
+}
+
 // Returns the binds and mounts for the container, resolving paths as appopriate
 func (rc *RunContext) GetBindsAndMounts(ctx context.Context) ([]string, map[string]string, []string) {
-	name := rc.jobContainerName()
-
 	if rc.Config.ContainerDaemonSocket == "" {
 		rc.Config.ContainerDaemonSocket = "/var/run/docker.sock"
 	}
@@ -134,7 +147,7 @@ func (rc *RunContext) GetBindsAndMounts(ctx context.Context) ([]string, map[stri
 	ext := container.LinuxContainerEnvironmentExtensions{}
 
 	mounts := map[string]string{
-		name + "-env": ext.GetActPath(),
+		rc.getInternalVolumeEnv(ctx): ext.GetActPath(),
 	}
 
 	if job := rc.Run.Job(); job != nil {
@@ -162,14 +175,10 @@ func (rc *RunContext) GetBindsAndMounts(ctx context.Context) ([]string, map[stri
 		}
 		binds = append(binds, fmt.Sprintf("%s:%s%s", rc.Config.Workdir, ext.ToContainerPath(rc.Config.Workdir), bindModifiers))
 	} else {
-		mounts[name] = ext.ToContainerPath(rc.Config.Workdir)
+		mounts[rc.getInternalVolumeWorkdir(ctx)] = ext.ToContainerPath(rc.Config.Workdir)
 	}
 
-	validVolumes := []string{
-		name,
-		name + "-env",
-		getDockerDaemonSocketMountPath(rc.Config.ContainerDaemonSocket),
-	}
+	validVolumes := append(rc.getInternalVolumeNames(ctx), getDockerDaemonSocketMountPath(rc.Config.ContainerDaemonSocket))
 	validVolumes = append(validVolumes, rc.Config.ValidVolumes...)
 	return binds, mounts, validVolumes
 }
@@ -523,7 +532,7 @@ func (rc *RunContext) prepareJobContainer(ctx context.Context) error {
 	if rc.JobContainer != nil {
 		return rc.JobContainer.Remove().IfNot(reuseJobContainer).
-			Then(container.NewDockerVolumesRemoveExecutor([]string{rc.jobContainerName(), rc.jobContainerName() + "-env"})).IfNot(reuseJobContainer).
+			Then(container.NewDockerVolumesRemoveExecutor(rc.getInternalVolumeNames(ctx))).IfNot(reuseJobContainer).
 			Then(func(ctx context.Context) error {
 				if len(rc.ServiceContainers) > 0 {
 					logger.Infof("Cleaning up services for job %s", rc.JobName)
diff --git a/act/runner/run_context_test.go b/act/runner/run_context_test.go
index 1b92a3a8..9d167a0f 100644
--- a/act/runner/run_context_test.go
+++ b/act/runner/run_context_test.go
@@ -229,7 +229,7 @@ func TestRunContext_GetBindsAndMounts(t *testing.T) {
 				}
 				assert.Contains(t, gotbind, fullBind)
 			} else {
-				mountkey := testcase.rc.jobContainerName()
+				mountkey := testcase.rc.getInternalVolumeWorkdir(t.Context())
 				assert.EqualValues(t, testcase.wantmount, gotmount[mountkey])
 			}
 		})

From a197fea4ba6e14b9582b909c4becbef47041eb35 Mon Sep 17 00:00:00 2001
From: Earl Warren
Date: Thu, 14 Aug 2025 10:46:28 +0200
Subject: [PATCH 4/6] feat: assign a unique random name to each run context

If the run context has a parent, both share the same unique random
name. A composite action does not have a run context of its own; it
reuses the run context of the job that calls it: this is when a parent
is used and needed. There may be any number of levels of parent/child
relationships, and ensureRandomName recursively looks for the first
parent with a non-empty random name.
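
The lookup order can be pictured with a small, runnable sketch (an
illustration only: this RunContext is a stand-in for the real type,
mustRandName stands in for common.MustRandName, and the context.Context
argument is dropped):

    package main

    import (
    	"crypto/rand"
    	"encoding/hex"
    	"fmt"
    )

    // RunContext is a stand-in; only the fields relevant to the
    // name lookup are kept.
    type RunContext struct {
    	Parent     *RunContext
    	randomName string
    }

    // mustRandName mimics common.MustRandName: n random bytes, hex encoded.
    func mustRandName(n int) string {
    	b := make([]byte, n)
    	if _, err := rand.Read(b); err != nil {
    		panic(err)
    	}
    	return hex.EncodeToString(b)
    }

    func (rc *RunContext) ensureRandomName() {
    	if rc.randomName != "" {
    		return
    	}
    	if rc.Parent != nil {
    		// walk to the root job before generating anything
    		root := rc
    		for root.Parent != nil {
    			root = root.Parent
    		}
    		root.ensureRandomName()
    		rc.randomName = root.randomName
    		return
    	}
    	rc.randomName = mustRandName(16)
    }

    func main() {
    	job := &RunContext{}
    	composite := &RunContext{Parent: job}
    	composite.ensureRandomName()
    	fmt.Println(job.randomName == composite.randomName) // prints true
    }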
---
 act/runner/run_context.go      | 20 ++++++++++++++++++++
 act/runner/run_context_test.go | 15 +++++++++++++++
 2 files changed, 35 insertions(+)

diff --git a/act/runner/run_context.go b/act/runner/run_context.go
index c7760de3..a1db2867 100644
--- a/act/runner/run_context.go
+++ b/act/runner/run_context.go
@@ -52,6 +52,7 @@ type RunContext struct {
 	Masks               []string
 	cleanUpJobContainer common.Executor
 	caller              *caller // job calling this RunContext (reusable workflows)
+	randomName          string
 	networkName         string
 	networkCreated      bool
 }
@@ -391,6 +392,25 @@ func (rc *RunContext) startHostEnvironment() common.Executor {
 	}
 }
 
+func (rc *RunContext) ensureRandomName(ctx context.Context) {
+	if rc.randomName == "" {
+		logger := common.Logger(ctx)
+		if rc.Parent != nil {
+			// composite actions inherit their run context from the parent job
+			rootRunContext := rc
+			for rootRunContext.Parent != nil {
+				rootRunContext = rootRunContext.Parent
+			}
+			rootRunContext.ensureRandomName(ctx)
+			rc.randomName = rootRunContext.randomName
+			logger.Debugf("RunContext %s inherited random name %s from its parent", rc.Name, rc.randomName)
+		} else {
+			rc.randomName = common.MustRandName(16)
+			logger.Debugf("RunContext %s is assigned random name %s", rc.Name, rc.randomName)
+		}
+	}
+}
+
 func (rc *RunContext) getNetworkCreated(ctx context.Context) bool {
 	rc.ensureNetworkName(ctx)
 	return rc.networkCreated
diff --git a/act/runner/run_context_test.go b/act/runner/run_context_test.go
index 9d167a0f..d03a1b72 100644
--- a/act/runner/run_context_test.go
+++ b/act/runner/run_context_test.go
@@ -937,3 +937,18 @@ func Test_waitForServiceContainer(t *testing.T) {
 		m.AssertExpectations(t)
 	})
 }
+
+func TestRunContext_ensureRandomName(t *testing.T) {
+	parent := &RunContext{
+		Name: "parentname",
+	}
+	rc := &RunContext{
+		Name:   "runname",
+		Parent: parent,
+	}
+
+	parent.ensureRandomName(t.Context())
+	assert.NotEmpty(t, parent.randomName)
+	rc.ensureRandomName(t.Context())
+	assert.Equal(t, parent.randomName, rc.randomName)
+}

From 10c26c53a3904f58230d1c188546acbf772343d1 Mon Sep 17 00:00:00 2001
From: Earl Warren
Date: Thu, 14 Aug 2025 11:14:20 +0200
Subject: [PATCH 5/6] chore: do not hardcode names in TestRunContext_PrepareJobContainer

Replace assertions on hardcoded names with assertions on how the
services and the job container relate. This slightly improves logic
coverage and makes the test insensitive to how network and volume names
are created.
- compare the network names of the services and the job container and
  assert they are equal, demonstrating they can communicate
- verify that the mounts and valid volumes of the services are empty
- verify that the internal mounts of the job container are valid volumes
---
 act/runner/run_context_test.go | 51 ++++++++++++++++++----------------
 1 file changed, 27 insertions(+), 24 deletions(-)

diff --git a/act/runner/run_context_test.go b/act/runner/run_context_test.go
index d03a1b72..296afd1b 100644
--- a/act/runner/run_context_test.go
+++ b/act/runner/run_context_test.go
@@ -794,21 +794,16 @@ jobs:
 			},
 			inputs: []container.NewContainerInput{
 				{
-					Name: "WORKFLOW-e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855_JOB",
-					Image: "some:image",
-					Username: "containerusername",
-					Password: "containerpassword",
-					Entrypoint: []string{"tail", "-f", "/dev/null"},
-					Cmd: nil,
-					WorkingDir: "/my/workdir",
-					Env: []string{},
-					ToolCache: "/opt/hostedtoolcache",
-					Binds: []string{"/var/run/docker.sock:/var/run/docker.sock"},
-					Mounts: map[string]string{
-						"WORKFLOW-e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855_JOB": "/my/workdir",
-						"WORKFLOW-e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855_JOB-env": "/var/run/act",
-					},
-					NetworkMode: "WORKFLOW-e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855_JOB-job-network",
+					Name: "WORKFLOW-e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855_JOB",
+					Image: "some:image",
+					Username: "containerusername",
+					Password: "containerpassword",
+					Entrypoint: []string{"tail", "-f", "/dev/null"},
+					Cmd: nil,
+					WorkingDir: "/my/workdir",
+					Env: []string{},
+					ToolCache: "/opt/hostedtoolcache",
+					Binds: []string{"/var/run/docker.sock:/var/run/docker.sock"},
 					Privileged: false,
 					UsernsMode: "",
 					Platform: "",
 					NetworkAliases: []string{"JOB"},
 					ExposedPorts: nat.PortSet{},
 					PortBindings: nil,
 					ConfigOptions: "",
 					JobOptions: "",
-					ValidVolumes: []string{
-						"WORKFLOW-e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855_JOB",
-						"WORKFLOW-e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855_JOB-env",
-						"/var/run/docker.sock",
-					},
 				},
 				{
 					Name: "WORKFLOW-e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca49599-fe7f4c0058dbd2161ebe4aafa71cd83bd96ee19d3ca8043d5e4bc477a664a80c",
 					Image: "service1:image",
 					Username: "service1username",
 					Password: "service1password",
 					Entrypoint: nil,
 					Cmd: nil,
 					WorkingDir: "",
 					Env: []string{},
 					ToolCache: "/opt/hostedtoolcache",
 					Binds: []string{"/var/run/docker.sock:/var/run/docker.sock"},
-					Mounts: map[string]string{},
-					NetworkMode: "WORKFLOW-e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855_JOB-job-network",
 					Privileged: false,
 					UsernsMode: "",
 					Platform: "",
 				},
 				{
-					Mounts: map[string]string{},
-					NetworkMode: "WORKFLOW-e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855_JOB-job-network",
 					Privileged: false,
 					UsernsMode: "",
 					Platform: "",
 				},
 			},
@@ -890,7 +876,24 @@ jobs:
 			require.NoError(t, rc.prepareJobContainer(ctx))
 
 			slices.SortFunc(containerInputs, func(a, b container.NewContainerInput) int { return cmp.Compare(a.Username, b.Username) })
+			jobContainerInput := containerInputs[0]
+			require.Equal(t, "containerusername", jobContainerInput.Username)
+			require.NotEmpty(t, jobContainerInput.NetworkMode)
+			for source := range jobContainerInput.Mounts {
+				assert.Contains(t, jobContainerInput.ValidVolumes, source)
+			}
 			for i := 0; i < len(containerInputs); i++ {
+
+				assert.Equal(t, jobContainerInput.NetworkMode, containerInputs[i].NetworkMode, containerInputs[i].Username)
+				containerInputs[i].NetworkMode = ""
+
+				if strings.HasPrefix(containerInputs[i].Username, "service") {
+					assert.Empty(t, containerInputs[i].Mounts)
+					assert.Empty(t, containerInputs[i].ValidVolumes)
+				}
+				containerInputs[i].Mounts = nil
+				containerInputs[i].ValidVolumes = nil
+
 				assert.EqualValues(t, testCase.inputs[i], containerInputs[i], containerInputs[i].Username)
 			}
 		})

From c85eb0e6477d394da5115b530c9deab9b4ae0247 Mon Sep 17 00:00:00 2001
From: Earl Warren
Date: Thu, 14 Aug 2025 11:21:51 +0200
Subject: [PATCH 6/6] fix: use a unique random name to derive network and volumes for a job

When a reusable workflow is called twice in the same workflow in
parallel, it may require a dedicated network to run (for instance if it
spawns services) and will always require unique volumes to mount the
workdir and the env.

There is no way to guarantee a unique name derived from the job name
and similar attributes. Instead, a random name is set and used as a
base for both the internal volumes and the dedicated network (if any).
---
 act/runner/run_context.go | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/act/runner/run_context.go b/act/runner/run_context.go
index a1db2867..84c03188 100644
--- a/act/runner/run_context.go
+++ b/act/runner/run_context.go
@@ -126,11 +126,13 @@ func (rc *RunContext) getInternalVolumeNames(ctx context.Context) []string {
 }
 
 func (rc *RunContext) getInternalVolumeWorkdir(ctx context.Context) string {
-	return rc.jobContainerName()
+	rc.ensureRandomName(ctx)
+	return rc.randomName
 }
 
 func (rc *RunContext) getInternalVolumeEnv(ctx context.Context) string {
-	return fmt.Sprintf("%s-env", rc.jobContainerName())
+	rc.ensureRandomName(ctx)
+	return fmt.Sprintf("%s-env", rc.randomName)
 }
 
 // Returns the binds and mounts for the container, resolving paths as appopriate
@@ -423,9 +425,10 @@ func (rc *RunContext) ensureNetworkName(ctx context.Context) {
 	if rc.networkName == "" {
+		rc.ensureRandomName(ctx)
 		rc.networkName = string(rc.Config.ContainerNetworkMode)
 		if len(rc.Run.Job().Services) > 0 || rc.networkName == "" {
-			rc.networkName = fmt.Sprintf("%s-%s-network", rc.jobContainerName(), rc.Run.JobID)
+			rc.networkName = fmt.Sprintf("WORKFLOW-%s", rc.randomName)
 			rc.networkCreated = true
 		}
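
Taken together, the series derives every per-job runtime name from a
single random name. A minimal sketch of the resulting scheme
(illustrative stand-ins rather than the real RunContext API; the
WORKFLOW- prefix mirrors the ensureNetworkName change above):

    package main

    import "fmt"

    // runCtx stands in for RunContext after this series: every name
    // the job needs hangs off one random name.
    type runCtx struct {
    	randomName string // generated once per root job, inherited by children
    }

    func (rc *runCtx) volumeWorkdir() string { return rc.randomName }
    func (rc *runCtx) volumeEnv() string     { return rc.randomName + "-env" }
    func (rc *runCtx) network() string       { return "WORKFLOW-" + rc.randomName }

    func main() {
    	// two parallel invocations of the same reusable workflow
    	a := &runCtx{randomName: "1f6e87a60023cdb4"}
    	b := &runCtx{randomName: "9c02d17b5e4af380"}
    	fmt.Println(a.volumeWorkdir(), a.volumeEnv(), a.network())
    	fmt.Println(b.volumeWorkdir(), b.volumeEnv(), b.network())
    	// distinct volumes and networks, so the two jobs cannot collide
    }

Because the two run contexts hold different random names, their workdir
volumes, env volumes, and (when one is created) networks are disjoint,
which is exactly the property the job-name-derived scheme could not
guarantee.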