Mirror of https://code.forgejo.org/forgejo/runner.git, synced 2025-09-15 18:57:01 +00:00
fix: image credentials for services must not override container image credentials (#181)
- do not override username and password when looping over services
- split prepareJobContainer out of startJobContainer
- split getNetworkName out as it is used by both
- add unit tests for prepareJobContainer
- make container.NewContainer mockable
- add MockVariable helper

Closes forgejo/runner#575

---

Note to reviewers: do not show whitespace changes; the refactor will then show in a minimal way.

When the fix is reverted, the tests fail as follows:

```
Diff:
--- Expected
+++ Actual
@@ -81,4 +81,4 @@
  Image: (string) (len=10) "some:image",
- Username: (string) (len=17) "containerusername",
- Password: (string) (len=17) "containerpassword",
+ Username: (string) (len=16) "service2username",
+ Password: (string) (len=16) "service2password",
 Entrypoint: ([]string) (len=3) {

Test: TestStartJobContainer/Overlapping
```

Reviewed-on: https://code.forgejo.org/forgejo/act/pulls/181
Reviewed-by: Michael Kriese <michael.kriese@gmx.de>
Co-authored-by: Earl Warren <contact@earl-warren.org>
Co-committed-by: Earl Warren <contact@earl-warren.org>
This commit is contained in:
parent 6620cc1d18
commit c2f91f63df

4 changed files with 346 additions and 151 deletions
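The root cause is ordinary Go assignment semantics: the old service loop wrote `username, password, err = rc.handleServiceCredentials(...)` with `=`, assigning into the outer variables that already held the job container's credentials, so whichever service came last leaked its credentials into the job container. Declaring loop-local variables with `:=` shadows the outer ones instead. A minimal standalone sketch of the bug class (illustrative names, not the runner's code):

```go
package main

import "fmt"

func credentialsFor(name string) (username, password string) {
	return name + "username", name + "password"
}

func main() {
	// Credentials for the job container itself.
	username, password := credentialsFor("container")

	for _, svc := range []string{"service1", "service2"} {
		// Fixed form: ":=" declares loop-local variables that shadow the outer pair.
		// The buggy form used "=", which would overwrite them.
		username, password := credentialsFor(svc)
		fmt.Println(svc, username, password)
	}

	// Still prints the container credentials with the fix; with "=" it would
	// print the last service's credentials instead.
	fmt.Println("job container", username, password)
}
```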
```diff
@@ -43,7 +43,7 @@ import (
 )
 
 // NewContainer creates a reference to a container
-func NewContainer(input *NewContainerInput) ExecutionsEnvironment {
+var NewContainer = func(input *NewContainerInput) ExecutionsEnvironment {
 	cr := new(containerReference)
 	cr.input = input
 	cr.toolCache = input.ToolCache
```
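The one-line change from `func NewContainer(...)` to `var NewContainer = func(...)` is what makes the constructor mockable: a package-level function variable can be reassigned by a test and restored afterwards, with no interface or build-tag machinery at the call sites. A minimal sketch of the pattern, with illustrative names:

```go
// Package widget is an illustrative stand-in, not part of this commit.
package widget

type Widget struct{ ID string }

// New is a variable rather than a plain function declaration, so call sites
// still read widget.New(...) but tests can substitute the implementation.
var New = func(id string) *Widget { return &Widget{ID: id} }

// In a test, swap and restore around the code under test:
//
//	old := widget.New
//	widget.New = func(id string) *Widget { return &Widget{ID: "stub-" + id} }
//	defer func() { widget.New = old }()
```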
```diff
@@ -391,177 +391,188 @@ func (rc *RunContext) startHostEnvironment() common.Executor {
 	}
 }
 
-func (rc *RunContext) startJobContainer() common.Executor {
-	return func(ctx context.Context) error {
+func (rc *RunContext) getNetworkName(_ context.Context) (networkName string, createAndDeleteNetwork bool) {
+	// specify the network to which the container will connect when `docker create` stage. (like execute command line: docker create --network <networkName> <image>)
+	networkName = string(rc.Config.ContainerNetworkMode)
+	if networkName == "" {
+		// if networkName is empty string, will create a new network for the containers.
+		// and it will be removed after at last.
+		networkName, createAndDeleteNetwork = rc.networkName()
+	}
+	return networkName, createAndDeleteNetwork
+}
+
+func (rc *RunContext) prepareJobContainer(ctx context.Context) error {
 	logger := common.Logger(ctx)
 	image := rc.platformImage(ctx)
 	rawLogger := logger.WithField("raw_output", true)
 	logWriter := common.NewLineWriter(rc.commandHandler(ctx), func(s string) bool {
 		if rc.Config.LogOutput {
 			rawLogger.Infof("%s", s)
 		} else {
 			rawLogger.Debugf("%s", s)
 		}
 		return true
 	})
 
 	username, password, err := rc.handleCredentials(ctx)
 	if err != nil {
 		return fmt.Errorf("failed to handle credentials: %s", err)
 	}
 
 	logger.Infof("\U0001f680 Start image=%s", image)
 	name := rc.jobContainerName()
 	// For gitea, to support --volumes-from <container_name_or_id> in options.
 	// We need to set the container name to the environment variable.
 	rc.Env["JOB_CONTAINER_NAME"] = name
 
 	envList := make([]string, 0)
 
 	envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_TOOL_CACHE", rc.getToolCache(ctx)))
 	envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_OS", "Linux"))
 	envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_ARCH", container.RunnerArch(ctx)))
 	envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_TEMP", "/tmp"))
 	envList = append(envList, fmt.Sprintf("%s=%s", "LANG", "C.UTF-8")) // Use same locale as GitHub Actions
 
 	ext := container.LinuxContainerEnvironmentExtensions{}
 	binds, mounts := rc.GetBindsAndMounts()
 
-		// specify the network to which the container will connect when `docker create` stage. (like execute command line: docker create --network <networkName> <image>)
-		networkName := string(rc.Config.ContainerNetworkMode)
-		var createAndDeleteNetwork bool
-		if networkName == "" {
-			// if networkName is empty string, will create a new network for the containers.
-			// and it will be removed after at last.
-			networkName, createAndDeleteNetwork = rc.networkName()
-		}
-
+	networkName, createAndDeleteNetwork := rc.getNetworkName(ctx)
 	// add service containers
 	for serviceID, spec := range rc.Run.Job().Services {
 		// interpolate env
 		interpolatedEnvs := make(map[string]string, len(spec.Env))
 		for k, v := range spec.Env {
 			interpolatedEnvs[k] = rc.ExprEval.Interpolate(ctx, v)
 		}
 		envs := make([]string, 0, len(interpolatedEnvs))
 		for k, v := range interpolatedEnvs {
 			envs = append(envs, fmt.Sprintf("%s=%s", k, v))
 		}
 		interpolatedCmd := make([]string, 0, len(spec.Cmd))
 		for _, v := range spec.Cmd {
 			interpolatedCmd = append(interpolatedCmd, rc.ExprEval.Interpolate(ctx, v))
 		}
 
-			username, password, err = rc.handleServiceCredentials(ctx, spec.Credentials)
+		username, password, err := rc.handleServiceCredentials(ctx, spec.Credentials)
 		if err != nil {
 			return fmt.Errorf("failed to handle service %s credentials: %w", serviceID, err)
 		}
 
 		interpolatedVolumes := make([]string, 0, len(spec.Volumes))
 		for _, volume := range spec.Volumes {
 			interpolatedVolumes = append(interpolatedVolumes, rc.ExprEval.Interpolate(ctx, volume))
 		}
 		serviceBinds, serviceMounts := rc.GetServiceBindsAndMounts(interpolatedVolumes)
 
 		interpolatedPorts := make([]string, 0, len(spec.Ports))
 		for _, port := range spec.Ports {
 			interpolatedPorts = append(interpolatedPorts, rc.ExprEval.Interpolate(ctx, port))
 		}
 		exposedPorts, portBindings, err := nat.ParsePortSpecs(interpolatedPorts)
 		if err != nil {
 			return fmt.Errorf("failed to parse service %s ports: %w", serviceID, err)
 		}
 
 		serviceContainerName := createContainerName(rc.jobContainerName(), serviceID)
 		c := container.NewContainer(&container.NewContainerInput{
 			Name:           serviceContainerName,
 			Image:          rc.ExprEval.Interpolate(ctx, spec.Image),
 			Username:       username,
 			Password:       password,
 			Cmd:            interpolatedCmd,
 			Env:            envs,
 			ToolCache:      rc.getToolCache(ctx),
 			Mounts:         serviceMounts,
 			Binds:          serviceBinds,
 			Stdout:         logWriter,
 			Stderr:         logWriter,
 			Privileged:     rc.Config.Privileged,
 			UsernsMode:     rc.Config.UsernsMode,
 			Platform:       rc.Config.ContainerArchitecture,
 			AutoRemove:     rc.Config.AutoRemove,
 			NetworkMode:    networkName,
 			NetworkAliases: []string{serviceID},
 			ExposedPorts:   exposedPorts,
 			PortBindings:   portBindings,
 			ValidVolumes:   rc.Config.ValidVolumes,
 
 			JobOptions:    rc.ExprEval.Interpolate(ctx, spec.Options),
 			ConfigOptions: rc.Config.ContainerOptions,
 		})
 		rc.ServiceContainers = append(rc.ServiceContainers, c)
 	}
 
 	rc.cleanUpJobContainer = func(ctx context.Context) error {
 		reuseJobContainer := func(ctx context.Context) bool {
 			return rc.Config.ReuseContainers
 		}
 
 		if rc.JobContainer != nil {
 			return rc.JobContainer.Remove().IfNot(reuseJobContainer).
 				Then(container.NewDockerVolumeRemoveExecutor(rc.jobContainerName(), false)).IfNot(reuseJobContainer).
 				Then(container.NewDockerVolumeRemoveExecutor(rc.jobContainerName()+"-env", false)).IfNot(reuseJobContainer).
 				Then(func(ctx context.Context) error {
 					if len(rc.ServiceContainers) > 0 {
 						logger.Infof("Cleaning up services for job %s", rc.JobName)
 						if err := rc.stopServiceContainers()(ctx); err != nil {
 							logger.Errorf("Error while cleaning services: %v", err)
 						}
 						if createAndDeleteNetwork {
 							// clean network if it has been created by act
 							// if using service containers
 							// it means that the network to which containers are connecting is created by `act_runner`,
 							// so, we should remove the network at last.
 							logger.Infof("Cleaning up network for job %s, and network name is: %s", rc.JobName, networkName)
 							if err := container.NewDockerNetworkRemoveExecutor(networkName)(ctx); err != nil {
 								logger.Errorf("Error while cleaning network: %v", err)
 							}
 						}
 					}
 					return nil
 				})(ctx)
 		}
 		return nil
 	}
 
 	rc.JobContainer = container.NewContainer(&container.NewContainerInput{
 		Cmd:            nil,
 		Entrypoint:     []string{"tail", "-f", "/dev/null"},
 		WorkingDir:     ext.ToContainerPath(rc.Config.Workdir),
 		Image:          image,
 		Username:       username,
 		Password:       password,
 		Name:           name,
 		Env:            envList,
 		ToolCache:      rc.getToolCache(ctx),
 		Mounts:         mounts,
 		NetworkMode:    networkName,
 		NetworkAliases: []string{rc.Name},
 		Binds:          binds,
 		Stdout:         logWriter,
 		Stderr:         logWriter,
 		Privileged:     rc.Config.Privileged,
 		UsernsMode:     rc.Config.UsernsMode,
 		Platform:       rc.Config.ContainerArchitecture,
 		AutoRemove:     rc.Config.AutoRemove,
 		ValidVolumes:   rc.Config.ValidVolumes,
 
 		JobOptions:    rc.options(ctx),
 		ConfigOptions: rc.Config.ContainerOptions,
 	})
 	if rc.JobContainer == nil {
 		return errors.New("Failed to create job container")
 	}
 
+	return nil
+}
+
+func (rc *RunContext) startJobContainer() common.Executor {
+	return func(ctx context.Context) error {
+		if err := rc.prepareJobContainer(ctx); err != nil {
+			return err
+		}
+		networkName, _ := rc.getNetworkName(ctx)
 		networkConfig := network.CreateOptions{
 			Driver: "bridge",
 			Scope:  "local",
```
```diff
@@ -1,20 +1,26 @@
 package runner
 
 import (
+	"cmp"
 	"context"
 	"fmt"
 	"os"
 	"regexp"
 	"runtime"
+	"slices"
 	"sort"
 	"strings"
 	"testing"
 
 	"github.com/nektos/act/pkg/container"
 	"github.com/nektos/act/pkg/exprparser"
 	"github.com/nektos/act/pkg/model"
+	"github.com/nektos/act/pkg/testutils"
 
+	"github.com/docker/go-connections/nat"
 	log "github.com/sirupsen/logrus"
-	assert "github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 	yaml "gopkg.in/yaml.v3"
 )
```
```diff
@@ -704,3 +710,171 @@ func Test_createSimpleContainerName(t *testing.T) {
 		})
 	}
 }
+
+func TestPrepareJobContainer(t *testing.T) {
+	yaml := `
+on:
+  push:
+
+jobs:
+  job:
+    runs-on: docker
+    container:
+      image: some:image
+      credentials:
+        username: containerusername
+        password: containerpassword
+    services:
+      service1:
+        image: service1:image
+        credentials:
+          username: service1username
+          password: service1password
+      service2:
+        image: service2:image
+        credentials:
+          username: service2username
+          password: service2password
+    steps:
+      - run: echo ok
+`
+	workflow, err := model.ReadWorkflow(strings.NewReader(yaml))
+	require.NoError(t, err)
+
+	testCases := []struct {
+		name   string
+		step   actionStep
+		inputs []container.NewContainerInput
+	}{
+		{
+			name: "Overlapping",
+			step: &stepActionRemote{
+				Step: &model.Step{
+					Uses: "org/repo/path@ref",
+				},
+				RunContext: &RunContext{
+					Config: &Config{
+						Workdir: "/my/workdir",
+					},
+					Run: &model.Run{
+						JobID:    "job",
+						Workflow: workflow,
+					},
+				},
+				env: map[string]string{},
+			},
+			inputs: []container.NewContainerInput{
+				{
+					Name:           "WORKFLOW-JOB-service1-24d1b6963554cd6e1a2f9bfcd21b822bf5b42547db24667196ac45f89072fdd9",
+					Image:          "service1:image",
+					Username:       "service1username",
+					Password:       "service1password",
+					Entrypoint:     nil,
+					Cmd:            []string{},
+					WorkingDir:     "",
+					Env:            []string{},
+					ToolCache:      "/opt/hostedtoolcache",
+					Binds:          []string{"/var/run/docker.sock:/var/run/docker.sock"},
+					Mounts:         map[string]string{},
+					NetworkMode:    "WORKFLOW_JOB-job-network",
+					Privileged:     false,
+					UsernsMode:     "",
+					Platform:       "",
+					NetworkAliases: []string{"service1"},
+					ExposedPorts:   nat.PortSet{},
+					PortBindings:   nat.PortMap{},
+					ConfigOptions:  "",
+					JobOptions:     "",
+					AutoRemove:     false,
+					ValidVolumes: []string{
+						"WORKFLOW_JOB",
+						"WORKFLOW_JOB-env",
+						"/var/run/docker.sock",
+					},
+				},
+				{
+					Name:           "WORKFLOW-JOB-service2-7137cecabbdb942ae7bbfc8953de8f2a68e8dc9c92ad98cd6d095481b216f979",
+					Image:          "service2:image",
+					Username:       "service2username",
+					Password:       "service2password",
+					Entrypoint:     nil,
+					Cmd:            []string{},
+					WorkingDir:     "",
+					Env:            []string{},
+					ToolCache:      "/opt/hostedtoolcache",
+					Binds:          []string{"/var/run/docker.sock:/var/run/docker.sock"},
+					Mounts:         map[string]string{},
+					NetworkMode:    "WORKFLOW_JOB-job-network",
+					Privileged:     false,
+					UsernsMode:     "",
+					Platform:       "",
+					NetworkAliases: []string{"service2"},
+					ExposedPorts:   nat.PortSet{},
+					PortBindings:   nat.PortMap{},
+					ConfigOptions:  "",
+					JobOptions:     "",
+					AutoRemove:     false,
+					ValidVolumes: []string{
+						"WORKFLOW_JOB",
+						"WORKFLOW_JOB-env",
+						"/var/run/docker.sock",
+					},
+				},
+				{
+					Name:       "WORKFLOW_JOB",
+					Image:      "some:image",
+					Username:   "containerusername",
+					Password:   "containerpassword",
+					Entrypoint: []string{"tail", "-f", "/dev/null"},
+					Cmd:        nil,
+					WorkingDir: "/my/workdir",
+					Env:        []string{},
+					ToolCache:  "/opt/hostedtoolcache",
+					Binds:      []string{"/var/run/docker.sock:/var/run/docker.sock"},
+					Mounts: map[string]string{
+						"WORKFLOW_JOB":     "/my/workdir",
+						"WORKFLOW_JOB-env": "/var/run/act",
+					},
+					NetworkMode:    "WORKFLOW_JOB-job-network",
+					Privileged:     false,
+					UsernsMode:     "",
+					Platform:       "",
+					NetworkAliases: []string{""},
+					ExposedPorts:   nil,
+					PortBindings:   nil,
+					ConfigOptions:  "",
+					JobOptions:     "",
+					AutoRemove:     false,
+					ValidVolumes: []string{
+						"WORKFLOW_JOB",
+						"WORKFLOW_JOB-env",
+						"/var/run/docker.sock",
+					},
+				},
+			},
+		},
+	}
+
+	for _, testCase := range testCases {
+		t.Run(testCase.name, func(t *testing.T) {
+			containerInputs := make([]container.NewContainerInput, 0, 5)
+			newContainer := container.NewContainer
+			defer testutils.MockVariable(&container.NewContainer, func(input *container.NewContainerInput) container.ExecutionsEnvironment {
+				c := *input
+				c.Stdout = nil
+				c.Stderr = nil
+				c.Env = []string{}
+				containerInputs = append(containerInputs, c)
+				return newContainer(input)
+			})()
+
+			ctx := context.Background()
+			rc := testCase.step.getRunContext()
+			rc.ExprEval = rc.NewExpressionEvaluator(ctx)
+
+			require.NoError(t, rc.prepareJobContainer(ctx))
+			slices.SortFunc(containerInputs, func(a, b container.NewContainerInput) int { return cmp.Compare(a.Name, b.Name) })
+			assert.EqualValues(t, testCase.inputs, containerInputs)
+		})
+	}
+}
```
act/testutils/mockvariable.go (new file, 10 lines)

```diff
@@ -0,0 +1,10 @@
+// Copyright 2025 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package testutils
+
+func MockVariable[T any](p *T, v T) (reset func()) {
+	old := *p
+	*p = v
+	return func() { *p = old }
+}
```
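Because `MockVariable` is generic over `T`, it works for any package-level variable, not just function values, and the returned `reset` closure pairs naturally with `defer`. A usage sketch (the `retries` variable is hypothetical, not from this commit; the import path follows the test file above):

```go
package testutils_test

import (
	"testing"

	"github.com/nektos/act/pkg/testutils"
)

// retries is a hypothetical package-level tunable, used only to illustrate the helper.
var retries = 3

func TestWithMockedRetries(t *testing.T) {
	// Swap the value now; the returned closure restores it when the test returns.
	defer testutils.MockVariable(&retries, 1)()

	if retries != 1 {
		t.Fatalf("expected mocked value 1, got %d", retries)
	}
}
```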