diff --git a/act/model/workflow.go b/act/model/workflow.go
index 82082feb..3462d288 100644
--- a/act/model/workflow.go
+++ b/act/model/workflow.go
@@ -11,6 +11,7 @@ import (
 	"slices"
 	"strconv"
 	"strings"
+	"sync"
 
 	"code.forgejo.org/forgejo/runner/v9/act/common"
 	"code.forgejo.org/forgejo/runner/v9/act/schema"
@@ -217,7 +218,9 @@ type Job struct {
 	Uses           string                    `yaml:"uses"`
 	With           map[string]any            `yaml:"with"`
 	RawSecrets     yaml.Node                 `yaml:"secrets"`
-	Result         string
+
+	Result      string
+	ResultMutex sync.Mutex
 }
 
 // Strategy for the job
diff --git a/act/runner/job_executor.go b/act/runner/job_executor.go
index d407b379..3cb4f206 100644
--- a/act/runner/job_executor.go
+++ b/act/runner/job_executor.go
@@ -162,6 +162,11 @@ func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executo
 func setJobResult(ctx context.Context, info jobInfo, rc *RunContext, success bool) {
 	logger := common.Logger(ctx)
 
+	// As we're reading the matrix build's status (`rc.Run.Job().Result`), it's possible for it to change in another
+	// goroutine running `setJobResult` and invoking `.result(...)`. Prevent concurrent execution of `setJobResult`...
+	rc.Run.Job().ResultMutex.Lock()
+	defer rc.Run.Job().ResultMutex.Unlock()
+
 	jobResult := "success"
 	// we have only one result for a whole matrix build, so we need
 	// to keep an existing result state if we run a matrix
diff --git a/act/runner/job_executor_test.go b/act/runner/job_executor_test.go
index 6e8a452e..3915b971 100644
--- a/act/runner/job_executor_test.go
+++ b/act/runner/job_executor_test.go
@@ -5,7 +5,9 @@ import (
 	"fmt"
 	"io"
 	"slices"
+	"sync"
 	"testing"
+	"time"
 
 	"code.forgejo.org/forgejo/runner/v9/act/common"
 	"code.forgejo.org/forgejo/runner/v9/act/container"
@@ -333,3 +335,74 @@ func TestJobExecutorNewJobExecutor(t *testing.T) {
 		})
 	}
 }
+
+func TestSetJobResultConcurrency(t *testing.T) {
+	jim := &jobInfoMock{}
+	job := model.Job{
+		Result: "success",
+	}
+	// Distinct RunContext objects are used to replicate realistic setJobResult in matrix build
+	rc1 := &RunContext{
+		Run: &model.Run{
+			JobID: "test",
+			Workflow: &model.Workflow{
+				Jobs: map[string]*model.Job{
+					"test": &job,
+				},
+			},
+		},
+	}
+	rc2 := &RunContext{
+		Run: &model.Run{
+			JobID: "test",
+			Workflow: &model.Workflow{
+				Jobs: map[string]*model.Job{
+					"test": &job,
+				},
+			},
+		},
+	}
+
+	jim.On("matrix").Return(map[string]interface{}{
+		"python": []string{"3.10", "3.11", "3.12"},
+	})
+
+	// Synthesize a race condition in setJobResult where, by reading data from the job matrix earlier and then
+	// performing unsynchronized writes to the same shared data structure, it can overwrite a failure status.
+	//
+	// Goroutine 1: Start marking job as success
+	//              (artificially suspended
+	//              by result() mock)
+	// Goroutine 2: Mark job as failure
+	// Goroutine 1: Finish marking job as success
+	//
+	// Correct behavior: Job is marked as a failure
+	// Bug behavior: Job is marked as a success
+
+	var lastResult string
+	jim.On("result", mock.Anything).Run(func(args mock.Arguments) {
+		result := args.String(0)
+		// Artificially suspend the "success" case so that the failure case races past it.
+		if result == "success" {
+			time.Sleep(1 * time.Second)
+		}
+		job.Result = result
+		lastResult = result
+	})
+
+	var wg sync.WaitGroup
+	wg.Add(2)
+	// Goroutine 1, mark as success:
+	go func() {
+		defer wg.Done()
+		setJobResult(t.Context(), jim, rc1, true)
+	}()
+	// Goroutine 2, mark as failure:
+	go func() {
+		defer wg.Done()
+		setJobResult(t.Context(), jim, rc2, false)
+	}()
+	wg.Wait()
+
+	assert.Equal(t, "failure", lastResult)
+}