
feat!: remove support for using an artifact server via CLI (#199)

Refs https://code.forgejo.org/forgejo/runner/pulls/740

Reviewed-on: https://code.forgejo.org/forgejo/act/pulls/199
Reviewed-by: Michael Kriese <michael.kriese@gmx.de>
Co-authored-by: Earl Warren <contact@earl-warren.org>
Co-committed-by: Earl Warren <contact@earl-warren.org>
Earl Warren 2025-07-27 20:35:48 +00:00 committed by earl-warren
parent 1b0c31121a
commit 30ea23384a
4 changed files with 0 additions and 982 deletions


@@ -1,318 +0,0 @@
package artifacts
import (
"context"
"encoding/json"
"fmt"
"io"
"io/fs"
"net"
"net/http"
"os"
"path/filepath"
"strings"
"time"
"github.com/julienschmidt/httprouter"
"github.com/nektos/act/pkg/common"
)
type FileContainerResourceURL struct {
FileContainerResourceURL string `json:"fileContainerResourceUrl"`
}
type NamedFileContainerResourceURL struct {
Name string `json:"name"`
FileContainerResourceURL string `json:"fileContainerResourceUrl"`
}
type NamedFileContainerResourceURLResponse struct {
Count int `json:"count"`
Value []NamedFileContainerResourceURL `json:"value"`
}
type ContainerItem struct {
Path string `json:"path"`
ItemType string `json:"itemType"`
ContentLocation string `json:"contentLocation"`
}
type ContainerItemResponse struct {
Value []ContainerItem `json:"value"`
}
type ResponseMessage struct {
Message string `json:"message"`
}
type WritableFile interface {
io.WriteCloser
}
type WriteFS interface {
OpenWritable(name string) (WritableFile, error)
OpenAppendable(name string) (WritableFile, error)
}
type readWriteFSImpl struct {
}
func (fwfs readWriteFSImpl) Open(name string) (fs.File, error) {
return os.Open(name)
}
func (fwfs readWriteFSImpl) OpenWritable(name string) (WritableFile, error) {
if err := os.MkdirAll(filepath.Dir(name), os.ModePerm); err != nil {
return nil, err
}
return os.OpenFile(name, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o644)
}
func (fwfs readWriteFSImpl) OpenAppendable(name string) (WritableFile, error) {
if err := os.MkdirAll(filepath.Dir(name), os.ModePerm); err != nil {
return nil, err
}
file, err := os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0o644)
if err != nil {
return nil, err
}
_, err = file.Seek(0, io.SeekEnd)
if err != nil {
return nil, err
}
return file, nil
}
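// gzipExtension marks files that were uploaded with Content-Encoding: gzip;
// they are stored compressed and served back with that header on download.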
var gzipExtension = ".gz__"
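// safeResolve joins relPath onto baseDir after cleaning it against the path
// separator root, so ".." segments cannot traverse outside baseDir.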
func safeResolve(baseDir string, relPath string) string {
return filepath.Join(baseDir, filepath.Clean(filepath.Join(string(os.PathSeparator), relPath)))
}
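// uploads registers the endpoints used by actions/upload-artifact: creating the
// file container for a run, receiving file uploads via PUT (appending when a
// non-initial Content-Range is supplied), and acknowledging the finalize PATCH.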
func uploads(router *httprouter.Router, baseDir string, fsys WriteFS) {
router.POST("/_apis/pipelines/workflows/:runId/artifacts", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
runID := params.ByName("runId")
json, err := json.Marshal(FileContainerResourceURL{
FileContainerResourceURL: fmt.Sprintf("http://%s/upload/%s", req.Host, runID),
})
if err != nil {
panic(err)
}
_, err = w.Write(json)
if err != nil {
panic(err)
}
})
router.PUT("/upload/:runId", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
itemPath := req.URL.Query().Get("itemPath")
runID := params.ByName("runId")
if req.Header.Get("Content-Encoding") == "gzip" {
itemPath += gzipExtension
}
safeRunPath := safeResolve(baseDir, runID)
safePath := safeResolve(safeRunPath, itemPath)
file, err := func() (WritableFile, error) {
contentRange := req.Header.Get("Content-Range")
if contentRange != "" && !strings.HasPrefix(contentRange, "bytes 0-") {
return fsys.OpenAppendable(safePath)
}
return fsys.OpenWritable(safePath)
}()
if err != nil {
panic(err)
}
defer file.Close()
writer, ok := file.(io.Writer)
if !ok {
panic("File is not writable")
}
if req.Body == nil {
panic("No body given")
}
_, err = io.Copy(writer, req.Body)
if err != nil {
panic(err)
}
json, err := json.Marshal(ResponseMessage{
Message: "success",
})
if err != nil {
panic(err)
}
_, err = w.Write(json)
if err != nil {
panic(err)
}
})
router.PATCH("/_apis/pipelines/workflows/:runId/artifacts", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
json, err := json.Marshal(ResponseMessage{
Message: "success",
})
if err != nil {
panic(err)
}
_, err = w.Write(json)
if err != nil {
panic(err)
}
})
}
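// downloads registers the endpoints used by actions/download-artifact: listing a
// run's artifact containers, listing the files within a container, and serving
// individual files (falling back to the gzip-stored copy, sent with a
// Content-Encoding: gzip header, when no plain file exists).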
func downloads(router *httprouter.Router, baseDir string, fsys fs.FS) {
router.GET("/_apis/pipelines/workflows/:runId/artifacts", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
runID := params.ByName("runId")
safePath := safeResolve(baseDir, runID)
entries, err := fs.ReadDir(fsys, safePath)
if err != nil {
panic(err)
}
var list []NamedFileContainerResourceURL
for _, entry := range entries {
list = append(list, NamedFileContainerResourceURL{
Name: entry.Name(),
FileContainerResourceURL: fmt.Sprintf("http://%s/download/%s", req.Host, runID),
})
}
json, err := json.Marshal(NamedFileContainerResourceURLResponse{
Count: len(list),
Value: list,
})
if err != nil {
panic(err)
}
_, err = w.Write(json)
if err != nil {
panic(err)
}
})
router.GET("/download/:container", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
container := params.ByName("container")
itemPath := req.URL.Query().Get("itemPath")
safePath := safeResolve(baseDir, filepath.Join(container, itemPath))
var files []ContainerItem
err := fs.WalkDir(fsys, safePath, func(path string, entry fs.DirEntry, err error) error {
if err != nil {
// entry is nil when the walk itself failed for this path
return err
}
if !entry.IsDir() {
rel, err := filepath.Rel(safePath, path)
if err != nil {
panic(err)
}
// if it was uploaded as gzip, drop the storage suffix
rel = strings.TrimSuffix(rel, gzipExtension)
path := filepath.Join(itemPath, rel)
rel = filepath.ToSlash(rel)
path = filepath.ToSlash(path)
files = append(files, ContainerItem{
Path: path,
ItemType: "file",
ContentLocation: fmt.Sprintf("http://%s/artifact/%s/%s/%s", req.Host, container, itemPath, rel),
})
}
return nil
})
if err != nil {
panic(err)
}
json, err := json.Marshal(ContainerItemResponse{
Value: files,
})
if err != nil {
panic(err)
}
_, err = w.Write(json)
if err != nil {
panic(err)
}
})
router.GET("/artifact/*path", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
path := params.ByName("path")[1:]
safePath := safeResolve(baseDir, path)
file, err := fsys.Open(safePath)
if err != nil {
// try gzip file
file, err = fsys.Open(safePath + gzipExtension)
if err != nil {
panic(err)
}
w.Header().Add("Content-Encoding", "gzip")
}
_, err = io.Copy(w, file)
if err != nil {
panic(err)
}
})
}
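// Serve starts the artifact HTTP server rooted at artifactPath on addr:port and
// returns a cancel function that stops it; when artifactPath is empty it returns
// immediately without starting a server.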
func Serve(ctx context.Context, artifactPath string, addr string, port string) context.CancelFunc {
serverContext, cancel := context.WithCancel(ctx)
logger := common.Logger(serverContext)
if artifactPath == "" {
return cancel
}
router := httprouter.New()
logger.Debugf("Artifacts base path '%s'", artifactPath)
fsys := readWriteFSImpl{}
uploads(router, artifactPath, fsys)
downloads(router, artifactPath, fsys)
server := &http.Server{
Addr: net.JoinHostPort(addr, port),
ReadHeaderTimeout: 2 * time.Second,
Handler: router,
}
// run server
go func() {
logger.Infof("Start server on http://%s", server.Addr)
if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
logger.Fatalf("http listener: %v", err)
}
}()
// wait for cancel to gracefully shut down the server
go func() {
<-serverContext.Done()
if err := server.Shutdown(ctx); err != nil {
logger.Errorf("Failed shutdown gracefully - force shutdown: %v", err)
server.Close()
}
}()
return cancel
}
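
For context, a caller started this server from its configuration and tore it down with the returned cancel function. The sketch below is an assumption of that wiring (hypothetical program, import path assumed to be github.com/nektos/act/pkg/artifacts); only the Serve signature and the runner.Config field names come from the files in this commit.

package main

import (
    "context"

    "github.com/nektos/act/pkg/artifacts"
    "github.com/nektos/act/pkg/runner"
)

func main() {
    ctx := context.Background()
    // Hypothetical configuration values; the field names match the test file below.
    cfg := &runner.Config{
        ArtifactServerPath: "/tmp/artifacts",
        ArtifactServerAddr: "127.0.0.1",
        ArtifactServerPort: "34567",
    }
    // Serve returns a cancel function; it starts nothing when the path is empty.
    cancel := artifacts.Serve(ctx, cfg.ArtifactServerPath, cfg.ArtifactServerAddr, cfg.ArtifactServerPort)
    defer cancel()
    // ... plan and run workflows that use actions/upload-artifact / download-artifact ...
}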


@@ -1,395 +0,0 @@
package artifacts
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"os"
"path"
"path/filepath"
"strings"
"testing"
"testing/fstest"
"github.com/julienschmidt/httprouter"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/nektos/act/pkg/model"
"github.com/nektos/act/pkg/runner"
)
type writableMapFile struct {
fstest.MapFile
}
func (f *writableMapFile) Write(data []byte) (int, error) {
f.Data = data
return len(data), nil
}
func (f *writableMapFile) Close() error {
return nil
}
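// writeMapFS adds the WriteFS methods on top of fstest.MapFS so the upload
// handlers can be tested against an in-memory filesystem.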
type writeMapFS struct {
fstest.MapFS
}
func (fsys writeMapFS) OpenWritable(name string) (WritableFile, error) {
var file = &writableMapFile{
MapFile: fstest.MapFile{
Data: []byte("content2"),
},
}
fsys.MapFS[name] = &file.MapFile
return file, nil
}
func (fsys writeMapFS) OpenAppendable(name string) (WritableFile, error) {
var file = &writableMapFile{
MapFile: fstest.MapFile{
Data: []byte("content2"),
},
}
fsys.MapFS[name] = &file.MapFile
return file, nil
}
func TestNewArtifactUploadPrepare(t *testing.T) {
assert := assert.New(t)
var memfs = fstest.MapFS(map[string]*fstest.MapFile{})
router := httprouter.New()
uploads(router, "artifact/server/path", writeMapFS{memfs})
req, _ := http.NewRequest("POST", "http://localhost/_apis/pipelines/workflows/1/artifacts", nil)
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
if status := rr.Code; status != http.StatusOK {
assert.Fail("Wrong status")
}
response := FileContainerResourceURL{}
err := json.Unmarshal(rr.Body.Bytes(), &response)
if err != nil {
panic(err)
}
assert.Equal("http://localhost/upload/1", response.FileContainerResourceURL)
}
func TestArtifactUploadBlob(t *testing.T) {
assert := assert.New(t)
var memfs = fstest.MapFS(map[string]*fstest.MapFile{})
router := httprouter.New()
uploads(router, "artifact/server/path", writeMapFS{memfs})
req, _ := http.NewRequest("PUT", "http://localhost/upload/1?itemPath=some/file", strings.NewReader("content"))
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
if status := rr.Code; status != http.StatusOK {
assert.Fail("Wrong status")
}
response := ResponseMessage{}
err := json.Unmarshal(rr.Body.Bytes(), &response)
if err != nil {
panic(err)
}
assert.Equal("success", response.Message)
assert.Equal("content", string(memfs["artifact/server/path/1/some/file"].Data))
}
func TestFinalizeArtifactUpload(t *testing.T) {
assert := assert.New(t)
var memfs = fstest.MapFS(map[string]*fstest.MapFile{})
router := httprouter.New()
uploads(router, "artifact/server/path", writeMapFS{memfs})
req, _ := http.NewRequest("PATCH", "http://localhost/_apis/pipelines/workflows/1/artifacts", nil)
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
if status := rr.Code; status != http.StatusOK {
assert.Fail("Wrong status")
}
response := ResponseMessage{}
err := json.Unmarshal(rr.Body.Bytes(), &response)
if err != nil {
panic(err)
}
assert.Equal("success", response.Message)
}
func TestListArtifacts(t *testing.T) {
assert := assert.New(t)
var memfs = fstest.MapFS(map[string]*fstest.MapFile{
"artifact/server/path/1/file.txt": {
Data: []byte(""),
},
})
router := httprouter.New()
downloads(router, "artifact/server/path", memfs)
req, _ := http.NewRequest("GET", "http://localhost/_apis/pipelines/workflows/1/artifacts", nil)
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
if status := rr.Code; status != http.StatusOK {
assert.FailNow(fmt.Sprintf("Wrong status: %d", status))
}
response := NamedFileContainerResourceURLResponse{}
err := json.Unmarshal(rr.Body.Bytes(), &response)
if err != nil {
panic(err)
}
assert.Equal(1, response.Count)
assert.Equal("file.txt", response.Value[0].Name)
assert.Equal("http://localhost/download/1", response.Value[0].FileContainerResourceURL)
}
func TestListArtifactContainer(t *testing.T) {
assert := assert.New(t)
var memfs = fstest.MapFS(map[string]*fstest.MapFile{
"artifact/server/path/1/some/file": {
Data: []byte(""),
},
})
router := httprouter.New()
downloads(router, "artifact/server/path", memfs)
req, _ := http.NewRequest("GET", "http://localhost/download/1?itemPath=some/file", nil)
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
if status := rr.Code; status != http.StatusOK {
assert.FailNow(fmt.Sprintf("Wrong status: %d", status))
}
response := ContainerItemResponse{}
err := json.Unmarshal(rr.Body.Bytes(), &response)
if err != nil {
panic(err)
}
assert.Equal(1, len(response.Value))
assert.Equal("some/file", response.Value[0].Path)
assert.Equal("file", response.Value[0].ItemType)
assert.Equal("http://localhost/artifact/1/some/file/.", response.Value[0].ContentLocation)
}
func TestDownloadArtifactFile(t *testing.T) {
assert := assert.New(t)
var memfs = fstest.MapFS(map[string]*fstest.MapFile{
"artifact/server/path/1/some/file": {
Data: []byte("content"),
},
})
router := httprouter.New()
downloads(router, "artifact/server/path", memfs)
req, _ := http.NewRequest("GET", "http://localhost/artifact/1/some/file", nil)
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
if status := rr.Code; status != http.StatusOK {
assert.FailNow(fmt.Sprintf("Wrong status: %d", status))
}
data := rr.Body.Bytes()
assert.Equal("content", string(data))
}
type TestJobFileInfo struct {
workdir string
workflowPath string
eventName string
errorMessage string
platforms map[string]string
containerArchitecture string
}
var (
artifactsPath = path.Join(os.TempDir(), "test-artifacts")
artifactsAddr = "127.0.0.1"
artifactsPort = "12345"
)
func TestArtifactFlow(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test")
}
ctx := context.Background()
cancel := Serve(ctx, artifactsPath, artifactsAddr, artifactsPort)
defer cancel()
platforms := map[string]string{
"ubuntu-latest": "node:16-buster", // Don't use node:16-buster-slim because it doesn't have curl command, which is used in the tests
}
tables := []TestJobFileInfo{
{"testdata", "upload-and-download", "push", "", platforms, ""},
{"testdata", "GHSL-2023-004", "push", "", platforms, ""},
}
log.SetLevel(log.DebugLevel)
for _, table := range tables {
runTestJobFile(ctx, t, table)
}
}
func runTestJobFile(ctx context.Context, t *testing.T, tjfi TestJobFileInfo) {
t.Run(tjfi.workflowPath, func(t *testing.T) {
fmt.Printf("::group::%s\n", tjfi.workflowPath)
if err := os.RemoveAll(artifactsPath); err != nil {
panic(err)
}
workdir, err := filepath.Abs(tjfi.workdir)
assert.Nil(t, err, workdir)
fullWorkflowPath := filepath.Join(workdir, tjfi.workflowPath)
runnerConfig := &runner.Config{
Workdir: workdir,
BindWorkdir: false,
EventName: tjfi.eventName,
Platforms: tjfi.platforms,
ReuseContainers: false,
ContainerArchitecture: tjfi.containerArchitecture,
GitHubInstance: "github.com",
ArtifactServerPath: artifactsPath,
ArtifactServerAddr: artifactsAddr,
ArtifactServerPort: artifactsPort,
}
runner, err := runner.New(runnerConfig)
assert.Nil(t, err, tjfi.workflowPath)
planner, err := model.NewWorkflowPlanner(fullWorkflowPath, true, false)
assert.Nil(t, err, fullWorkflowPath)
plan, err := planner.PlanEvent(tjfi.eventName)
if err == nil {
err = runner.NewPlanExecutor(plan)(ctx)
if tjfi.errorMessage == "" {
assert.Nil(t, err, fullWorkflowPath)
} else {
assert.Error(t, err, tjfi.errorMessage)
}
} else {
assert.Nil(t, plan)
}
fmt.Println("::endgroup::")
})
}
func TestMkdirFsImplSafeResolve(t *testing.T) {
assert := assert.New(t)
baseDir := "/foo/bar"
tests := map[string]struct {
input string
want string
}{
"simple": {input: "baz", want: "/foo/bar/baz"},
"nested": {input: "baz/blue", want: "/foo/bar/baz/blue"},
"dots in middle": {input: "baz/../../blue", want: "/foo/bar/blue"},
"leading dots": {input: "../../parent", want: "/foo/bar/parent"},
"root path": {input: "/root", want: "/foo/bar/root"},
"root": {input: "/", want: "/foo/bar"},
"empty": {input: "", want: "/foo/bar"},
}
for name, tc := range tests {
t.Run(name, func(t *testing.T) {
assert.Equal(tc.want, safeResolve(baseDir, tc.input))
})
}
}
func TestDownloadArtifactFileUnsafePath(t *testing.T) {
assert := assert.New(t)
var memfs = fstest.MapFS(map[string]*fstest.MapFile{
"artifact/server/path/some/file": {
Data: []byte("content"),
},
})
router := httprouter.New()
downloads(router, "artifact/server/path", memfs)
req, _ := http.NewRequest("GET", "http://localhost/artifact/2/../../some/file", nil)
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
if status := rr.Code; status != http.StatusOK {
assert.FailNow(fmt.Sprintf("Wrong status: %d", status))
}
data := rr.Body.Bytes()
assert.Equal("content", string(data))
}
func TestArtifactUploadBlobUnsafePath(t *testing.T) {
assert := assert.New(t)
var memfs = fstest.MapFS(map[string]*fstest.MapFile{})
router := httprouter.New()
uploads(router, "artifact/server/path", writeMapFS{memfs})
req, _ := http.NewRequest("PUT", "http://localhost/upload/1?itemPath=../../some/file", strings.NewReader("content"))
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
if status := rr.Code; status != http.StatusOK {
assert.Fail("Wrong status")
}
response := ResponseMessage{}
err := json.Unmarshal(rr.Body.Bytes(), &response)
if err != nil {
panic(err)
}
assert.Equal("success", response.Message)
assert.Equal("content", string(memfs["artifact/server/path/1/some/file"].Data))
}


@@ -1,39 +0,0 @@
name: "GHSL-2023-0004"
on: push
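# Regression test: an upload with ../ in itemPath must stay inside the run's
# artifact directory, and a download request with extra dot segments must be
# resolved to the cleaned path instead of escaping it.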
jobs:
test-artifacts:
runs-on: ubuntu-latest
steps:
- run: echo "hello world" > test.txt
- name: curl upload
run: curl --silent --show-error --fail ${ACTIONS_RUNTIME_URL}upload/1?itemPath=../../my-artifact/secret.txt --upload-file test.txt
- uses: actions/download-artifact@v2
with:
name: my-artifact
path: test-artifacts
- name: 'Verify Artifact #1'
run: |
file="test-artifacts/secret.txt"
if [ ! -f $file ] ; then
echo "Expected file does not exist"
exit 1
fi
if [ "$(cat $file)" != "hello world" ] ; then
echo "File contents of downloaded artifact are incorrect"
exit 1
fi
- name: Verify download should work by clean extra dots
run: curl --silent --show-error --fail --path-as-is -o out.txt ${ACTIONS_RUNTIME_URL}artifact/1/../../../1/my-artifact/secret.txt
- name: 'Verify download content'
run: |
file="out.txt"
if [ ! -f $file ] ; then
echo "Expected file does not exist"
exit 1
fi
if [ "$(cat $file)" != "hello world" ] ; then
echo "File contents of downloaded artifact are incorrect"
exit 1
fi


@@ -1,230 +0,0 @@
name: "Test that artifact uploads and downloads succeed"
on: push
jobs:
test-artifacts:
runs-on: ubuntu-latest
steps:
- run: mkdir -p path/to/artifact
- run: echo hello > path/to/artifact/world.txt
- uses: actions/upload-artifact@v2
with:
name: my-artifact
path: path/to/artifact/world.txt
- run: rm -rf path
- uses: actions/download-artifact@v2
with:
name: my-artifact
- name: Display structure of downloaded files
run: ls -la
# Test end-to-end by uploading multiple artifacts and then downloading them
- name: Create artifact files
run: |
mkdir -p path/to/dir-1
mkdir -p path/to/dir-2
mkdir -p path/to/dir-3
mkdir -p path/to/dir-5
mkdir -p path/to/dir-6
mkdir -p path/to/dir-7
echo "Lorem ipsum dolor sit amet" > path/to/dir-1/file1.txt
echo "Hello world from file #2" > path/to/dir-2/file2.txt
echo "This is a going to be a test for a large enough file that should get compressed with GZip. The @actions/artifact package uses GZip to upload files. This text should have a compression ratio greater than 100% so it should get uploaded using GZip" > path/to/dir-3/gzip.txt
dd if=/dev/random of=path/to/dir-5/file5.rnd bs=1024 count=1024
dd if=/dev/random of=path/to/dir-6/file6.rnd bs=1024 count=$((10*1024))
dd if=/dev/random of=path/to/dir-7/file7.rnd bs=1024 count=$((10*1024))
# Upload a single file artifact
- name: 'Upload artifact #1'
uses: actions/upload-artifact@v2
with:
name: 'Artifact-A'
path: path/to/dir-1/file1.txt
# Upload using a wildcard pattern, name should default to 'artifact' if not provided
- name: 'Upload artifact #2'
uses: actions/upload-artifact@v2
with:
path: path/**/dir*/
# Upload a directory that contains a file that will be uploaded with GZip
- name: 'Upload artifact #3'
uses: actions/upload-artifact@v2
with:
name: 'GZip-Artifact'
path: path/to/dir-3/
# Upload a directory that contains a file that will be uploaded with GZip
- name: 'Upload artifact #4'
uses: actions/upload-artifact@v2
with:
name: 'Multi-Path-Artifact'
path: |
path/to/dir-1/*
path/to/dir-[23]/*
!path/to/dir-3/*.txt
# Upload a mid-size file artifact
- name: 'Upload artifact #5'
uses: actions/upload-artifact@v2
with:
name: 'Mid-Size-Artifact'
path: path/to/dir-5/file5.rnd
# Upload a big file artifact
- name: 'Upload artifact #6'
uses: actions/upload-artifact@v2
with:
name: 'Big-Artifact'
path: path/to/dir-6/file6.rnd
# Upload a big file artifact twice
- name: 'Upload artifact #7 (First)'
uses: actions/upload-artifact@v2
with:
name: 'Big-Uploaded-Twice'
path: path/to/dir-7/file7.rnd
# Upload a big file artifact twice
- name: 'Upload artifact #7 (Second)'
uses: actions/upload-artifact@v2
with:
name: 'Big-Uploaded-Twice'
path: path/to/dir-7/file7.rnd
# Verify artifacts by downloading each one and checking its content
# Download Artifact #1 and verify the correctness of the content
- name: 'Download artifact #1'
uses: actions/download-artifact@v2
with:
name: 'Artifact-A'
path: some/new/path
- name: 'Verify Artifact #1'
run: |
file="some/new/path/file1.txt"
if [ ! -f $file ] ; then
echo "Expected file does not exist"
exit 1
fi
if [ "$(cat $file)" != "Lorem ipsum dolor sit amet" ] ; then
echo "File contents of downloaded artifact are incorrect"
exit 1
fi
# Download Artifact #2 and verify the correctness of the content
- name: 'Download artifact #2'
uses: actions/download-artifact@v2
with:
name: 'artifact'
path: some/other/path
- name: 'Verify Artifact #2'
run: |
file1="some/other/path/to/dir-1/file1.txt"
file2="some/other/path/to/dir-2/file2.txt"
if [ ! -f $file1 -o ! -f $file2 ] ; then
echo "Expected files do not exist"
exit 1
fi
if [ "$(cat $file1)" != "Lorem ipsum dolor sit amet" -o "$(cat $file2)" != "Hello world from file #2" ] ; then
echo "File contents of downloaded artifacts are incorrect"
exit 1
fi
# Download Artifact #3 and verify the correctness of the content
- name: 'Download artifact #3'
uses: actions/download-artifact@v2
with:
name: 'GZip-Artifact'
path: gzip/artifact/path
# Because a directory was used as the upload input, its parent path (path/to/dir-3/) should not be included in the uploaded artifact
- name: 'Verify Artifact #3'
run: |
gzipFile="gzip/artifact/path/gzip.txt"
if [ ! -f $gzipFile ] ; then
echo "Expected file do not exist"
exit 1
fi
if [ "$(cat $gzipFile)" != "This is a going to be a test for a large enough file that should get compressed with GZip. The @actions/artifact package uses GZip to upload files. This text should have a compression ratio greater than 100% so it should get uploaded using GZip" ] ; then
echo "File contents of downloaded artifact is incorrect"
exit 1
fi
- name: 'Download artifact #4'
uses: actions/download-artifact@v2
with:
name: 'Multi-Path-Artifact'
path: multi/artifact
- name: 'Verify Artifact #4'
run: |
file1="multi/artifact/dir-1/file1.txt"
file2="multi/artifact/dir-2/file2.txt"
if [ ! -f $file1 -o ! -f $file2 ] ; then
echo "Expected files do not exist"
exit 1
fi
if [ "$(cat $file1)" != "Lorem ipsum dolor sit amet" -o "$(cat $file2)" != "Hello world from file #2" ] ; then
echo "File contents of downloaded artifacts are incorrect"
exit 1
fi
- name: 'Download artifact #5'
uses: actions/download-artifact@v2
with:
name: 'Mid-Size-Artifact'
path: mid-size/artifact/path
- name: 'Verify Artifact #5'
run: |
file="mid-size/artifact/path/file5.rnd"
if [ ! -f $file ] ; then
echo "Expected file does not exist"
exit 1
fi
if ! diff $file path/to/dir-5/file5.rnd ; then
echo "File contents of downloaded artifact are incorrect"
exit 1
fi
- name: 'Download artifact #6'
uses: actions/download-artifact@v2
with:
name: 'Big-Artifact'
path: big/artifact/path
- name: 'Verify Artifact #6'
run: |
file="big/artifact/path/file6.rnd"
if [ ! -f $file ] ; then
echo "Expected file does not exist"
exit 1
fi
if ! diff $file path/to/dir-6/file6.rnd ; then
echo "File contents of downloaded artifact are incorrect"
exit 1
fi
- name: 'Download artifact #7'
uses: actions/download-artifact@v2
with:
name: 'Big-Uploaded-Twice'
path: big-uploaded-twice/artifact/path
- name: 'Verify Artifact #7'
run: |
file="big-uploaded-twice/artifact/path/file7.rnd"
if [ ! -f $file ] ; then
echo "Expected file does not exist"
exit 1
fi
if ! diff $file path/to/dir-7/file7.rnd ; then
echo "File contents of downloaded artifact are incorrect"
exit 1
fi