mirror of https://github.com/actions/toolkit
[Artifacts] Prepare for v2.0.0 of @actions/artifact (#1479)
* Prepare for v2.0.0 of @actions/artifact
* Run prettier
* Temporarily disable unused vars

parent 91d3933eb5
commit c4f5ce2665
@@ -1,3 +1,5 @@
# Temporarily disabled while v2.0.0 of @actions/artifact is under development
name: artifact-unit-tests
on:
  push:
@@ -2,212 +2,12 @@

## Usage

You can use this package to interact with the actions artifacts.
- [Upload an Artifact](#Upload-an-Artifact)
- [Download a Single Artifact](#Download-a-Single-Artifact)
- [Download All Artifacts](#Download-all-Artifacts)
- [Additional Documentation](#Additional-Documentation)
- [Contributions](#Contributions)
You can use this package to interact with the Actions artifacts.

Relative paths and absolute paths are both allowed. Relative paths are rooted against the current working directory.
The most recently published version of this package (`1.1.1`) can be found [here](https://github.com/actions/toolkit/tree/@actions/artifact@1.1.1/packages/artifact).

## Upload an Artifact
## 🚧 Under construction 🚧

Method Name: `uploadArtifact`
This package is currently undergoing a major overhaul in preparation for the `v4` versions of `upload-artifact` and `download-artifact` (these Actions will use a new `2.0.0` version of `@actions/artifact` that will soon be released). The upcoming version of `@actions/artifact` will take advantage of a major re-architecture with entirely new APIs.

#### Inputs
- `name`
  - The name of the artifact that is being uploaded
  - Required
- `files`
  - A list of file paths that describe what should be uploaded as part of the artifact
  - If a path is provided that does not exist, an error will be thrown
  - Can be absolute or relative. Internally everything is normalized and resolved
  - Required
- `rootDirectory`
  - A file path that denotes the root directory of the files being uploaded. This path is used to strip the paths provided in `files` to control how they are uploaded and structured
  - If a file specified in `files` is not in the `rootDirectory`, an error will be thrown
  - Required
- `options`
  - Extra options that allow for the customization of the upload behavior
  - Optional

#### Available Options

- `continueOnError`
  - Indicates if the artifact upload should continue in the event a file fails to upload. If there is an error during upload, a partial artifact will always be created and available for download at the end. The `size` reported will be the amount of storage that the user or org will be charged for the partial artifact.
  - If set to `false` and an error is encountered, all other uploads will stop and any files that were queued will not be attempted to be uploaded. The partial artifact available will only include files up until the failure.
  - If set to `true` and an error is encountered, the failed file will be skipped and ignored and all other queued files will be attempted to be uploaded. There will be an artifact available for download at the end with everything excluding the file that failed to upload.
  - Optional, defaults to `true` if not specified
- `retentionDays`
  - Number of days after which the artifact will expire
  - Minimum value: 1
  - Maximum value: 90, unless changed by a repository setting
  - If this is set to a value greater than the retention settings allow, the retention period will be reduced to match the maximum allowed on the server and the upload process will continue. An input of 0 assumes the default retention value. Both options are combined in the sketch below.
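A minimal sketch of passing both options together, using only the documented fields above; the ten-day retention is an illustrative value, not a recommendation:

```js
const artifact = require('@actions/artifact');
const artifactClient = artifact.create()

// Hypothetical values chosen for illustration only
const options = {
  continueOnError: false,
  retentionDays: 10
}
```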

#### Example using Absolute File Paths

```js
const artifact = require('@actions/artifact');
const artifactClient = artifact.create()
const artifactName = 'my-artifact';
const files = [
  '/home/user/files/plz-upload/file1.txt',
  '/home/user/files/plz-upload/file2.txt',
  '/home/user/files/plz-upload/dir/file3.txt'
]
const rootDirectory = '/home/user/files/plz-upload'
const options = {
  continueOnError: true
}

const uploadResult = await artifactClient.uploadArtifact(artifactName, files, rootDirectory, options)
```

#### Example using Relative File Paths

```js
// Assuming the current working directory is /home/user/files/plz-upload
const artifact = require('@actions/artifact');
const artifactClient = artifact.create()
const artifactName = 'my-artifact';
const files = [
  'file1.txt',
  'file2.txt',
  'dir/file3.txt'
]

const rootDirectory = '.' // Also possible to use __dirname
const options = {
  continueOnError: false
}

const uploadResponse = await artifactClient.uploadArtifact(artifactName, files, rootDirectory, options)
```

#### Upload Result

The returned `UploadResponse` will contain the following information

- `artifactName`
  - The name of the artifact that was uploaded
- `artifactItems`
  - A list of all files that describe what is uploaded if there are no errors encountered. Usually this will be equal to the inputted `files`, with the exception of empty directories (which will not be uploaded)
- `size`
  - Total size of the artifact that was uploaded in bytes
- `failedItems`
  - A list of items that were not uploaded successfully (this will include queued items that were not uploaded if `continueOnError` is set to `false`). This is a subset of `artifactItems`
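A minimal sketch of consuming these fields, continuing the relative-path example above (`uploadResponse` is the value returned by `uploadArtifact`):

```js
// Log the documented UploadResponse properties
console.log(`Uploaded ${uploadResponse.artifactName} (${uploadResponse.size} bytes)`)
if (uploadResponse.failedItems.length > 0) {
  console.log(`Some files failed to upload: ${uploadResponse.failedItems.join(', ')}`)
}
```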

## Download a Single Artifact

Method Name: `downloadArtifact`

#### Inputs
- `name`
  - The name of the artifact to download
  - Required
- `path`
  - Path that denotes where the artifact will be downloaded to
  - Optional. Defaults to the GitHub workspace directory (`$GITHUB_WORKSPACE`) if not specified
- `options`
  - Extra options that allow for the customization of the download behavior
  - Optional

#### Available Options

- `createArtifactFolder`
  - Specifies if a folder named after the artifact is created for the artifact that is downloaded (contents are downloaded into this folder)
  - Optional. Defaults to `false` if not specified

#### Example

```js
const artifact = require('@actions/artifact');
const artifactClient = artifact.create()
const artifactName = 'my-artifact';
const path = 'some/directory'
const options = {
  createArtifactFolder: false
}

const downloadResponse = await artifactClient.downloadArtifact(artifactName, path, options)

// Post download, the directory structure will look like this
/some
  /directory
    /file1.txt
    /file2.txt
    /dir
      /file3.txt

// If createArtifactFolder is set to true, the directory structure will look like this
/some
  /directory
    /my-artifact
      /file1.txt
      /file2.txt
      /dir
        /file3.txt
```

#### Download Response

The returned `DownloadResponse` will contain the following information

- `artifactName`
  - The name of the artifact that was downloaded
- `downloadPath`
  - The full path to where the artifact was downloaded
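As a brief sketch, the response can be logged directly, continuing the example above; both properties are the documented `DownloadResponse` fields:

```js
console.log(`Downloaded ${downloadResponse.artifactName} to ${downloadResponse.downloadPath}`)
```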

## Download All Artifacts

Method Name: `downloadAllArtifacts`

#### Inputs
- `path`
  - Path that denotes where the artifacts will be downloaded to
  - Optional. Defaults to the GitHub workspace directory (`$GITHUB_WORKSPACE`) if not specified

```js
const artifact = require('@actions/artifact');
const artifactClient = artifact.create();
const downloadResponse = await artifactClient.downloadAllArtifacts();

// output result
for (const response of downloadResponse) {
  console.log(response.artifactName);
  console.log(response.downloadPath);
}
```

Because there are multiple artifacts, an extra directory (denoted by the name of the artifact) will be created for each artifact in the path. With two artifacts (`my-artifact-1` and `my-artifact-2`, for example) and the default path, the directory structure will be as follows:
```js
/GITHUB_WORKSPACE
  /my-artifact-1
    / .. contents of `my-artifact-1`
  /my-artifact-2
    / .. contents of `my-artifact-2`
```

#### Download Result

An array will be returned that describes the results for downloading all artifacts. The number of items in the array indicates the number of artifacts that were downloaded.

Each artifact will have the same `DownloadResponse` as if it was individually downloaded
- `artifactName`
  - The name of the artifact that was downloaded
- `downloadPath`
  - The full path to where the artifact was downloaded

## Additional Documentation

Check out [additional-information](docs/additional-information.md) for extra documentation around usage, restrictions and behavior.

Check out [implementation-details](docs/implementation-details.md) for extra information about the implementation of this package.

## Contributions

See [contributor guidelines](https://github.com/actions/toolkit/blob/main/.github/CONTRIBUTING.md) for general guidelines and information about toolkit contributions.

For contributions related to this package, see [artifact contributions](CONTRIBUTIONS.md) for more information.
The upcoming `2.0.0` package and `v4` artifact Actions aim to solve some of the major pain-points that have made artifact usage difficult up until now.
@@ -1,5 +0,0 @@
name: 'Set env variables'
description: 'Sets certain env variables so that e2e artifact upload and download can be tested in a shell'
runs:
  using: 'node12'
  main: 'index.js'
@@ -1,14 +0,0 @@
// Certain env variables are not set by default in a shell context and are only available in a node context from a running action
// In order to be able to upload and download artifacts e2e in a shell when running CI tests, we need these env variables set
const fs = require('fs');
const os = require('os');
const filePath = process.env[`GITHUB_ENV`]
fs.appendFileSync(filePath, `ACTIONS_RUNTIME_URL=${process.env.ACTIONS_RUNTIME_URL}${os.EOL}`, {
  encoding: 'utf8'
})
fs.appendFileSync(filePath, `ACTIONS_RUNTIME_TOKEN=${process.env.ACTIONS_RUNTIME_TOKEN}${os.EOL}`, {
  encoding: 'utf8'
})
fs.appendFileSync(filePath, `GITHUB_RUN_ID=${process.env.GITHUB_RUN_ID}${os.EOL}`, {
  encoding: 'utf8'
})
@@ -1,57 +0,0 @@
import CRC64, {CRC64DigestEncoding} from '../src/internal/crc64'

const fixtures = {
  data:
    '🚀 👉😎👉 Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.\nUt enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.\nDuis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.\nExcepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\n',
  expected: {
    hex: '846CE4ADAD6223ED',
    base64: '7SNira3kbIQ=',
    buffer: Buffer.from([0xed, 0x23, 0x62, 0xad, 0xad, 0xe4, 0x6c, 0x84])
  }
}

function assertEncodings(crc: CRC64): void {
  const encodings = Object.keys(fixtures.expected) as CRC64DigestEncoding[]
  for (const encoding of encodings) {
    expect(crc.digest(encoding)).toEqual(fixtures.expected[encoding])
  }
}

describe('@actions/artifact/src/internal/crc64', () => {
  it('CRC64 from string', async () => {
    const crc = new CRC64()
    crc.update(fixtures.data)

    assertEncodings(crc)
  })

  it('CRC64 from buffer', async () => {
    const crc = new CRC64()
    const buf = Buffer.from(fixtures.data)
    crc.update(buf)

    assertEncodings(crc)
  })

  it('CRC64 from split data', async () => {
    const crc = new CRC64()
    const splits = fixtures.data.split('\n').slice(0, -1)
    for (const split of splits) {
      crc.update(`${split}\n`)
    }

    assertEncodings(crc)
  })

  it('flips 64 bits', async () => {
    const tests = [
      [BigInt(0), BigInt('0xffffffffffffffff')],
      [BigInt('0xffffffffffffffff'), BigInt(0)],
      [BigInt('0xdeadbeef'), BigInt('0xffffffff21524110')]
    ]

    for (const [input, expected] of tests) {
      expect(CRC64.flip64Bits(input)).toEqual(expected)
    }
  })
})
@@ -1,552 +0,0 @@
import * as path from 'path'
import * as core from '@actions/core'
import {URL} from 'url'
import {getDownloadSpecification} from '../src/internal/download-specification'
import {ContainerEntry} from '../src/internal/contracts'

const artifact1Name = 'my-artifact'
const artifact2Name = 'my-artifact-extra'

// Populating with only the information that is necessary
function getPartialContainerEntry(): ContainerEntry {
  return {
    containerId: 10,
    scopeIdentifier: '00000000-0000-0000-0000-000000000000',
    path: 'ADD_INFORMATION',
    itemType: 'ADD_INFORMATION',
    status: 'created',
    dateCreated: '2020-02-06T22:13:35.373Z',
    dateLastModified: '2020-02-06T22:13:35.453Z',
    createdBy: '82f0bf89-6e55-4e5a-b8b6-f75eb992578c',
    lastModifiedBy: '82f0bf89-6e55-4e5a-b8b6-f75eb992578c',
    itemLocation: 'ADD_INFORMATION',
    contentLocation: 'ADD_INFORMATION',
    contentId: '',
    fileLength: 100
  }
}

function createFileEntry(entryPath: string): ContainerEntry {
  const newFileEntry = getPartialContainerEntry()
  newFileEntry.path = entryPath
  newFileEntry.itemType = 'file'
  newFileEntry.itemLocation = createItemLocation(entryPath)
  newFileEntry.contentLocation = createContentLocation(entryPath)
  return newFileEntry
}

function createDirectoryEntry(directoryPath: string): ContainerEntry {
  const newDirectoryEntry = getPartialContainerEntry()
  newDirectoryEntry.path = directoryPath
  newDirectoryEntry.itemType = 'folder'
  newDirectoryEntry.itemLocation = createItemLocation(directoryPath)
  newDirectoryEntry.contentLocation = createContentLocation(directoryPath)
  return newDirectoryEntry
}

function createItemLocation(relativePath: string): string {
  const itemLocation = new URL(
    'https://testing/_apis/resources/Containers/10000'
  )
  itemLocation.searchParams.append('itemPath', relativePath)
  itemLocation.searchParams.append('metadata', 'true')
  return itemLocation.toString()
}

function createContentLocation(relativePath: string): string {
  const itemLocation = new URL(
    'https://testing/_apis/resources/Containers/10000'
  )
  itemLocation.searchParams.append('itemPath', relativePath)
  return itemLocation.toString()
}

/*
  Represents a set of container entries for two artifacts with the following directory structure

  /my-artifact
    /file1.txt
    /file2.txt
    /dir1
      /file3.txt
      /dir2
        /dir3
          /dir4
            file4.txt
            file5.txt (no length property)
            file6.txt (empty file)

  /my-artifact-extra
    /file1.txt
*/

// main artifact
const file1Path = path.join(artifact1Name, 'file1.txt')
const file2Path = path.join(artifact1Name, 'file2.txt')
const dir1Path = path.join(artifact1Name, 'dir1')
const file3Path = path.join(dir1Path, 'file3.txt')
const dir2Path = path.join(dir1Path, 'dir2')
const dir3Path = path.join(dir2Path, 'dir3')
const dir4Path = path.join(dir3Path, 'dir4')
const file4Path = path.join(dir4Path, 'file4.txt')
const file5Path = path.join(dir4Path, 'file5.txt')
const file6Path = path.join(dir4Path, 'file6.txt')

const rootDirectoryEntry = createDirectoryEntry(artifact1Name)
const directoryEntry1 = createDirectoryEntry(dir1Path)
const directoryEntry2 = createDirectoryEntry(dir2Path)
const directoryEntry3 = createDirectoryEntry(dir3Path)
const directoryEntry4 = createDirectoryEntry(dir4Path)
const fileEntry1 = createFileEntry(file1Path)
const fileEntry2 = createFileEntry(file2Path)
const fileEntry3 = createFileEntry(file3Path)
const fileEntry4 = createFileEntry(file4Path)

const missingLengthFileEntry = createFileEntry(file5Path)
missingLengthFileEntry.fileLength = undefined // one file does not have a fileLength
const emptyLengthFileEntry = createFileEntry(file6Path)
emptyLengthFileEntry.fileLength = 0 // empty file path

// extra artifact
const artifact2File1Path = path.join(artifact2Name, 'file1.txt')
const rootDirectoryEntry2 = createDirectoryEntry(artifact2Name)
const extraFileEntry = createFileEntry(artifact2File1Path)

const artifactContainerEntries: ContainerEntry[] = [
  rootDirectoryEntry,
  fileEntry1,
  fileEntry2,
  directoryEntry1,
  fileEntry3,
  directoryEntry2,
  directoryEntry3,
  directoryEntry4,
  fileEntry4,
  missingLengthFileEntry,
  emptyLengthFileEntry,
  rootDirectoryEntry2,
  extraFileEntry
]

describe('Search', () => {
  beforeAll(async () => {
    // mock all output so that there is less noise when running tests
    jest.spyOn(console, 'log').mockImplementation(() => {})
    jest.spyOn(core, 'debug').mockImplementation(() => {})
    jest.spyOn(core, 'info').mockImplementation(() => {})
    jest.spyOn(core, 'warning').mockImplementation(() => {})
  })

  it('Download Specification - Absolute Path with no root directory', () => {
    const testDownloadPath = path.join(
      __dirname,
      'some',
      'destination',
      'folder'
    )

    const specification = getDownloadSpecification(
      artifact1Name,
      artifactContainerEntries,
      testDownloadPath,
      false
    )

    expect(specification.rootDownloadLocation).toEqual(testDownloadPath)
    expect(specification.filesToDownload.length).toEqual(5)

    const item1ExpectedTargetPath = path.join(testDownloadPath, 'file1.txt')
    const item2ExpectedTargetPath = path.join(testDownloadPath, 'file2.txt')
    const item3ExpectedTargetPath = path.join(
      testDownloadPath,
      'dir1',
      'file3.txt'
    )
    const item4ExpectedTargetPath = path.join(
      testDownloadPath,
      'dir1',
      'dir2',
      'dir3',
      'dir4',
      'file4.txt'
    )
    const item5ExpectedTargetPath = path.join(
      testDownloadPath,
      'dir1',
      'dir2',
      'dir3',
      'dir4',
      'file5.txt'
    )
    const item6ExpectedTargetPath = path.join(
      testDownloadPath,
      'dir1',
      'dir2',
      'dir3',
      'dir4',
      'file6.txt'
    )

    const targetLocations = specification.filesToDownload.map(
      item => item.targetPath
    )
    expect(targetLocations).toContain(item1ExpectedTargetPath)
    expect(targetLocations).toContain(item2ExpectedTargetPath)
    expect(targetLocations).toContain(item3ExpectedTargetPath)
    expect(targetLocations).toContain(item4ExpectedTargetPath)
    expect(targetLocations).toContain(item5ExpectedTargetPath)

    for (const downloadItem of specification.filesToDownload) {
      if (downloadItem.targetPath === item1ExpectedTargetPath) {
        expect(downloadItem.sourceLocation).toEqual(
          createContentLocation(file1Path)
        )
      } else if (downloadItem.targetPath === item2ExpectedTargetPath) {
        expect(downloadItem.sourceLocation).toEqual(
          createContentLocation(file2Path)
        )
      } else if (downloadItem.targetPath === item3ExpectedTargetPath) {
        expect(downloadItem.sourceLocation).toEqual(
          createContentLocation(file3Path)
        )
      } else if (downloadItem.targetPath === item4ExpectedTargetPath) {
        expect(downloadItem.sourceLocation).toEqual(
          createContentLocation(file4Path)
        )
      } else if (downloadItem.targetPath === item5ExpectedTargetPath) {
        expect(downloadItem.sourceLocation).toEqual(
          createContentLocation(file5Path)
        )
      } else {
        throw new Error('this should never be reached')
      }
    }

    expect(specification.directoryStructure.length).toEqual(3)
    expect(specification.directoryStructure).toContain(testDownloadPath)
    expect(specification.directoryStructure).toContain(
      path.join(testDownloadPath, 'dir1')
    )
    expect(specification.directoryStructure).toContain(
      path.join(testDownloadPath, 'dir1', 'dir2', 'dir3', 'dir4')
    )

    expect(specification.emptyFilesToCreate.length).toEqual(1)
    expect(specification.emptyFilesToCreate).toContain(item6ExpectedTargetPath)
  })

  it('Download Specification - Relative Path with no root directory', () => {
    const testDownloadPath = path.join('some', 'destination', 'folder')

    const specification = getDownloadSpecification(
      artifact1Name,
      artifactContainerEntries,
      testDownloadPath,
      false
    )

    expect(specification.rootDownloadLocation).toEqual(testDownloadPath)
    expect(specification.filesToDownload.length).toEqual(5)

    const item1ExpectedTargetPath = path.join(testDownloadPath, 'file1.txt')
    const item2ExpectedTargetPath = path.join(testDownloadPath, 'file2.txt')
    const item3ExpectedTargetPath = path.join(
      testDownloadPath,
      'dir1',
      'file3.txt'
    )
    const item4ExpectedTargetPath = path.join(
      testDownloadPath,
      'dir1',
      'dir2',
      'dir3',
      'dir4',
      'file4.txt'
    )
    const item5ExpectedTargetPath = path.join(
      testDownloadPath,
      'dir1',
      'dir2',
      'dir3',
      'dir4',
      'file5.txt'
    )
    const item6ExpectedTargetPath = path.join(
      testDownloadPath,
      'dir1',
      'dir2',
      'dir3',
      'dir4',
      'file6.txt'
    )

    const targetLocations = specification.filesToDownload.map(
      item => item.targetPath
    )
    expect(targetLocations).toContain(item1ExpectedTargetPath)
    expect(targetLocations).toContain(item2ExpectedTargetPath)
    expect(targetLocations).toContain(item3ExpectedTargetPath)
    expect(targetLocations).toContain(item4ExpectedTargetPath)
    expect(targetLocations).toContain(item5ExpectedTargetPath)

    for (const downloadItem of specification.filesToDownload) {
      if (downloadItem.targetPath === item1ExpectedTargetPath) {
        expect(downloadItem.sourceLocation).toEqual(
          createContentLocation(file1Path)
        )
      } else if (downloadItem.targetPath === item2ExpectedTargetPath) {
        expect(downloadItem.sourceLocation).toEqual(
          createContentLocation(file2Path)
        )
      } else if (downloadItem.targetPath === item3ExpectedTargetPath) {
        expect(downloadItem.sourceLocation).toEqual(
          createContentLocation(file3Path)
        )
      } else if (downloadItem.targetPath === item4ExpectedTargetPath) {
        expect(downloadItem.sourceLocation).toEqual(
          createContentLocation(file4Path)
        )
      } else if (downloadItem.targetPath === item5ExpectedTargetPath) {
        expect(downloadItem.sourceLocation).toEqual(
          createContentLocation(file5Path)
        )
      } else {
        throw new Error('this should never be reached')
      }
    }

    expect(specification.directoryStructure.length).toEqual(3)
    expect(specification.directoryStructure).toContain(testDownloadPath)
    expect(specification.directoryStructure).toContain(
      path.join(testDownloadPath, 'dir1')
    )
    expect(specification.directoryStructure).toContain(
      path.join(testDownloadPath, 'dir1', 'dir2', 'dir3', 'dir4')
    )

    expect(specification.emptyFilesToCreate.length).toEqual(1)
    expect(specification.emptyFilesToCreate).toContain(item6ExpectedTargetPath)
  })

  it('Download Specification - Absolute Path with root directory', () => {
    const testDownloadPath = path.join(
      __dirname,
      'some',
      'destination',
      'folder'
    )

    const specification = getDownloadSpecification(
      artifact1Name,
      artifactContainerEntries,
      testDownloadPath,
      true
    )

    expect(specification.rootDownloadLocation).toEqual(
      path.join(testDownloadPath, artifact1Name)
    )
    expect(specification.filesToDownload.length).toEqual(5)

    const item1ExpectedTargetPath = path.join(
      testDownloadPath,
      artifact1Name,
      'file1.txt'
    )
    const item2ExpectedTargetPath = path.join(
      testDownloadPath,
      artifact1Name,
      'file2.txt'
    )
    const item3ExpectedTargetPath = path.join(
      testDownloadPath,
      artifact1Name,
      'dir1',
      'file3.txt'
    )
    const item4ExpectedTargetPath = path.join(
      testDownloadPath,
      artifact1Name,
      'dir1',
      'dir2',
      'dir3',
      'dir4',
      'file4.txt'
    )
    const item5ExpectedTargetPath = path.join(
      testDownloadPath,
      artifact1Name,
      'dir1',
      'dir2',
      'dir3',
      'dir4',
      'file5.txt'
    )
    const item6ExpectedTargetPath = path.join(
      testDownloadPath,
      artifact1Name,
      'dir1',
      'dir2',
      'dir3',
      'dir4',
      'file6.txt'
    )

    const targetLocations = specification.filesToDownload.map(
      item => item.targetPath
    )
    expect(targetLocations).toContain(item1ExpectedTargetPath)
    expect(targetLocations).toContain(item2ExpectedTargetPath)
    expect(targetLocations).toContain(item3ExpectedTargetPath)
    expect(targetLocations).toContain(item4ExpectedTargetPath)
    expect(targetLocations).toContain(item5ExpectedTargetPath)

    for (const downloadItem of specification.filesToDownload) {
      if (downloadItem.targetPath === item1ExpectedTargetPath) {
        expect(downloadItem.sourceLocation).toEqual(
          createContentLocation(file1Path)
        )
      } else if (downloadItem.targetPath === item2ExpectedTargetPath) {
        expect(downloadItem.sourceLocation).toEqual(
          createContentLocation(file2Path)
        )
      } else if (downloadItem.targetPath === item3ExpectedTargetPath) {
        expect(downloadItem.sourceLocation).toEqual(
          createContentLocation(file3Path)
        )
      } else if (downloadItem.targetPath === item4ExpectedTargetPath) {
        expect(downloadItem.sourceLocation).toEqual(
          createContentLocation(file4Path)
        )
      } else if (downloadItem.targetPath === item5ExpectedTargetPath) {
        expect(downloadItem.sourceLocation).toEqual(
          createContentLocation(file5Path)
        )
      } else {
        throw new Error('this should never be reached')
      }
    }

    expect(specification.directoryStructure.length).toEqual(3)
    expect(specification.directoryStructure).toContain(
      path.join(testDownloadPath, artifact1Name)
    )
    expect(specification.directoryStructure).toContain(
      path.join(testDownloadPath, dir1Path)
    )
    expect(specification.directoryStructure).toContain(
      path.join(testDownloadPath, dir4Path)
    )

    expect(specification.emptyFilesToCreate.length).toEqual(1)
    expect(specification.emptyFilesToCreate).toContain(item6ExpectedTargetPath)
  })

  it('Download Specification - Relative Path with root directory', () => {
    const testDownloadPath = path.join('some', 'destination', 'folder')

    const specification = getDownloadSpecification(
      artifact1Name,
      artifactContainerEntries,
      testDownloadPath,
      true
    )

    expect(specification.rootDownloadLocation).toEqual(
      path.join(testDownloadPath, artifact1Name)
    )
    expect(specification.filesToDownload.length).toEqual(5)

    const item1ExpectedTargetPath = path.join(
      testDownloadPath,
      artifact1Name,
      'file1.txt'
    )
    const item2ExpectedTargetPath = path.join(
      testDownloadPath,
      artifact1Name,
      'file2.txt'
    )
    const item3ExpectedTargetPath = path.join(
      testDownloadPath,
      artifact1Name,
      'dir1',
      'file3.txt'
    )
    const item4ExpectedTargetPath = path.join(
      testDownloadPath,
      artifact1Name,
      'dir1',
      'dir2',
      'dir3',
      'dir4',
      'file4.txt'
    )
    const item5ExpectedTargetPath = path.join(
      testDownloadPath,
      artifact1Name,
      'dir1',
      'dir2',
      'dir3',
      'dir4',
      'file5.txt'
    )
    const item6ExpectedTargetPath = path.join(
      testDownloadPath,
      artifact1Name,
      'dir1',
      'dir2',
      'dir3',
      'dir4',
      'file6.txt'
    )

    const targetLocations = specification.filesToDownload.map(
      item => item.targetPath
    )
    expect(targetLocations).toContain(item1ExpectedTargetPath)
    expect(targetLocations).toContain(item2ExpectedTargetPath)
    expect(targetLocations).toContain(item3ExpectedTargetPath)
    expect(targetLocations).toContain(item4ExpectedTargetPath)
    expect(targetLocations).toContain(item5ExpectedTargetPath)

    for (const downloadItem of specification.filesToDownload) {
      if (downloadItem.targetPath === item1ExpectedTargetPath) {
        expect(downloadItem.sourceLocation).toEqual(
          createContentLocation(file1Path)
        )
      } else if (downloadItem.targetPath === item2ExpectedTargetPath) {
        expect(downloadItem.sourceLocation).toEqual(
          createContentLocation(file2Path)
        )
      } else if (downloadItem.targetPath === item3ExpectedTargetPath) {
        expect(downloadItem.sourceLocation).toEqual(
          createContentLocation(file3Path)
        )
      } else if (downloadItem.targetPath === item4ExpectedTargetPath) {
        expect(downloadItem.sourceLocation).toEqual(
          createContentLocation(file4Path)
        )
      } else if (downloadItem.targetPath === item5ExpectedTargetPath) {
        expect(downloadItem.sourceLocation).toEqual(
          createContentLocation(file5Path)
        )
      } else {
        throw new Error('this should never be reached')
      }
    }

    expect(specification.directoryStructure.length).toEqual(3)
    expect(specification.directoryStructure).toContain(
      path.join(testDownloadPath, artifact1Name)
    )
    expect(specification.directoryStructure).toContain(
      path.join(testDownloadPath, dir1Path)
    )
    expect(specification.directoryStructure).toContain(
      path.join(testDownloadPath, dir4Path)
    )

    expect(specification.emptyFilesToCreate.length).toEqual(1)
    expect(specification.emptyFilesToCreate).toContain(item6ExpectedTargetPath)
  })
})
@@ -1,490 +0,0 @@
import * as core from '@actions/core'
import * as http from 'http'
import * as io from '../../io/src/io'
import * as net from 'net'
import * as path from 'path'
import * as configVariables from '../src/internal/config-variables'
import {promises as fs} from 'fs'
import {DownloadItem} from '../src/internal/download-specification'
import {HttpClient, HttpClientResponse} from '@actions/http-client'
import {DownloadHttpClient} from '../src/internal/download-http-client'
import {
  ListArtifactsResponse,
  QueryArtifactResponse
} from '../src/internal/contracts'
import * as stream from 'stream'
import {gzip} from 'zlib'
import {promisify} from 'util'

const root = path.join(__dirname, '_temp', 'artifact-download-tests')
const defaultEncoding = 'utf8'

jest.mock('../src/internal/config-variables')
jest.mock('@actions/http-client')

describe('Download Tests', () => {
  beforeAll(async () => {
    await io.rmRF(root)
    await fs.mkdir(path.join(root), {
      recursive: true
    })

    // mock all output so that there is less noise when running tests
    jest.spyOn(console, 'log').mockImplementation(() => {})
    jest.spyOn(core, 'debug').mockImplementation(() => {})
    jest.spyOn(core, 'info').mockImplementation(() => {})
    jest.spyOn(core, 'warning').mockImplementation(() => {})
    jest.spyOn(core, 'error').mockImplementation(() => {})
  })

  /**
   * Test Listing Artifacts
   */
  it('List Artifacts - Success', async () => {
    setupSuccessfulListArtifactsResponse()
    const downloadHttpClient = new DownloadHttpClient()
    const artifacts = await downloadHttpClient.listArtifacts()
    expect(artifacts.count).toEqual(2)

    const artifactNames = artifacts.value.map(item => item.name)
    expect(artifactNames).toContain('artifact1-name')
    expect(artifactNames).toContain('artifact2-name')

    for (const artifact of artifacts.value) {
      if (artifact.name === 'artifact1-name') {
        expect(artifact.url).toEqual(
          `${configVariables.getRuntimeUrl()}_apis/pipelines/1/runs/1/artifacts?artifactName=artifact1-name`
        )
      } else if (artifact.name === 'artifact2-name') {
        expect(artifact.url).toEqual(
          `${configVariables.getRuntimeUrl()}_apis/pipelines/1/runs/1/artifacts?artifactName=artifact2-name`
        )
      } else {
        throw new Error(
          'Invalid artifact combination. This should never be reached'
        )
      }
    }
  })

  it('List Artifacts - Failure', async () => {
    setupFailedResponse()
    const downloadHttpClient = new DownloadHttpClient()
    expect(downloadHttpClient.listArtifacts()).rejects.toThrow(
      'List Artifacts failed: Artifact service responded with 400'
    )
  })

  /**
   * Test Container Items
   */
  it('Container Items - Success', async () => {
    setupSuccessfulContainerItemsResponse()
    const downloadHttpClient = new DownloadHttpClient()
    const response = await downloadHttpClient.getContainerItems(
      'artifact-name',
      configVariables.getRuntimeUrl()
    )
    expect(response.count).toEqual(2)

    const itemPaths = response.value.map(item => item.path)
    expect(itemPaths).toContain('artifact-name')
    expect(itemPaths).toContain('artifact-name/file1.txt')

    for (const containerEntry of response.value) {
      if (containerEntry.path === 'artifact-name') {
        expect(containerEntry.itemType).toEqual('folder')
      } else if (containerEntry.path === 'artifact-name/file1.txt') {
        expect(containerEntry.itemType).toEqual('file')
      } else {
        throw new Error(
          'Invalid container combination. This should never be reached'
        )
      }
    }
  })

  it('Container Items - Failure', async () => {
    setupFailedResponse()
    const downloadHttpClient = new DownloadHttpClient()
    expect(
      downloadHttpClient.getContainerItems(
        'artifact-name',
        configVariables.getRuntimeUrl()
      )
    ).rejects.toThrow(
      `Get Container Items failed: Artifact service responded with 400`
    )
  })

  it('Test downloading an individual artifact with gzip', async () => {
    const fileContents = Buffer.from(
      'gzip worked on the first try\n',
      defaultEncoding
    )
    const targetPath = path.join(root, 'FileA.txt')

    setupDownloadItemResponse(fileContents, true, 200, false, false)
    const downloadHttpClient = new DownloadHttpClient()

    const items: DownloadItem[] = []
    items.push({
      sourceLocation: `${configVariables.getRuntimeUrl()}_apis/resources/Containers/13?itemPath=my-artifact%2FFileA.txt`,
      targetPath
    })

    await expect(
      downloadHttpClient.downloadSingleArtifact(items)
    ).resolves.not.toThrow()

    await checkDestinationFile(targetPath, fileContents)
  })

  it('Test downloading an individual artifact without gzip', async () => {
    const fileContents = Buffer.from(
      'plaintext worked on the first try\n',
      defaultEncoding
    )
    const targetPath = path.join(root, 'FileB.txt')

    setupDownloadItemResponse(fileContents, false, 200, false, false)
    const downloadHttpClient = new DownloadHttpClient()

    const items: DownloadItem[] = []
    items.push({
      sourceLocation: `${configVariables.getRuntimeUrl()}_apis/resources/Containers/13?itemPath=my-artifact%2FFileB.txt`,
      targetPath
    })

    await expect(
      downloadHttpClient.downloadSingleArtifact(items)
    ).resolves.not.toThrow()

    await checkDestinationFile(targetPath, fileContents)
  })

  it('Test retryable status codes during artifact download', async () => {
    // The first http response should return a retryable status call while the subsequent call should return a 200 so
    // the download should successfully finish
    const retryableStatusCodes = [429, 500, 502, 503, 504]
    for (const statusCode of retryableStatusCodes) {
      const fileContents = Buffer.from('try, try again\n', defaultEncoding)
      const targetPath = path.join(root, `FileC-${statusCode}.txt`)

      setupDownloadItemResponse(fileContents, false, statusCode, false, true)
      const downloadHttpClient = new DownloadHttpClient()

      const items: DownloadItem[] = []
      items.push({
        sourceLocation: `${configVariables.getRuntimeUrl()}_apis/resources/Containers/13?itemPath=my-artifact%2FFileC.txt`,
        targetPath
      })

      await expect(
        downloadHttpClient.downloadSingleArtifact(items)
      ).resolves.not.toThrow()

      await checkDestinationFile(targetPath, fileContents)
    }
  })

  it('Test retry on truncated response with gzip', async () => {
    const fileContents = Buffer.from(
      'Sometimes gunzip fails on the first try\n',
      defaultEncoding
    )
    const targetPath = path.join(root, 'FileD.txt')

    setupDownloadItemResponse(fileContents, true, 200, true, true)
    const downloadHttpClient = new DownloadHttpClient()

    const items: DownloadItem[] = []
    items.push({
      sourceLocation: `${configVariables.getRuntimeUrl()}_apis/resources/Containers/13?itemPath=my-artifact%2FFileD.txt`,
      targetPath
    })

    await expect(
      downloadHttpClient.downloadSingleArtifact(items)
    ).resolves.not.toThrow()

    await checkDestinationFile(targetPath, fileContents)
  })

  it('Test retry on truncated response without gzip', async () => {
    const fileContents = Buffer.from(
      'You have to inspect the content-length header to know if you got everything\n',
      defaultEncoding
    )
    const targetPath = path.join(root, 'FileE.txt')

    setupDownloadItemResponse(fileContents, false, 200, true, true)
    const downloadHttpClient = new DownloadHttpClient()

    const items: DownloadItem[] = []
    items.push({
      sourceLocation: `${configVariables.getRuntimeUrl()}_apis/resources/Containers/13?itemPath=my-artifact%2FFileD.txt`,
      targetPath
    })

    await expect(
      downloadHttpClient.downloadSingleArtifact(items)
    ).resolves.not.toThrow()

    await checkDestinationFile(targetPath, fileContents)
  })

  /**
   * Helper used to setup mocking for the HttpClient
   */
  async function emptyMockReadBody(): Promise<string> {
    return new Promise(resolve => {
      resolve()
    })
  }

  /**
   * Setups up HTTP GET response for a successful listArtifacts() call
   */
  function setupSuccessfulListArtifactsResponse(): void {
    jest.spyOn(HttpClient.prototype, 'get').mockImplementationOnce(async () => {
      const mockMessage = new http.IncomingMessage(new net.Socket())
      let mockReadBody = emptyMockReadBody

      mockMessage.statusCode = 201
      const response: ListArtifactsResponse = {
        count: 2,
        value: [
          {
            containerId: '13',
            size: -1,
            signedContent: 'false',
            fileContainerResourceUrl: `${configVariables.getRuntimeUrl()}_apis/resources/Containers/13`,
            type: 'actions_storage',
            name: 'artifact1-name',
            url: `${configVariables.getRuntimeUrl()}_apis/pipelines/1/runs/1/artifacts?artifactName=artifact1-name`
          },
          {
            containerId: '13',
            size: -1,
            signedContent: 'false',
            fileContainerResourceUrl: `${configVariables.getRuntimeUrl()}_apis/resources/Containers/13`,
            type: 'actions_storage',
            name: 'artifact2-name',
            url: `${configVariables.getRuntimeUrl()}_apis/pipelines/1/runs/1/artifacts?artifactName=artifact2-name`
          }
        ]
      }
      const returnData: string = JSON.stringify(response, null, 2)
      mockReadBody = async function(): Promise<string> {
        return new Promise(resolve => {
          resolve(returnData)
        })
      }

      return new Promise<HttpClientResponse>(resolve => {
        resolve({
          message: mockMessage,
          readBody: mockReadBody
        })
      })
    })
  }

  /**
   * Setups up HTTP GET response for downloading items
   * @param isGzip is the downloaded item gzip encoded
   * @param firstHttpResponseCode the http response code that should be returned
   */
  function setupDownloadItemResponse(
    fileContents: Buffer,
    isGzip: boolean,
    firstHttpResponseCode: number,
    truncateFirstResponse: boolean,
    retryExpected: boolean
  ): void {
    const spyInstance = jest
      .spyOn(HttpClient.prototype, 'get')
      .mockImplementationOnce(async () => {
        if (firstHttpResponseCode === 200) {
          const fullResponse = await constructResponse(isGzip, fileContents)
          const actualResponse = truncateFirstResponse
            ? fullResponse.subarray(0, 3)
            : fullResponse

          return {
            message: getDownloadResponseMessage(
              firstHttpResponseCode,
              isGzip,
              fullResponse.length,
              actualResponse
            ),
            readBody: emptyMockReadBody
          }
        } else {
          return {
            message: getDownloadResponseMessage(
              firstHttpResponseCode,
              false,
              0,
              null
            ),
            readBody: emptyMockReadBody
          }
        }
      })

    // set up a second mock only if we expect a retry. Otherwise this mock will affect other tests.
    if (retryExpected) {
      spyInstance.mockImplementationOnce(async () => {
        // chained response, if the HTTP GET function gets called again, return a successful response
        const fullResponse = await constructResponse(isGzip, fileContents)
        return {
          message: getDownloadResponseMessage(
            200,
            isGzip,
            fullResponse.length,
            fullResponse
          ),
          readBody: emptyMockReadBody
        }
      })
    }
  }

  async function constructResponse(
    isGzip: boolean,
    plaintext: Buffer | string
  ): Promise<Buffer> {
    if (isGzip) {
      return await promisify(gzip)(plaintext)
    } else if (typeof plaintext === 'string') {
      return Buffer.from(plaintext, defaultEncoding)
    } else {
      return plaintext
    }
  }

  function getDownloadResponseMessage(
    httpResponseCode: number,
    isGzip: boolean,
    contentLength: number,
    response: Buffer | null
  ): http.IncomingMessage {
    let readCallCount = 0
    const mockMessage = <http.IncomingMessage>new stream.Readable({
      read(size) {
        switch (readCallCount++) {
          case 0:
            if (!!response && response.byteLength > size) {
              throw new Error(
                `test response larger than requested size (${size})`
              )
            }
            this.push(response)
            break

          default:
            // end the stream
            this.push(null)
            break
        }
      }
    })

    mockMessage.statusCode = httpResponseCode
    mockMessage.headers = {
      'content-length': contentLength.toString()
    }

    if (isGzip) {
      mockMessage.headers['content-encoding'] = 'gzip'
    }

    return mockMessage
  }

  /**
   * Setups up HTTP GET response when querying for container items
   */
  function setupSuccessfulContainerItemsResponse(): void {
    jest.spyOn(HttpClient.prototype, 'get').mockImplementationOnce(async () => {
      const mockMessage = new http.IncomingMessage(new net.Socket())
      let mockReadBody = emptyMockReadBody

      mockMessage.statusCode = 201
      const response: QueryArtifactResponse = {
        count: 2,
        value: [
          {
            containerId: 10000,
            scopeIdentifier: '00000000-0000-0000-0000-000000000000',
            path: 'artifact-name',
            itemType: 'folder',
            status: 'created',
            dateCreated: '2020-02-06T22:13:35.373Z',
            dateLastModified: '2020-02-06T22:13:35.453Z',
            createdBy: '82f0bf89-6e55-4e5a-b8b6-f75eb992578c',
            lastModifiedBy: '82f0bf89-6e55-4e5a-b8b6-f75eb992578c',
            itemLocation: `${configVariables.getRuntimeUrl()}/_apis/resources/Containers/10000?itemPath=artifact-name&metadata=True`,
            contentLocation: `${configVariables.getRuntimeUrl()}/_apis/resources/Containers/10000?itemPath=artifact-name`,
            contentId: ''
          },
          {
            containerId: 10000,
            scopeIdentifier: '00000000-0000-0000-0000-000000000000',
            path: 'artifact-name/file1.txt',
            itemType: 'file',
            status: 'created',
            dateCreated: '2020-02-06T22:13:35.373Z',
            dateLastModified: '2020-02-06T22:13:35.453Z',
            createdBy: '82f0bf89-6e55-4e5a-b8b6-f75eb992578c',
            lastModifiedBy: '82f0bf89-6e55-4e5a-b8b6-f75eb992578c',
            itemLocation: `${configVariables.getRuntimeUrl()}/_apis/resources/Containers/10000?itemPath=artifact-name%2Ffile1.txt&metadata=True`,
            contentLocation: `${configVariables.getRuntimeUrl()}/_apis/resources/Containers/10000?itemPath=artifact-name%2Ffile1.txt`,
            contentId: ''
          }
        ]
      }
      const returnData: string = JSON.stringify(response, null, 2)
      mockReadBody = async function(): Promise<string> {
        return new Promise(resolve => {
          resolve(returnData)
        })
      }

      return new Promise<HttpClientResponse>(resolve => {
        resolve({
          message: mockMessage,
          readBody: mockReadBody
        })
      })
    })
  }

  /**
   * Setups up HTTP GET response for a generic failed request
   */
  function setupFailedResponse(): void {
    jest.spyOn(HttpClient.prototype, 'get').mockImplementationOnce(async () => {
      const mockMessage = new http.IncomingMessage(new net.Socket())
      mockMessage.statusCode = 400
      return new Promise<HttpClientResponse>(resolve => {
        resolve({
          message: mockMessage,
          readBody: emptyMockReadBody
        })
      })
    })
  }

  async function checkDestinationFile(
    targetPath: string,
    expectedContents: Buffer
  ): Promise<void> {
    const fileContents = await fs.readFile(targetPath)

    expect(fileContents.byteLength).toEqual(expectedContents.byteLength)
    expect(fileContents.equals(expectedContents)).toBeTruthy()
  }
})
@@ -1,78 +0,0 @@
import {
  checkArtifactName,
  checkArtifactFilePath
} from '../src/internal/path-and-artifact-name-validation'
import * as core from '@actions/core'

describe('Path and artifact name validation', () => {
  beforeAll(() => {
    // mock all output so that there is less noise when running tests
    jest.spyOn(console, 'log').mockImplementation(() => {})
    jest.spyOn(core, 'debug').mockImplementation(() => {})
    jest.spyOn(core, 'info').mockImplementation(() => {})
    jest.spyOn(core, 'warning').mockImplementation(() => {})
  })

  it('Check Artifact Name for any invalid characters', () => {
    const invalidNames = [
      'my\\artifact',
      'my/artifact',
      'my"artifact',
      'my:artifact',
      'my<artifact',
      'my>artifact',
      'my|artifact',
      'my*artifact',
      'my?artifact',
      ''
    ]
    for (const invalidName of invalidNames) {
      expect(() => {
        checkArtifactName(invalidName)
      }).toThrow()
    }

    const validNames = [
      'my-normal-artifact',
      'myNormalArtifact',
      'm¥ñðrmålÄr†ï£å¢†'
    ]
    for (const validName of validNames) {
      expect(() => {
        checkArtifactName(validName)
      }).not.toThrow()
    }
  })

  it('Check Artifact File Path for any invalid characters', () => {
    const invalidNames = [
      'some/invalid"artifact/path',
      'some/invalid:artifact/path',
      'some/invalid<artifact/path',
      'some/invalid>artifact/path',
      'some/invalid|artifact/path',
      'some/invalid*artifact/path',
      'some/invalid?artifact/path',
      'some/invalid\rartifact/path',
      'some/invalid\nartifact/path',
      'some/invalid\r\nartifact/path',
      ''
    ]
    for (const invalidName of invalidNames) {
      expect(() => {
        checkArtifactFilePath(invalidName)
      }).toThrow()
    }

    const validNames = [
      'my/perfectly-normal/artifact-path',
      'my/perfectly\\Normal/Artifact-path',
      'm¥/ñðrmål/Är†ï£å¢†'
    ]
    for (const validName of validNames) {
      expect(() => {
        checkArtifactFilePath(validName)
      }).not.toThrow()
    }
  })
})
@@ -1,113 +0,0 @@
import * as http from 'http'
import * as net from 'net'
import * as core from '@actions/core'
import * as configVariables from '../src/internal/config-variables'
import {retry} from '../src/internal/requestUtils'
import {HttpClientResponse} from '@actions/http-client'

jest.mock('../src/internal/config-variables')

interface ITestResult {
  responseCode: number
  errorMessage: string | null
}

async function testRetry(
  responseCodes: number[],
  expectedResult: ITestResult
): Promise<void> {
  const reverse = responseCodes.reverse() // Reverse responses since we pop from end
  if (expectedResult.errorMessage) {
    // we expect some exception to be thrown
    expect(
      retry(
        'test',
        async () => handleResponse(reverse.pop()),
        new Map(), // extra error message for any particular http codes
        configVariables.getRetryLimit()
      )
    ).rejects.toThrow(expectedResult.errorMessage)
  } else {
    // we expect a correct status code to be returned
    const actualResult = await retry(
      'test',
      async () => handleResponse(reverse.pop()),
      new Map(), // extra error message for any particular http codes
      configVariables.getRetryLimit()
    )
    expect(actualResult.message.statusCode).toEqual(expectedResult.responseCode)
  }
}

async function handleResponse(
  testResponseCode: number | undefined
): Promise<HttpClientResponse> {
  if (!testResponseCode) {
    throw new Error(
      'Test incorrectly set up. reverse.pop() was called too many times so not enough test response codes were supplied'
    )
  }

  return setupSingleMockResponse(testResponseCode)
}

beforeAll(async () => {
  // mock all output so that there is less noise when running tests
  jest.spyOn(console, 'log').mockImplementation(() => {})
  jest.spyOn(core, 'debug').mockImplementation(() => {})
  jest.spyOn(core, 'info').mockImplementation(() => {})
  jest.spyOn(core, 'warning').mockImplementation(() => {})
  jest.spyOn(core, 'error').mockImplementation(() => {})
})

/**
 * Helpers used to setup mocking for the HttpClient
 */
async function emptyMockReadBody(): Promise<string> {
  return new Promise(resolve => {
    resolve()
  })
}

async function setupSingleMockResponse(
  statusCode: number
): Promise<HttpClientResponse> {
  const mockMessage = new http.IncomingMessage(new net.Socket())
  const mockReadBody = emptyMockReadBody
  mockMessage.statusCode = statusCode
  return new Promise<HttpClientResponse>(resolve => {
    resolve({
      message: mockMessage,
      readBody: mockReadBody
    })
  })
}

test('retry works on successful response', async () => {
  await testRetry([200], {
    responseCode: 200,
    errorMessage: null
  })
})

test('retry works after retryable status code', async () => {
  await testRetry([503, 200], {
    responseCode: 200,
    errorMessage: null
  })
})

test('retry fails after exhausting retries', async () => {
  // __mocks__/config-variables caps the max retry count in tests to 2
  await testRetry([503, 503, 200], {
    responseCode: 200,
    errorMessage: 'test failed: Artifact service responded with 503'
  })
})

test('retry fails after non-retryable status code', async () => {
  await testRetry([400, 200], {
    responseCode: 400,
    errorMessage: 'test failed: Artifact service responded with 400'
  })
})
@@ -1,27 +0,0 @@
#!/bin/bash

path="$1"
expectedContent="$2"

if [ "$path" == "" ]; then
  echo "File path not provided"
  exit 1
fi

if [ "$expectedContent" == "" ]; then
  echo "Expected file contents not provided"
  exit 1
fi

if [ ! -f "$path" ]; then
  echo "Expected file $path does not exist"
  exit 1
fi

actualContent=$(cat "$path")
if [ "$expectedContent" == "_EMPTY_" ] && [ ! -s "$path" ]; then
  exit 0
elif [ "$actualContent" != "$expectedContent" ]; then
  echo "File contents are not correct, expected $expectedContent, received $actualContent"
  exit 1
fi
@ -1,154 +0,0 @@
import * as core from '@actions/core'
import * as tmp from 'tmp-promise'
import * as path from 'path'
import * as io from '../../io/src/io'
import {promises as fs} from 'fs'
import {createGZipFileOnDisk} from '../src/internal/upload-gzip'

const root = path.join(__dirname, '_temp', 'upload-gzip')
const tempGzFilePath = path.join(root, 'file.gz')
const tempGzipFilePath = path.join(root, 'file.gzip')
const tempTgzFilePath = path.join(root, 'file.tgz')
const tempTazFilePath = path.join(root, 'file.taz')
const tempZFilePath = path.join(root, 'file.Z')
const tempTaZFilePath = path.join(root, 'file.taZ')
const tempBz2FilePath = path.join(root, 'file.bz2')
const tempTbzFilePath = path.join(root, 'file.tbz')
const tempTbz2FilePath = path.join(root, 'file.tbz2')
const tempTz2FilePath = path.join(root, 'file.tz2')
const tempLzFilePath = path.join(root, 'file.lz')
const tempLzmaFilePath = path.join(root, 'file.lzma')
const tempTlzFilePath = path.join(root, 'file.tlz')
const tempLzoFilePath = path.join(root, 'file.lzo')
const tempXzFilePath = path.join(root, 'file.xz')
const tempTxzFilePath = path.join(root, 'file.txz')
const tempZstFilePath = path.join(root, 'file.zst')
const tempZstdFilePath = path.join(root, 'file.zstd')
const tempTzstFilePath = path.join(root, 'file.tzst')
const tempZipFilePath = path.join(root, 'file.zip')
const temp7zFilePath = path.join(root, 'file.7z')
const tempNormalFilePath = path.join(root, 'file.txt')

jest.mock('../src/internal/config-variables')

beforeAll(async () => {
  // mock all output so that there is less noise when running tests
  jest.spyOn(console, 'log').mockImplementation(() => {})
  jest.spyOn(core, 'debug').mockImplementation(() => {})
  jest.spyOn(core, 'info').mockImplementation(() => {})
  jest.spyOn(core, 'warning').mockImplementation(() => {})
  jest.spyOn(core, 'error').mockImplementation(() => {})

  // clear temp directory and create files that will be "uploaded"
  await io.rmRF(root)
  await fs.mkdir(path.join(root))
  await fs.writeFile(tempGzFilePath, 'a file with a .gz file extension')
  await fs.writeFile(tempGzipFilePath, 'a file with a .gzip file extension')
  await fs.writeFile(tempTgzFilePath, 'a file with a .tgz file extension')
  await fs.writeFile(tempTazFilePath, 'a file with a .taz file extension')
  await fs.writeFile(tempZFilePath, 'a file with a .Z file extension')
  await fs.writeFile(tempTaZFilePath, 'a file with a .taZ file extension')
  await fs.writeFile(tempBz2FilePath, 'a file with a .bz2 file extension')
  await fs.writeFile(tempTbzFilePath, 'a file with a .tbz file extension')
  await fs.writeFile(tempTbz2FilePath, 'a file with a .tbz2 file extension')
  await fs.writeFile(tempTz2FilePath, 'a file with a .tz2 file extension')
  await fs.writeFile(tempLzFilePath, 'a file with a .lz file extension')
  await fs.writeFile(tempLzmaFilePath, 'a file with a .lzma file extension')
  await fs.writeFile(tempTlzFilePath, 'a file with a .tlz file extension')
  await fs.writeFile(tempLzoFilePath, 'a file with a .lzo file extension')
  await fs.writeFile(tempXzFilePath, 'a file with a .xz file extension')
  await fs.writeFile(tempTxzFilePath, 'a file with a .txz file extension')
  await fs.writeFile(tempZstFilePath, 'a file with a .zst file extension')
  await fs.writeFile(tempZstdFilePath, 'a file with a .zstd file extension')
  await fs.writeFile(tempTzstFilePath, 'a file with a .tzst file extension')
  await fs.writeFile(tempZipFilePath, 'a file with a .zip file extension')
  await fs.writeFile(temp7zFilePath, 'a file with a .7z file extension')
  await fs.writeFile(tempNormalFilePath, 'a file with a .txt file extension')
})

test('Number.MAX_SAFE_INTEGER is returned when an existing compressed file is used', async () => {
  // create temporary file
  const tempFile = await tmp.file()

  expect(await createGZipFileOnDisk(tempGzFilePath, tempFile.path)).toEqual(
    Number.MAX_SAFE_INTEGER
  )
  expect(await createGZipFileOnDisk(tempGzipFilePath, tempFile.path)).toEqual(
    Number.MAX_SAFE_INTEGER
  )
  expect(await createGZipFileOnDisk(tempTgzFilePath, tempFile.path)).toEqual(
    Number.MAX_SAFE_INTEGER
  )
  expect(await createGZipFileOnDisk(tempTazFilePath, tempFile.path)).toEqual(
    Number.MAX_SAFE_INTEGER
  )
  expect(await createGZipFileOnDisk(tempZFilePath, tempFile.path)).toEqual(
    Number.MAX_SAFE_INTEGER
  )
  expect(await createGZipFileOnDisk(tempTaZFilePath, tempFile.path)).toEqual(
    Number.MAX_SAFE_INTEGER
  )
  expect(await createGZipFileOnDisk(tempBz2FilePath, tempFile.path)).toEqual(
    Number.MAX_SAFE_INTEGER
  )
  expect(await createGZipFileOnDisk(tempTbzFilePath, tempFile.path)).toEqual(
    Number.MAX_SAFE_INTEGER
  )
  expect(await createGZipFileOnDisk(tempTbz2FilePath, tempFile.path)).toEqual(
    Number.MAX_SAFE_INTEGER
  )
  expect(await createGZipFileOnDisk(tempTz2FilePath, tempFile.path)).toEqual(
    Number.MAX_SAFE_INTEGER
  )
  expect(await createGZipFileOnDisk(tempLzFilePath, tempFile.path)).toEqual(
    Number.MAX_SAFE_INTEGER
  )
  expect(await createGZipFileOnDisk(tempLzmaFilePath, tempFile.path)).toEqual(
    Number.MAX_SAFE_INTEGER
  )
  expect(await createGZipFileOnDisk(tempTlzFilePath, tempFile.path)).toEqual(
    Number.MAX_SAFE_INTEGER
  )
  expect(await createGZipFileOnDisk(tempLzoFilePath, tempFile.path)).toEqual(
    Number.MAX_SAFE_INTEGER
  )
  expect(await createGZipFileOnDisk(tempXzFilePath, tempFile.path)).toEqual(
    Number.MAX_SAFE_INTEGER
  )
  expect(await createGZipFileOnDisk(tempTxzFilePath, tempFile.path)).toEqual(
    Number.MAX_SAFE_INTEGER
  )
  expect(await createGZipFileOnDisk(tempZstFilePath, tempFile.path)).toEqual(
    Number.MAX_SAFE_INTEGER
  )
  expect(await createGZipFileOnDisk(tempZstdFilePath, tempFile.path)).toEqual(
    Number.MAX_SAFE_INTEGER
  )
  expect(await createGZipFileOnDisk(tempTzstFilePath, tempFile.path)).toEqual(
    Number.MAX_SAFE_INTEGER
  )
  expect(await createGZipFileOnDisk(tempZipFilePath, tempFile.path)).toEqual(
    Number.MAX_SAFE_INTEGER
  )
  expect(await createGZipFileOnDisk(temp7zFilePath, tempFile.path)).toEqual(
    Number.MAX_SAFE_INTEGER
  )
  expect(
    await createGZipFileOnDisk(tempNormalFilePath, tempFile.path)
  ).not.toEqual(Number.MAX_SAFE_INTEGER)
})

test('gzip file on disk gets successfully created', async () => {
  // create temporary file
  const tempFile = await tmp.file()

  const gzipFileSize = await createGZipFileOnDisk(
    tempNormalFilePath,
    tempFile.path
  )
  const fileStat = await fs.stat(tempNormalFilePath)
  const totalFileSize = fileStat.size

  // original file and gzip file should not be equal in size
  expect(gzipFileSize).not.toEqual(totalFileSize)
})
@ -1,353 +0,0 @@
import * as io from '../../io/src/io'
import * as path from 'path'
import {promises as fs} from 'fs'
import * as core from '@actions/core'
import {getUploadSpecification} from '../src/internal/upload-specification'

const artifactName = 'my-artifact'
const root = path.join(__dirname, '_temp', 'upload-specification')
const goodItem1Path = path.join(
  root,
  'folder-a',
  'folder-b',
  'folder-c',
  'good-item1.txt'
)
const goodItem2Path = path.join(root, 'folder-d', 'good-item2.txt')
const goodItem3Path = path.join(root, 'folder-d', 'good-item3.txt')
const goodItem4Path = path.join(root, 'folder-d', 'good-item4.txt')
const goodItem5Path = path.join(root, 'good-item5.txt')
const badItem1Path = path.join(
  root,
  'folder-a',
  'folder-b',
  'folder-c',
  'bad-item1.txt'
)
const badItem2Path = path.join(root, 'folder-d', 'bad-item2.txt')
const badItem3Path = path.join(root, 'folder-f', 'bad-item3.txt')
const badItem4Path = path.join(root, 'folder-h', 'folder-i', 'bad-item4.txt')
const badItem5Path = path.join(root, 'folder-h', 'folder-i', 'bad-item5.txt')
const extraFileInFolderCPath = path.join(
  root,
  'folder-a',
  'folder-b',
  'folder-c',
  'extra-file-in-folder-c.txt'
)
const amazingFileInFolderHPath = path.join(root, 'folder-h', 'amazing-item.txt')

const artifactFilesToUpload = [
  goodItem1Path,
  goodItem2Path,
  goodItem3Path,
  goodItem4Path,
  goodItem5Path,
  extraFileInFolderCPath,
  amazingFileInFolderHPath
]

describe('Search', () => {
  beforeAll(async () => {
    // mock all output so that there is less noise when running tests
    jest.spyOn(console, 'log').mockImplementation(() => {})
    jest.spyOn(core, 'debug').mockImplementation(() => {})
    jest.spyOn(core, 'info').mockImplementation(() => {})
    jest.spyOn(core, 'warning').mockImplementation(() => {})

    // clear temp directory
    await io.rmRF(root)
    await fs.mkdir(path.join(root, 'folder-a', 'folder-b', 'folder-c'), {
      recursive: true
    })
    await fs.mkdir(path.join(root, 'folder-a', 'folder-b', 'folder-e'), {
      recursive: true
    })
    await fs.mkdir(path.join(root, 'folder-d'), {
      recursive: true
    })
    await fs.mkdir(path.join(root, 'folder-f'), {
      recursive: true
    })
    await fs.mkdir(path.join(root, 'folder-g'), {
      recursive: true
    })
    await fs.mkdir(path.join(root, 'folder-h', 'folder-i'), {
      recursive: true
    })

    await fs.writeFile(goodItem1Path, 'good item1 file')
    await fs.writeFile(goodItem2Path, 'good item2 file')
    await fs.writeFile(goodItem3Path, 'good item3 file')
    await fs.writeFile(goodItem4Path, 'good item4 file')
    await fs.writeFile(goodItem5Path, 'good item5 file')

    await fs.writeFile(badItem1Path, 'bad item1 file')
    await fs.writeFile(badItem2Path, 'bad item2 file')
    await fs.writeFile(badItem3Path, 'bad item3 file')
    await fs.writeFile(badItem4Path, 'bad item4 file')
    await fs.writeFile(badItem5Path, 'bad item5 file')

    await fs.writeFile(extraFileInFolderCPath, 'extra file')

    await fs.writeFile(amazingFileInFolderHPath, 'amazing file')
    /*
      Directory structure of files that get created:
      root/
        folder-a/
          folder-b/
            folder-c/
              good-item1.txt
              bad-item1.txt
              extra-file-in-folder-c.txt
            folder-e/
        folder-d/
          good-item2.txt
          good-item3.txt
          good-item4.txt
          bad-item2.txt
        folder-f/
          bad-item3.txt
        folder-g/
        folder-h/
          amazing-item.txt
          folder-i/
            bad-item4.txt
            bad-item5.txt
        good-item5.txt
    */
  })

  it('Upload Specification - Fail non-existent rootDirectory', async () => {
    const invalidRootDirectory = path.join(
      __dirname,
      '_temp',
      'upload-specification-invalid'
    )
    expect(() => {
      getUploadSpecification(
        artifactName,
        invalidRootDirectory,
        artifactFilesToUpload
      )
    }).toThrow(`Provided rootDirectory ${invalidRootDirectory} does not exist`)
  })

  it('Upload Specification - Fail invalid rootDirectory', async () => {
    expect(() => {
      getUploadSpecification(artifactName, goodItem1Path, artifactFilesToUpload)
    }).toThrow(
      `Provided rootDirectory ${goodItem1Path} is not a valid directory`
    )
  })

  it('Upload Specification - File does not exist', async () => {
    const fakeFilePath = path.join(
      artifactName,
      'folder-a',
      'folder-b',
      'non-existent-file.txt'
    )
    expect(() => {
      getUploadSpecification(artifactName, root, [fakeFilePath])
    }).toThrow(`File ${fakeFilePath} does not exist`)
  })

  it('Upload Specification - Non parent directory', async () => {
    const folderADirectory = path.join(root, 'folder-a')
    const artifactFiles = [
      goodItem1Path,
      badItem1Path,
      extraFileInFolderCPath,
      goodItem5Path
    ]
    expect(() => {
      getUploadSpecification(artifactName, folderADirectory, artifactFiles)
    }).toThrow(
      `The rootDirectory: ${folderADirectory} is not a parent directory of the file: ${goodItem5Path}`
    )
  })

  it('Upload Specification - Success', async () => {
    const specifications = getUploadSpecification(
      artifactName,
      root,
      artifactFilesToUpload
    )
    expect(specifications.length).toEqual(7)

    const absolutePaths = specifications.map(item => item.absoluteFilePath)
    expect(absolutePaths).toContain(goodItem1Path)
    expect(absolutePaths).toContain(goodItem2Path)
    expect(absolutePaths).toContain(goodItem3Path)
    expect(absolutePaths).toContain(goodItem4Path)
    expect(absolutePaths).toContain(goodItem5Path)
    expect(absolutePaths).toContain(extraFileInFolderCPath)
    expect(absolutePaths).toContain(amazingFileInFolderHPath)

    for (const specification of specifications) {
      if (specification.absoluteFilePath === goodItem1Path) {
        expect(specification.uploadFilePath).toEqual(
          path.join(
            artifactName,
            'folder-a',
            'folder-b',
            'folder-c',
            'good-item1.txt'
          )
        )
      } else if (specification.absoluteFilePath === goodItem2Path) {
        expect(specification.uploadFilePath).toEqual(
          path.join(artifactName, 'folder-d', 'good-item2.txt')
        )
      } else if (specification.absoluteFilePath === goodItem3Path) {
        expect(specification.uploadFilePath).toEqual(
          path.join(artifactName, 'folder-d', 'good-item3.txt')
        )
      } else if (specification.absoluteFilePath === goodItem4Path) {
        expect(specification.uploadFilePath).toEqual(
          path.join(artifactName, 'folder-d', 'good-item4.txt')
        )
      } else if (specification.absoluteFilePath === goodItem5Path) {
        expect(specification.uploadFilePath).toEqual(
          path.join(artifactName, 'good-item5.txt')
        )
      } else if (specification.absoluteFilePath === extraFileInFolderCPath) {
        expect(specification.uploadFilePath).toEqual(
          path.join(
            artifactName,
            'folder-a',
            'folder-b',
            'folder-c',
            'extra-file-in-folder-c.txt'
          )
        )
      } else if (specification.absoluteFilePath === amazingFileInFolderHPath) {
        expect(specification.uploadFilePath).toEqual(
          path.join(artifactName, 'folder-h', 'amazing-item.txt')
        )
      } else {
        throw new Error(
          'Invalid specification found. This should never be reached'
        )
      }
    }
  })

  it('Upload Specification - Success with extra slash', async () => {
    const rootWithSlash = `${root}/`
    const specifications = getUploadSpecification(
      artifactName,
      rootWithSlash,
      artifactFilesToUpload
    )
    expect(specifications.length).toEqual(7)

    const absolutePaths = specifications.map(item => item.absoluteFilePath)
    expect(absolutePaths).toContain(goodItem1Path)
    expect(absolutePaths).toContain(goodItem2Path)
    expect(absolutePaths).toContain(goodItem3Path)
    expect(absolutePaths).toContain(goodItem4Path)
    expect(absolutePaths).toContain(goodItem5Path)
    expect(absolutePaths).toContain(extraFileInFolderCPath)
    expect(absolutePaths).toContain(amazingFileInFolderHPath)

    for (const specification of specifications) {
      if (specification.absoluteFilePath === goodItem1Path) {
        expect(specification.uploadFilePath).toEqual(
          path.join(
            artifactName,
            'folder-a',
            'folder-b',
            'folder-c',
            'good-item1.txt'
          )
        )
      } else if (specification.absoluteFilePath === goodItem2Path) {
        expect(specification.uploadFilePath).toEqual(
          path.join(artifactName, 'folder-d', 'good-item2.txt')
        )
      } else if (specification.absoluteFilePath === goodItem3Path) {
        expect(specification.uploadFilePath).toEqual(
          path.join(artifactName, 'folder-d', 'good-item3.txt')
        )
      } else if (specification.absoluteFilePath === goodItem4Path) {
        expect(specification.uploadFilePath).toEqual(
          path.join(artifactName, 'folder-d', 'good-item4.txt')
        )
      } else if (specification.absoluteFilePath === goodItem5Path) {
        expect(specification.uploadFilePath).toEqual(
          path.join(artifactName, 'good-item5.txt')
        )
      } else if (specification.absoluteFilePath === extraFileInFolderCPath) {
        expect(specification.uploadFilePath).toEqual(
          path.join(
            artifactName,
            'folder-a',
            'folder-b',
            'folder-c',
            'extra-file-in-folder-c.txt'
          )
        )
      } else if (specification.absoluteFilePath === amazingFileInFolderHPath) {
        expect(specification.uploadFilePath).toEqual(
          path.join(artifactName, 'folder-h', 'amazing-item.txt')
        )
      } else {
        throw new Error(
          'Invalid specification found. This should never be reached'
        )
      }
    }
  })

  it('Upload Specification - Directories should not be included', async () => {
    const folderEPath = path.join(root, 'folder-a', 'folder-b', 'folder-e')
    const filesWithDirectory = [
      goodItem1Path,
      goodItem4Path,
      folderEPath,
      badItem3Path
    ]
    const specifications = getUploadSpecification(
      artifactName,
      root,
      filesWithDirectory
    )
    expect(specifications.length).toEqual(3)
    const absolutePaths = specifications.map(item => item.absoluteFilePath)
    expect(absolutePaths).toContain(goodItem1Path)
    expect(absolutePaths).toContain(goodItem4Path)
    expect(absolutePaths).toContain(badItem3Path)

    for (const specification of specifications) {
      if (specification.absoluteFilePath === goodItem1Path) {
        expect(specification.uploadFilePath).toEqual(
          path.join(
            artifactName,
            'folder-a',
            'folder-b',
            'folder-c',
            'good-item1.txt'
          )
        )
      } else if (specification.absoluteFilePath === goodItem2Path) {
        expect(specification.uploadFilePath).toEqual(
          path.join(artifactName, 'folder-d', 'good-item2.txt')
        )
      } else if (specification.absoluteFilePath === goodItem4Path) {
        expect(specification.uploadFilePath).toEqual(
          path.join(artifactName, 'folder-d', 'good-item4.txt')
        )
      } else if (specification.absoluteFilePath === badItem3Path) {
        expect(specification.uploadFilePath).toEqual(
          path.join(artifactName, 'folder-f', 'bad-item3.txt')
        )
      } else {
        throw new Error(
          'Invalid specification found. This should never be reached'
        )
      }
    }
  })
})
@ -1,551 +0,0 @@
import * as http from 'http'
import * as io from '../../io/src/io'
import * as net from 'net'
import * as path from 'path'
import {mocked} from 'ts-jest/utils'
import {exec, execSync} from 'child_process'
import {createGunzip} from 'zlib'
import {promisify} from 'util'
import {UploadHttpClient} from '../src/internal/upload-http-client'
import * as core from '@actions/core'
import {promises as fs} from 'fs'
import {getRuntimeUrl} from '../src/internal/config-variables'
import {HttpClient, HttpClientResponse} from '@actions/http-client'
import {
  ArtifactResponse,
  PatchArtifactSizeSuccessResponse
} from '../src/internal/contracts'
import {UploadSpecification} from '../src/internal/upload-specification'
import {getArtifactUrl} from '../src/internal/utils'
import {UploadOptions} from '../src/internal/upload-options'

const root = path.join(__dirname, '_temp', 'artifact-upload')
const file1Path = path.join(root, 'file1.txt')
const file2Path = path.join(root, 'file2.txt')
const file3Path = path.join(root, 'folder1', 'file3.txt')
const file4Path = path.join(root, 'folder1', 'file4.txt')
const file5Path = path.join(root, 'folder1', 'folder2', 'folder3', 'file5.txt')

let file1Size = 0
let file2Size = 0
let file3Size = 0
let file4Size = 0
let file5Size = 0

jest.mock('../src/internal/config-variables')
jest.mock('@actions/http-client')

describe('Upload Tests', () => {
  beforeAll(async () => {
    // mock all output so that there is less noise when running tests
    jest.spyOn(console, 'log').mockImplementation(() => {})
    jest.spyOn(core, 'debug').mockImplementation(() => {})
    jest.spyOn(core, 'info').mockImplementation(() => {})
    jest.spyOn(core, 'warning').mockImplementation(() => {})
    jest.spyOn(core, 'error').mockImplementation(() => {})

    // setup mocking for calls that go through the HttpClient
    setupHttpClientMock()

    // clear temp directory and create files that will be "uploaded"
    await io.rmRF(root)
    await fs.mkdir(path.join(root, 'folder1', 'folder2', 'folder3'), {
      recursive: true
    })
    await fs.writeFile(file1Path, 'this is file 1')
    await fs.writeFile(file2Path, 'this is file 2')
    await fs.writeFile(file3Path, 'this is file 3')
    await fs.writeFile(file4Path, 'this is file 4')
    await fs.writeFile(file5Path, 'this is file 5')
    /*
      Directory structure for files that get created:
      root/
        file1.txt
        file2.txt
        folder1/
          file3.txt
          file4.txt
          folder2/
            folder3/
              file5.txt
    */

    file1Size = (await fs.stat(file1Path)).size
    file2Size = (await fs.stat(file2Path)).size
    file3Size = (await fs.stat(file3Path)).size
    file4Size = (await fs.stat(file4Path)).size
    file5Size = (await fs.stat(file5Path)).size
  })

  /**
   * Artifact Creation Tests
   */
  it('Create Artifact - Success', async () => {
    const artifactName = 'valid-artifact-name'
    const uploadHttpClient = new UploadHttpClient()
    const response = await uploadHttpClient.createArtifactInFileContainer(
      artifactName
    )
    expect(response.containerId).toEqual('13')
    expect(response.size).toEqual(-1)
    expect(response.signedContent).toEqual('false')
    expect(response.fileContainerResourceUrl).toEqual(
      `${getRuntimeUrl()}_apis/resources/Containers/13`
    )
    expect(response.type).toEqual('actions_storage')
    expect(response.name).toEqual(artifactName)
    expect(response.url).toEqual(
      `${getRuntimeUrl()}_apis/pipelines/1/runs/1/artifacts?artifactName=${artifactName}`
    )
  })

  it('Create Artifact - Failure', async () => {
    const artifactName = 'invalid-artifact-name'
    const uploadHttpClient = new UploadHttpClient()
    expect(
      uploadHttpClient.createArtifactInFileContainer(artifactName)
    ).rejects.toEqual(
      new Error(
        `Create Artifact Container failed: The artifact name invalid-artifact-name is not valid. Request URL ${getArtifactUrl()}`
      )
    )
  })

  it('Create Artifact - Retention Less Than Min Value Error', async () => {
    const artifactName = 'valid-artifact-name'
    const options: UploadOptions = {
      retentionDays: -1
    }
    const uploadHttpClient = new UploadHttpClient()
    expect(
      uploadHttpClient.createArtifactInFileContainer(artifactName, options)
    ).rejects.toEqual(new Error('Invalid retention, minimum value is 1.'))
  })

  it('Create Artifact - Storage Quota Error', async () => {
    const artifactName = 'storage-quota-hit'
    const uploadHttpClient = new UploadHttpClient()
    expect(
      uploadHttpClient.createArtifactInFileContainer(artifactName)
    ).rejects.toEqual(
      new Error(
        'Create Artifact Container failed: Artifact storage quota has been hit. Unable to upload any new artifacts'
      )
    )
  })

  /**
   * Artifact Upload Tests
   */
  it('Upload Artifact - Success', async () => {
    /**
     * Normally search.findFilesToUpload() would be used for providing information about what to upload. These tests,
     * however, focus solely on the upload APIs so searchResult[] will be hard-coded
     */
    const artifactName = 'successful-artifact'
    const uploadSpecification: UploadSpecification[] = [
      {
        absoluteFilePath: file1Path,
        uploadFilePath: `${artifactName}/file1.txt`
      },
      {
        absoluteFilePath: file2Path,
        uploadFilePath: `${artifactName}/file2.txt`
      },
      {
        absoluteFilePath: file3Path,
        uploadFilePath: `${artifactName}/folder1/file3.txt`
      },
      {
        absoluteFilePath: file4Path,
        uploadFilePath: `${artifactName}/folder1/file4.txt`
      },
      {
        absoluteFilePath: file5Path,
        uploadFilePath: `${artifactName}/folder1/folder2/folder3/file5.txt`
      }
    ]

    const expectedTotalSize =
      file1Size + file2Size + file3Size + file4Size + file5Size
    const uploadUrl = `${getRuntimeUrl()}_apis/resources/Containers/13`
    const uploadHttpClient = new UploadHttpClient()
    const uploadResult = await uploadHttpClient.uploadArtifactToFileContainer(
      uploadUrl,
      uploadSpecification
    )
    expect(uploadResult.failedItems.length).toEqual(0)
    expect(uploadResult.uploadSize).toEqual(expectedTotalSize)
  })

  function hasMkfifo(): boolean {
    try {
      // make sure we drain the stdout
      return (
        process.platform !== 'win32' &&
        execSync('which mkfifo').toString().length > 0
      )
    } catch (e) {
      return false
    }
  }
  const withMkfifoIt = hasMkfifo() ? it : it.skip
  withMkfifoIt(
    'Upload Artifact with content from named pipe - Success',
    async () => {
      // create a named pipe 'pipe' with content 'hello pipe'
      const content = Buffer.from('hello pipe')
      const pipeFilePath = path.join(root, 'pipe')
      await promisify(exec)('mkfifo pipe', {cwd: root})
      // don't want to await here as that would block until read
      fs.writeFile(pipeFilePath, content)

      const artifactName = 'successful-artifact'
      const uploadSpecification: UploadSpecification[] = [
        {
          absoluteFilePath: pipeFilePath,
          uploadFilePath: `${artifactName}/pipe`
        }
      ]

      const uploadUrl = `${getRuntimeUrl()}_apis/resources/Containers/13`
      const uploadHttpClient = new UploadHttpClient()
      const uploadResult = await uploadHttpClient.uploadArtifactToFileContainer(
        uploadUrl,
        uploadSpecification
      )

      // accesses the ReadableStream that was passed into sendStream
      // eslint-disable-next-line @typescript-eslint/unbound-method
      const stream = mocked(HttpClient.prototype.sendStream).mock.calls[0][2]
      expect(stream).not.toBeNull()
      // decompresses the passed stream
      const data: Buffer[] = []
      for await (const chunk of stream.pipe(createGunzip())) {
        data.push(Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk as string))
      }
      const uploaded = Buffer.concat(data)

      expect(uploadResult.failedItems.length).toEqual(0)
      expect(uploaded).toEqual(content)
    }
  )

  it('Upload Artifact - Failed Single File Upload', async () => {
    const uploadSpecification: UploadSpecification[] = [
      {
        absoluteFilePath: file1Path,
        uploadFilePath: `this-file-upload-will-fail`
      }
    ]

    const uploadUrl = `${getRuntimeUrl()}_apis/resources/Containers/13`
    const uploadHttpClient = new UploadHttpClient()
    const uploadResult = await uploadHttpClient.uploadArtifactToFileContainer(
      uploadUrl,
      uploadSpecification
    )
    expect(uploadResult.failedItems.length).toEqual(1)
    expect(uploadResult.uploadSize).toEqual(0)
  })

  it('Upload Artifact - Partial Upload Continue On Error', async () => {
    const artifactName = 'partial-artifact'
    const uploadSpecification: UploadSpecification[] = [
      {
        absoluteFilePath: file1Path,
        uploadFilePath: `${artifactName}/file1.txt`
      },
      {
        absoluteFilePath: file2Path,
        uploadFilePath: `${artifactName}/file2.txt`
      },
      {
        absoluteFilePath: file3Path,
        uploadFilePath: `${artifactName}/folder1/file3.txt`
      },
      {
        absoluteFilePath: file4Path,
        uploadFilePath: `this-file-upload-will-fail`
      },
      {
        absoluteFilePath: file5Path,
        uploadFilePath: `${artifactName}/folder1/folder2/folder3/file5.txt`
      }
    ]

    const expectedPartialSize = file1Size + file2Size + file4Size + file5Size
    const uploadUrl = `${getRuntimeUrl()}_apis/resources/Containers/13`
    const uploadHttpClient = new UploadHttpClient()
    const uploadResult = await uploadHttpClient.uploadArtifactToFileContainer(
      uploadUrl,
      uploadSpecification,
      {continueOnError: true}
    )
    expect(uploadResult.failedItems.length).toEqual(1)
    expect(uploadResult.uploadSize).toEqual(expectedPartialSize)
  })

  it('Upload Artifact - Partial Upload Fail Fast', async () => {
    const artifactName = 'partial-artifact'
    const uploadSpecification: UploadSpecification[] = [
      {
        absoluteFilePath: file1Path,
        uploadFilePath: `${artifactName}/file1.txt`
      },
      {
        absoluteFilePath: file2Path,
        uploadFilePath: `${artifactName}/file2.txt`
      },
      {
        absoluteFilePath: file3Path,
        uploadFilePath: `${artifactName}/folder1/file3.txt`
      },
      {
        absoluteFilePath: file4Path,
        uploadFilePath: `this-file-upload-will-fail`
      },
      {
        absoluteFilePath: file5Path,
        uploadFilePath: `${artifactName}/folder1/folder2/folder3/file5.txt`
      }
    ]

    const expectedPartialSize = file1Size + file2Size + file3Size
    const uploadUrl = `${getRuntimeUrl()}_apis/resources/Containers/13`
    const uploadHttpClient = new UploadHttpClient()
    const uploadResult = await uploadHttpClient.uploadArtifactToFileContainer(
      uploadUrl,
      uploadSpecification,
      {continueOnError: false}
    )
    expect(uploadResult.failedItems.length).toEqual(2)
    expect(uploadResult.uploadSize).toEqual(expectedPartialSize)
  })

  it('Upload Artifact - Failed upload with no options', async () => {
    const artifactName = 'partial-artifact'
    const uploadSpecification: UploadSpecification[] = [
      {
        absoluteFilePath: file1Path,
        uploadFilePath: `${artifactName}/file1.txt`
      },
      {
        absoluteFilePath: file2Path,
        uploadFilePath: `${artifactName}/file2.txt`
      },
      {
        absoluteFilePath: file3Path,
        uploadFilePath: `${artifactName}/folder1/file3.txt`
      },
      {
        absoluteFilePath: file4Path,
        uploadFilePath: `this-file-upload-will-fail`
      },
      {
        absoluteFilePath: file5Path,
        uploadFilePath: `${artifactName}/folder1/folder2/folder3/file5.txt`
      }
    ]

    const expectedPartialSize = file1Size + file2Size + file3Size + file5Size
    const uploadUrl = `${getRuntimeUrl()}_apis/resources/Containers/13`
    const uploadHttpClient = new UploadHttpClient()
    const uploadResult = await uploadHttpClient.uploadArtifactToFileContainer(
      uploadUrl,
      uploadSpecification
    )
    expect(uploadResult.failedItems.length).toEqual(1)
    expect(uploadResult.uploadSize).toEqual(expectedPartialSize)
  })

  it('Upload Artifact - Failed upload with empty options', async () => {
    const artifactName = 'partial-artifact'
    const uploadSpecification: UploadSpecification[] = [
      {
        absoluteFilePath: file1Path,
        uploadFilePath: `${artifactName}/file1.txt`
      },
      {
        absoluteFilePath: file2Path,
        uploadFilePath: `${artifactName}/file2.txt`
      },
      {
        absoluteFilePath: file3Path,
        uploadFilePath: `${artifactName}/folder1/file3.txt`
      },
      {
        absoluteFilePath: file4Path,
        uploadFilePath: `this-file-upload-will-fail`
      },
      {
        absoluteFilePath: file5Path,
        uploadFilePath: `${artifactName}/folder1/folder2/folder3/file5.txt`
      }
    ]

    const expectedPartialSize = file1Size + file2Size + file3Size + file5Size
    const uploadUrl = `${getRuntimeUrl()}_apis/resources/Containers/13`
    const uploadHttpClient = new UploadHttpClient()
    const uploadResult = await uploadHttpClient.uploadArtifactToFileContainer(
      uploadUrl,
      uploadSpecification,
      {}
    )
    expect(uploadResult.failedItems.length).toEqual(1)
    expect(uploadResult.uploadSize).toEqual(expectedPartialSize)
  })

  /**
   * Artifact Association Tests
   */
  it('Associate Artifact - Success', async () => {
    const uploadHttpClient = new UploadHttpClient()
    expect(async () => {
      uploadHttpClient.patchArtifactSize(130, 'my-artifact')
    }).not.toThrow()
  })

  it('Associate Artifact - Not Found', async () => {
    const uploadHttpClient = new UploadHttpClient()
    expect(
      uploadHttpClient.patchArtifactSize(100, 'non-existent-artifact')
    ).rejects.toThrow(
      'An Artifact with the name non-existent-artifact was not found'
    )
  })

  it('Associate Artifact - Error', async () => {
    const uploadHttpClient = new UploadHttpClient()
    expect(
      uploadHttpClient.patchArtifactSize(-2, 'my-artifact')
    ).rejects.toThrow(
      'Finalize artifact upload failed: Artifact service responded with 400'
    )
  })

  /**
   * Helpers used to setup mocking for the HttpClient
   */
  async function emptyMockReadBody(): Promise<string> {
    return new Promise(resolve => {
      resolve()
    })
  }

  function setupHttpClientMock(): void {
    /**
     * Mocks Post calls that are used during Artifact Creation tests
     *
     * Simulates success and non-success status codes depending on the artifact name along with an appropriate
     * payload that represents an expected response
     */
    jest
      .spyOn(HttpClient.prototype, 'post')
      .mockImplementation(async (requestdata, data) => {
        // parse the input data and use the provided artifact name as part of the response
        const inputData = JSON.parse(data)
        const mockMessage = new http.IncomingMessage(new net.Socket())
        let mockReadBody = emptyMockReadBody

        if (inputData.Name === 'invalid-artifact-name') {
          mockMessage.statusCode = 400
        } else if (inputData.Name === 'storage-quota-hit') {
          mockMessage.statusCode = 403
        } else {
          mockMessage.statusCode = 201
          const response: ArtifactResponse = {
            containerId: '13',
            size: -1,
            signedContent: 'false',
            fileContainerResourceUrl: `${getRuntimeUrl()}_apis/resources/Containers/13`,
            type: 'actions_storage',
            name: inputData.Name,
            url: `${getRuntimeUrl()}_apis/pipelines/1/runs/1/artifacts?artifactName=${
              inputData.Name
            }`
          }
          const returnData: string = JSON.stringify(response, null, 2)
          mockReadBody = async function(): Promise<string> {
            return new Promise(resolve => {
              resolve(returnData)
            })
          }
        }
        return new Promise<HttpClientResponse>(resolve => {
          resolve({
            message: mockMessage,
            readBody: mockReadBody
          })
        })
      })

    /**
     * Mocks SendStream calls that are made during Artifact Upload tests
     *
     * A 500 response is used to simulate a failed upload stream. The uploadUrl can be set to
     * include 'fail' to specify that the upload should fail
     */
    jest
      .spyOn(HttpClient.prototype, 'sendStream')
      .mockImplementation(async (verb, requestUrl) => {
        const mockMessage = new http.IncomingMessage(new net.Socket())
        mockMessage.statusCode = 200
        if (requestUrl.includes('fail')) {
          mockMessage.statusCode = 500
        }

        return new Promise<HttpClientResponse>(resolve => {
          resolve({
            message: mockMessage,
            readBody: emptyMockReadBody
          })
        })
      })

    /**
     * Mocks Patch calls that are made during Artifact Association tests
     *
     * Simulates success and non-success status codes depending on the input size along with an appropriate
     * payload that represents an expected response
     */
    jest
      .spyOn(HttpClient.prototype, 'patch')
      .mockImplementation(async (requestdata, data) => {
        const inputData = JSON.parse(data)
        const mockMessage = new http.IncomingMessage(new net.Socket())

        // Get the name from the end of requestdata. Will be something like https://www.example.com/_apis/pipelines/workflows/15/artifacts?api-version=6.0-preview&artifactName=my-artifact
        const artifactName = requestdata.split('=')[2]
        let mockReadBody = emptyMockReadBody
        if (inputData.Size < 1) {
          mockMessage.statusCode = 400
        } else if (artifactName === 'non-existent-artifact') {
          mockMessage.statusCode = 404
        } else {
          mockMessage.statusCode = 200
          const response: PatchArtifactSizeSuccessResponse = {
            containerId: 13,
            size: inputData.Size,
            signedContent: 'false',
            type: 'actions_storage',
            name: artifactName,
            url: `${getRuntimeUrl()}_apis/pipelines/1/runs/1/artifacts?artifactName=${artifactName}`,
            uploadUrl: `${getRuntimeUrl()}_apis/resources/Containers/13`
          }
          const returnData: string = JSON.stringify(response, null, 2)
          mockReadBody = async function(): Promise<string> {
            return new Promise(resolve => {
              resolve(returnData)
            })
          }
        }
        return new Promise<HttpClientResponse>(resolve => {
          resolve({
            message: mockMessage,
            readBody: mockReadBody
          })
        })
      })
  }
})
@ -1,239 +0,0 @@
import * as fs from 'fs'
import * as io from '../../io/src/io'
import * as path from 'path'
import * as utils from '../src/internal/utils'
import * as core from '@actions/core'
import {HttpCodes} from '@actions/http-client'
import {
  getRuntimeUrl,
  getWorkFlowRunId,
  getInitialRetryIntervalInMilliseconds,
  getRetryMultiplier
} from '../src/internal/config-variables'
import {Readable} from 'stream'

jest.mock('../src/internal/config-variables')

describe('Utils', () => {
  beforeAll(() => {
    // mock all output so that there is less noise when running tests
    jest.spyOn(console, 'log').mockImplementation(() => {})
    jest.spyOn(core, 'debug').mockImplementation(() => {})
    jest.spyOn(core, 'info').mockImplementation(() => {})
    jest.spyOn(core, 'warning').mockImplementation(() => {})
  })

  it('Check exponential retry range', () => {
    // No retries should return the initial retry interval
    const retryWaitTime0 = utils.getExponentialRetryTimeInMilliseconds(0)
    expect(retryWaitTime0).toEqual(getInitialRetryIntervalInMilliseconds())

    const testMinMaxRange = (retryCount: number): void => {
      const retryWaitTime = utils.getExponentialRetryTimeInMilliseconds(
        retryCount
      )
      const minRange =
        getInitialRetryIntervalInMilliseconds() *
        getRetryMultiplier() *
        retryCount
      const maxRange = minRange * getRetryMultiplier()

      expect(retryWaitTime).toBeGreaterThanOrEqual(minRange)
      expect(retryWaitTime).toBeLessThan(maxRange)
    }

    for (let i = 1; i < 10; i++) {
      testMinMaxRange(i)
    }
  })

  it('Test negative artifact retention throws', () => {
    expect(() => {
      utils.getProperRetention(-1, undefined)
    }).toThrow()
  })

  it('Test no setting specified takes artifact retention input', () => {
    expect(utils.getProperRetention(180, undefined)).toEqual(180)
  })

  it('Test artifact retention must conform to max allowed', () => {
    expect(utils.getProperRetention(180, '45')).toEqual(45)
  })

  it('Test constructing artifact URL', () => {
    const runtimeUrl = getRuntimeUrl()
    const runId = getWorkFlowRunId()
    const artifactUrl = utils.getArtifactUrl()
    expect(artifactUrl).toEqual(
      `${runtimeUrl}_apis/pipelines/workflows/${runId}/artifacts?api-version=${utils.getApiVersion()}`
    )
  })

  it('Test constructing upload headers with all optional parameters', () => {
    const contentType = 'application/octet-stream'
    const size = 24
    const uncompressedLength = 100
    const range = 'bytes 0-199/200'
    const digest = {
      crc64: 'bSzITYnW/P8=',
      md5: 'Xiv1fT9AxLbfadrxk2y3ZvgyN0tPwCWafL/wbi9w8mk='
    }
    const headers = utils.getUploadHeaders(
      contentType,
      true,
      true,
      uncompressedLength,
      size,
      range,
      digest
    )
    expect(Object.keys(headers).length).toEqual(10)
    expect(headers['Accept']).toEqual(
      `application/json;api-version=${utils.getApiVersion()}`
    )
    expect(headers['Content-Type']).toEqual(contentType)
    expect(headers['Connection']).toEqual('Keep-Alive')
    expect(headers['Keep-Alive']).toEqual('10')
    expect(headers['Content-Encoding']).toEqual('gzip')
    expect(headers['x-tfs-filelength']).toEqual(uncompressedLength)
    expect(headers['Content-Length']).toEqual(size)
    expect(headers['Content-Range']).toEqual(range)
    expect(headers['x-actions-results-crc64']).toEqual(digest.crc64)
    expect(headers['x-actions-results-md5']).toEqual(digest.md5)
  })

  it('Test constructing upload headers with only required parameter', () => {
    const headers = utils.getUploadHeaders('application/octet-stream')
    expect(Object.keys(headers).length).toEqual(2)
    expect(headers['Accept']).toEqual(
      `application/json;api-version=${utils.getApiVersion()}`
    )
    expect(headers['Content-Type']).toEqual('application/octet-stream')
  })

  it('Test constructing download headers with all optional parameters', () => {
    const contentType = 'application/json'
    const headers = utils.getDownloadHeaders(contentType, true, true)
    expect(Object.keys(headers).length).toEqual(5)
    expect(headers['Content-Type']).toEqual(contentType)
    expect(headers['Connection']).toEqual('Keep-Alive')
    expect(headers['Keep-Alive']).toEqual('10')
    expect(headers['Accept-Encoding']).toEqual('gzip')
    expect(headers['Accept']).toEqual(
      `application/octet-stream;api-version=${utils.getApiVersion()}`
    )
  })

  it('Test constructing download headers with only required parameter', () => {
    const headers = utils.getDownloadHeaders('application/octet-stream')
    expect(Object.keys(headers).length).toEqual(2)
    expect(headers['Content-Type']).toEqual('application/octet-stream')
    // check for default accept type
    expect(headers['Accept']).toEqual(
      `application/json;api-version=${utils.getApiVersion()}`
    )
  })

  it('Test Success Status Code', () => {
    expect(utils.isSuccessStatusCode(HttpCodes.OK)).toEqual(true)
    expect(utils.isSuccessStatusCode(201)).toEqual(true)
    expect(utils.isSuccessStatusCode(299)).toEqual(true)
    expect(utils.isSuccessStatusCode(HttpCodes.NotFound)).toEqual(false)
    expect(utils.isSuccessStatusCode(HttpCodes.BadGateway)).toEqual(false)
    expect(utils.isSuccessStatusCode(HttpCodes.Forbidden)).toEqual(false)
  })

  it('Test Retry Status Code', () => {
    expect(utils.isRetryableStatusCode(HttpCodes.BadGateway)).toEqual(true)
    expect(utils.isRetryableStatusCode(HttpCodes.ServiceUnavailable)).toEqual(
      true
    )
    expect(utils.isRetryableStatusCode(HttpCodes.GatewayTimeout)).toEqual(true)
    expect(utils.isRetryableStatusCode(HttpCodes.TooManyRequests)).toEqual(true)
    expect(utils.isRetryableStatusCode(HttpCodes.OK)).toEqual(false)
    expect(utils.isRetryableStatusCode(HttpCodes.NotFound)).toEqual(false)
    expect(utils.isRetryableStatusCode(HttpCodes.Forbidden)).toEqual(false)
    expect(utils.isRetryableStatusCode(413)).toEqual(true) // Payload Too Large
  })

  it('Test Throttled Status Code', () => {
    expect(utils.isThrottledStatusCode(HttpCodes.TooManyRequests)).toEqual(true)
    expect(utils.isThrottledStatusCode(HttpCodes.InternalServerError)).toEqual(
      false
    )
    expect(utils.isThrottledStatusCode(HttpCodes.BadGateway)).toEqual(false)
    expect(utils.isThrottledStatusCode(HttpCodes.ServiceUnavailable)).toEqual(
      false
    )
  })

  it('Test Forbidden Status Code', () => {
    expect(utils.isForbiddenStatusCode(HttpCodes.Forbidden)).toEqual(true)
    expect(utils.isForbiddenStatusCode(HttpCodes.InternalServerError)).toEqual(
      false
    )
    expect(utils.isForbiddenStatusCode(HttpCodes.TooManyRequests)).toEqual(
      false
    )
    expect(utils.isForbiddenStatusCode(HttpCodes.OK)).toEqual(false)
  })

  it('Test Creating Artifact Directories', async () => {
    const root = path.join(__dirname, '_temp', 'artifact-download')
    // remove directory before starting
    await io.rmRF(root)

    const directory1 = path.join(root, 'folder2', 'folder3')
    const directory2 = path.join(directory1, 'folder1')

    // Initially should not exist
    await expect(fs.promises.access(directory1)).rejects.not.toBeUndefined()
    await expect(fs.promises.access(directory2)).rejects.not.toBeUndefined()
    const directoryStructure = [directory1, directory2]
    await utils.createDirectoriesForArtifact(directoryStructure)
    // directories should now be created
    await expect(fs.promises.access(directory1)).resolves.toEqual(undefined)
    await expect(fs.promises.access(directory2)).resolves.toEqual(undefined)
  })

  it('Test Creating Empty Files', async () => {
    const root = path.join(__dirname, '_temp', 'empty-files')
    await io.rmRF(root)

    const emptyFile1 = path.join(root, 'emptyFile1')
    const directoryToCreate = path.join(root, 'folder1')
    const emptyFile2 = path.join(directoryToCreate, 'emptyFile2')

    // empty files should only be created after the directory structure is fully setup
    // ensure they are first created by using the createDirectoriesForArtifact method
    const directoryStructure = [root, directoryToCreate]
    await utils.createDirectoriesForArtifact(directoryStructure)
    await expect(fs.promises.access(root)).resolves.toEqual(undefined)
    await expect(fs.promises.access(directoryToCreate)).resolves.toEqual(
      undefined
    )

    await expect(fs.promises.access(emptyFile1)).rejects.not.toBeUndefined()
    await expect(fs.promises.access(emptyFile2)).rejects.not.toBeUndefined()

    const emptyFilesToCreate = [emptyFile1, emptyFile2]
    await utils.createEmptyFilesForArtifact(emptyFilesToCreate)

    await expect(fs.promises.access(emptyFile1)).resolves.toEqual(undefined)
    const size1 = (await fs.promises.stat(emptyFile1)).size
    expect(size1).toEqual(0)
    await expect(fs.promises.access(emptyFile2)).resolves.toEqual(undefined)
    const size2 = (await fs.promises.stat(emptyFile2)).size
    expect(size2).toEqual(0)
  })

  it('Creates a digest from a readable stream', async () => {
    const data = 'lorem ipsum'
    const stream = Readable.from(data)
    const digest = await utils.digestForStream(stream)

    expect(digest.crc64).toBe('bSzITYnW/P8=')
    expect(digest.md5).toBe('gKdR/eV3AoZAxBkADjPrpg==')
  })
})
@ -1,53 +0,0 @@
# Additional Information

Extra information
- [Non-Supported Characters](#Non-Supported-Characters)
- [Permission loss](#Permission-Loss)
- [Considerations](#Considerations)
- [Compression](#Is-my-artifact-compressed)

## Non-Supported Characters

When uploading an artifact, the `name` parameter and the paths specified in `files` cannot contain any of the following characters. The server rejects any upload that contains them and the upload fails. These characters are not allowed due to limitations and restrictions of certain file systems such as NTFS. To maintain platform-agnostic behavior, a character that is not supported by one filesystem/platform is not supported on any of them.

- "
- :
- <
- \>
- |
- \*
- ?

In addition to the characters above, the `name` also cannot include the following
- \
- /
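
A minimal sketch of a defensive name check based on the list above (the helper function is illustrative, not part of this package's API):

```ts
// characters rejected by the artifact service, per the list above
const invalidArtifactNameCharacters = ['"', ':', '<', '>', '|', '*', '?', '\\', '/']

function assertValidArtifactName(name: string): void {
  for (const char of invalidArtifactNameCharacters) {
    if (name.includes(char)) {
      throw new Error(`Artifact name "${name}" contains the invalid character "${char}"`)
    }
  }
}
```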

## Permission Loss

File permissions are not maintained between uploaded and downloaded artifacts. If file permissions need to be maintained (such as for an executable), consider archiving all of the files using something like `tar` and then uploading the single archive. After downloading the artifact, you can `un-tar` the individual files and their permissions will be preserved.

```js
const artifact = require('@actions/artifact');
const artifactClient = artifact.create()
const artifactName = 'my-artifact';
const files = [
  '/home/user/files/plz-upload/my-archive.tgz',
]
const rootDirectory = '/home/user/files/plz-upload'
const uploadResult = await artifactClient.uploadArtifact(artifactName, files, rootDirectory)
```

## Considerations

During upload, each file is uploaded concurrently in 4MB chunks using a separate HTTPS connection per file. Chunked uploads are used so that in the event of a failure (which is entirely possible because the internet is not perfect), the upload can be retried. If there is an error, a retry will be attempted after a certain period of time.

Uploading will generally be faster with a few larger files than with many small ones. Depending on the types and quantities of files being uploaded, it might be beneficial to separately compress and archive everything into a single archive (using something like `tar` or `zip`) before starting the artifact upload, as shown in the sketch below.
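
This is a minimal sketch of the "archive first" approach, not an official recipe; the `tar` flags and every path in it are illustrative placeholders.

```ts
import * as artifact from '@actions/artifact'
import {execFileSync} from 'child_process'

async function uploadAsSingleArchive(): Promise<void> {
  // bundle the whole directory into one compressed archive so that a
  // single large file is uploaded instead of many small ones
  execFileSync('tar', [
    '-czf',
    '/home/user/files/my-archive.tgz',
    '-C',
    '/home/user/files/plz-upload',
    '.'
  ])

  const artifactClient = artifact.create()
  await artifactClient.uploadArtifact(
    'my-archive',
    ['/home/user/files/my-archive.tgz'],
    '/home/user/files'
  )
}
```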

## Is my artifact compressed?

GZip is used internally to compress individual files before starting an upload. Compression helps reduce the total amount of data that must be uploaded and stored, while also speeding up uploads (this performance benefit is significant, especially on self-hosted runners). If GZip does not reduce the size of the file that is being uploaded, the original file is uploaded as-is.

Compression using GZip also helps speed up artifact download as part of a workflow. Header information is used to determine if an individual file was uploaded using GZip and, if necessary, decompression is used.
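
The upload decision described above comes down to a size comparison. A minimal sketch of the idea using Node's built-in `zlib` (this mirrors the behavior, not the package's exact internal code):

```ts
import {promises as fs} from 'fs'
import {gzipSync} from 'zlib'

// returns true when the gzipped copy of the file is actually smaller,
// i.e. when it is worth uploading the compressed version
async function gzipSavesSpace(filePath: string): Promise<boolean> {
  const original = await fs.readFile(filePath)
  const compressed = gzipSync(original)
  return compressed.byteLength < original.byteLength
}
```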

When downloading an artifact from the GitHub UI (this differs from downloading an artifact during a workflow), a single Zip file is dynamically created that contains all of the files uploaded as part of an artifact. Any files that were uploaded using GZip will be decompressed on the server before being added to the Zip file with the remaining files.
@ -0,0 +1 @@
Docs will be added here once development of version `2.0.0` has finished
@ -1,57 +0,0 @@
# Implementation Details

Warning: Implementation details may change at any time without notice. This is meant to serve as a reference to help users understand the package.

## Upload/Compression flow

![image](https://user-images.githubusercontent.com/16109154/79765587-19522b00-8327-11ea-9679-410bb10e1b13.png)

During artifact upload, gzip is used to compress individual files before they are uploaded. This minimizes the amount of data that gets transferred, which reduces the total number of HTTP calls (upload happens in 4MB chunks). This results in considerably faster uploads, with significant performance gains especially on self-hosted runners.

If a file is less than 64KB in size, a passthrough stream (readable and writable) is used to convert an in-memory buffer into a readable stream without any extra streams or piping.
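
A minimal sketch of that small-file path, assuming Node's `stream` module (the helper name is illustrative, not part of the package's public API):

```ts
import {PassThrough} from 'stream'

// wrap an in-memory buffer in a PassThrough so it can be consumed
// as a readable stream by the upload code
function bufferToStream(buffer: Buffer): NodeJS.ReadableStream {
  const passThrough = new PassThrough()
  passThrough.end(buffer) // writes the whole buffer and signals end-of-stream
  return passThrough
}
```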

## Retry Logic when downloading an individual file

![image](https://user-images.githubusercontent.com/16109154/78555461-5be71400-780d-11ea-9abd-b05b77a95a3f.png)

## Proxy support

This package uses the `@actions/http-client` NPM package internally, which supports proxied requests out of the box.

## HttpManager

### `keep-alive` header

When an HTTP call is made to upload or download an individual file, the server will close the HTTP connection after the upload/download is complete and respond with a header indicating `Connection: close`.

[HTTP closed connection header information](https://tools.ietf.org/html/rfc2616#section-14.10)

TCP connections are sometimes not immediately closed by the Node client (Windows, for example, might hold on to the port for an extra period of time before actually releasing it), and a large number of closed connections can cause port exhaustion before ports get released and become available again.

VMs hosted by GitHub Actions have 1024 available ports, so uploading 1000+ files very quickly can cause port exhaustion if connections get closed immediately. This can start to cause strange undefined behavior and timeouts.

In order for connections to not close immediately, the `keep-alive` header is used to indicate to the server that the connection should stay open. If a `keep-alive` header is used, the connection needs to be disposed of by calling `dispose()` on the `HttpClient`.

[`keep-alive` header information](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Keep-Alive)
[@actions/http-client client disposal](https://github.com/actions/http-client/blob/04e5ad73cd3fd1f5610a32116b0759eddf6570d2/index.ts#L292)
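
A minimal sketch of the pattern, using the same header values this package sends (`Connection: Keep-Alive`, `Keep-Alive: 10`); the URL and function are illustrative:

```ts
import {HttpClient} from '@actions/http-client'

async function getWithKeepAlive(url: string): Promise<string> {
  const client = new HttpClient('actions/artifact')
  try {
    const response = await client.get(url, {
      Connection: 'Keep-Alive',
      'Keep-Alive': '10'
    })
    return await response.readBody()
  } finally {
    // a client that sent keep-alive headers must be explicitly disposed of
    client.dispose()
  }
}
```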
### Multiple HTTP clients
|
||||
|
||||
During an artifact upload or download, files are concurrently uploaded or downloaded using `async/await`. When an error or retry is encountered, the `HttpClient` that made a call is disposed of and a new one is created. If a single `HttpClient` was used for all HTTP calls and it had to be disposed, it could inadvertently effect any other calls that could be concurrently happening.
|
||||
|
||||
Any other concurrent uploads or downloads should be left untouched. Because of this, each concurrent upload or download gets its own `HttpClient`. The `http-manager` is used to manage all available clients and each concurrent upload or download maintains a `httpClientIndex` that keep track of which client should be used (and potentially disposed and recycled if necessary)

### Potential resource leaks

When an HTTP response is received, it consists of two parts:

- `message`
- `body`

The `message` contains information such as the response code and headers, and it is available immediately. The body, however, is not available immediately and can be read by calling `await response.readBody()`.

TCP connections consist of an input and output buffer to manage what is sent and received across a connection. If the body is not read (even if its contents are not needed), the buffers can stay in use even after `dispose()` gets called on the `HttpClient`. The buffers get released automatically after a certain period of time, but in order for them to be explicitly cleared, `readBody()` is always called.
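
In practice that means every response gets drained, even when its contents are irrelevant (sketch):

```ts
import {HttpClient} from '@actions/http-client'

async function getJson(client: HttpClient, url: string): Promise<unknown> {
  const response = await client.get(url)
  // read the body unconditionally so the connection's buffers are released
  const body = await response.readBody()
  if (response.message.statusCode !== 200) {
    throw new Error(`Unexpected status code ${response.message.statusCode}`)
  }
  return JSON.parse(body)
}
```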

### Non-concurrent calls

Neither `upload-http-client` nor `download-http-client` instantiates or creates any HTTP clients (the `HttpManager` has that responsibility). If an HTTP call has to be made that does not require the `keep-alive` header (such as calling `listArtifacts` or `patchArtifactSize`), the first `HttpClient` in the `HttpManager` is used. The number of available clients is equal to the upload or download concurrency, and there will always be at least one available.

@@ -1,164 +1,38 @@
 {
   "name": "@actions/artifact",
-  "version": "1.1.1",
+  "version": "2.0.0",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "@actions/artifact",
-      "version": "1.1.1",
+      "version": "2.0.0",
       "license": "MIT",
       "dependencies": {
-        "@actions/core": "^1.9.1",
-        "@actions/http-client": "^2.0.1",
-        "tmp": "^0.2.1",
-        "tmp-promise": "^3.0.2"
+        "@actions/core": "^1.10.0",
+        "@actions/http-client": "^2.1.0"
       },
       "devDependencies": {
-        "@types/tmp": "^0.2.1",
-        "typescript": "^3.8.3"
+        "typescript": "^3.9.10"
       }
     },
     "node_modules/@actions/core": {
-      "version": "1.9.1",
-      "resolved": "https://registry.npmjs.org/@actions/core/-/core-1.9.1.tgz",
-      "integrity": "sha512-5ad+U2YGrmmiw6du20AQW5XuWo7UKN2052FjSV7MX+Wfjf8sCqcsZe62NfgHys4QI4/Y+vQvLKYL8jWtA1ZBTA==",
+      "version": "1.10.0",
+      "resolved": "https://registry.npmjs.org/@actions/core/-/core-1.10.0.tgz",
+      "integrity": "sha512-2aZDDa3zrrZbP5ZYg159sNoLRb61nQ7awl5pSvIq5Qpj81vwDzdMRKzkWJGJuwVvWpvZKx7vspJALyvaaIQyug==",
       "dependencies": {
         "@actions/http-client": "^2.0.1",
         "uuid": "^8.3.2"
       }
     },
     "node_modules/@actions/http-client": {
-      "version": "2.0.1",
-      "resolved": "https://registry.npmjs.org/@actions/http-client/-/http-client-2.0.1.tgz",
-      "integrity": "sha512-PIXiMVtz6VvyaRsGY268qvj57hXQEpsYogYOu2nrQhlf+XCGmZstmuZBbAybUl1nQGnvS1k1eEsQ69ZoD7xlSw==",
+      "version": "2.1.0",
+      "resolved": "https://registry.npmjs.org/@actions/http-client/-/http-client-2.1.0.tgz",
+      "integrity": "sha512-BonhODnXr3amchh4qkmjPMUO8mFi/zLaaCeCAJZqch8iQqyDnVIkySjB38VHAC8IJ+bnlgfOqlhpyCUZHlQsqw==",
       "dependencies": {
         "tunnel": "^0.0.6"
       }
     },
-    "node_modules/@types/tmp": {
-      "version": "0.2.3",
-      "resolved": "https://registry.npmjs.org/@types/tmp/-/tmp-0.2.3.tgz",
-      "integrity": "sha512-dDZH/tXzwjutnuk4UacGgFRwV+JSLaXL1ikvidfJprkb7L9Nx1njcRHHmi3Dsvt7pgqqTEeucQuOrWHPFgzVHA==",
-      "dev": true
-    },
-    "node_modules/balanced-match": {
-      "version": "1.0.2",
-      "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
-      "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="
-    },
-    "node_modules/brace-expansion": {
-      "version": "1.1.11",
-      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
-      "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
-      "dependencies": {
-        "balanced-match": "^1.0.0",
-        "concat-map": "0.0.1"
-      }
-    },
-    "node_modules/concat-map": {
-      "version": "0.0.1",
-      "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
-      "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s="
-    },
-    "node_modules/fs.realpath": {
-      "version": "1.0.0",
-      "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
-      "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8="
-    },
-    "node_modules/glob": {
-      "version": "7.2.0",
-      "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.0.tgz",
-      "integrity": "sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q==",
-      "dependencies": {
-        "fs.realpath": "^1.0.0",
-        "inflight": "^1.0.4",
-        "inherits": "2",
-        "minimatch": "^3.0.4",
-        "once": "^1.3.0",
-        "path-is-absolute": "^1.0.0"
-      },
-      "engines": {
-        "node": "*"
-      },
-      "funding": {
-        "url": "https://github.com/sponsors/isaacs"
-      }
-    },
-    "node_modules/inflight": {
-      "version": "1.0.6",
-      "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
-      "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=",
-      "dependencies": {
-        "once": "^1.3.0",
-        "wrappy": "1"
-      }
-    },
-    "node_modules/inherits": {
-      "version": "2.0.4",
-      "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
-      "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
-    },
-    "node_modules/minimatch": {
-      "version": "3.1.2",
-      "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
-      "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
-      "dependencies": {
-        "brace-expansion": "^1.1.7"
-      },
-      "engines": {
-        "node": "*"
-      }
-    },
-    "node_modules/once": {
-      "version": "1.4.0",
-      "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
-      "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=",
-      "dependencies": {
-        "wrappy": "1"
-      }
-    },
-    "node_modules/path-is-absolute": {
-      "version": "1.0.1",
-      "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
-      "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=",
-      "engines": {
-        "node": ">=0.10.0"
-      }
-    },
-    "node_modules/rimraf": {
-      "version": "3.0.2",
-      "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz",
-      "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==",
-      "dependencies": {
-        "glob": "^7.1.3"
-      },
-      "bin": {
-        "rimraf": "bin.js"
-      },
-      "funding": {
-        "url": "https://github.com/sponsors/isaacs"
-      }
-    },
-    "node_modules/tmp": {
-      "version": "0.2.1",
-      "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.1.tgz",
-      "integrity": "sha512-76SUhtfqR2Ijn+xllcI5P1oyannHNHByD80W1q447gU3mp9G9PSpGdWmjUOHRDPiHYacIk66W7ubDTuPF3BEtQ==",
-      "dependencies": {
-        "rimraf": "^3.0.0"
-      },
-      "engines": {
-        "node": ">=8.17.0"
-      }
-    },
-    "node_modules/tmp-promise": {
-      "version": "3.0.3",
-      "resolved": "https://registry.npmjs.org/tmp-promise/-/tmp-promise-3.0.3.tgz",
-      "integrity": "sha512-RwM7MoPojPxsOBYnyd2hy0bxtIlVrihNs9pj5SUvY8Zz1sQcQG2tG1hSr8PDxfgEB8RNKDhqbIlroIarSNDNsQ==",
-      "dependencies": {
-        "tmp": "^0.2.0"
-      }
-    },
     "node_modules/tunnel": {
       "version": "0.0.6",
       "resolved": "https://registry.npmjs.org/tunnel/-/tunnel-0.0.6.tgz",
@@ -187,133 +61,26 @@
       "bin": {
         "uuid": "dist/bin/uuid"
       }
-    },
-    "node_modules/wrappy": {
-      "version": "1.0.2",
-      "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
-      "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8="
-    }
+    }
   },
   "dependencies": {
     "@actions/core": {
-      "version": "1.9.1",
-      "resolved": "https://registry.npmjs.org/@actions/core/-/core-1.9.1.tgz",
-      "integrity": "sha512-5ad+U2YGrmmiw6du20AQW5XuWo7UKN2052FjSV7MX+Wfjf8sCqcsZe62NfgHys4QI4/Y+vQvLKYL8jWtA1ZBTA==",
+      "version": "1.10.0",
+      "resolved": "https://registry.npmjs.org/@actions/core/-/core-1.10.0.tgz",
+      "integrity": "sha512-2aZDDa3zrrZbP5ZYg159sNoLRb61nQ7awl5pSvIq5Qpj81vwDzdMRKzkWJGJuwVvWpvZKx7vspJALyvaaIQyug==",
       "requires": {
         "@actions/http-client": "^2.0.1",
         "uuid": "^8.3.2"
       }
     },
     "@actions/http-client": {
-      "version": "2.0.1",
-      "resolved": "https://registry.npmjs.org/@actions/http-client/-/http-client-2.0.1.tgz",
-      "integrity": "sha512-PIXiMVtz6VvyaRsGY268qvj57hXQEpsYogYOu2nrQhlf+XCGmZstmuZBbAybUl1nQGnvS1k1eEsQ69ZoD7xlSw==",
+      "version": "2.1.0",
+      "resolved": "https://registry.npmjs.org/@actions/http-client/-/http-client-2.1.0.tgz",
+      "integrity": "sha512-BonhODnXr3amchh4qkmjPMUO8mFi/zLaaCeCAJZqch8iQqyDnVIkySjB38VHAC8IJ+bnlgfOqlhpyCUZHlQsqw==",
       "requires": {
         "tunnel": "^0.0.6"
       }
     },
-    "@types/tmp": {
-      "version": "0.2.3",
-      "resolved": "https://registry.npmjs.org/@types/tmp/-/tmp-0.2.3.tgz",
-      "integrity": "sha512-dDZH/tXzwjutnuk4UacGgFRwV+JSLaXL1ikvidfJprkb7L9Nx1njcRHHmi3Dsvt7pgqqTEeucQuOrWHPFgzVHA==",
-      "dev": true
-    },
-    "balanced-match": {
-      "version": "1.0.2",
-      "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
-      "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="
-    },
-    "brace-expansion": {
-      "version": "1.1.11",
-      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
-      "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
-      "requires": {
-        "balanced-match": "^1.0.0",
-        "concat-map": "0.0.1"
-      }
-    },
-    "concat-map": {
-      "version": "0.0.1",
-      "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
-      "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s="
-    },
-    "fs.realpath": {
-      "version": "1.0.0",
-      "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
-      "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8="
-    },
-    "glob": {
-      "version": "7.2.0",
-      "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.0.tgz",
-      "integrity": "sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q==",
-      "requires": {
-        "fs.realpath": "^1.0.0",
-        "inflight": "^1.0.4",
-        "inherits": "2",
-        "minimatch": "^3.0.4",
-        "once": "^1.3.0",
-        "path-is-absolute": "^1.0.0"
-      }
-    },
-    "inflight": {
-      "version": "1.0.6",
-      "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
-      "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=",
-      "requires": {
-        "once": "^1.3.0",
-        "wrappy": "1"
-      }
-    },
-    "inherits": {
-      "version": "2.0.4",
-      "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
-      "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
-    },
-    "minimatch": {
-      "version": "3.1.2",
-      "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
-      "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
-      "requires": {
-        "brace-expansion": "^1.1.7"
-      }
-    },
-    "once": {
-      "version": "1.4.0",
-      "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
-      "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=",
-      "requires": {
-        "wrappy": "1"
-      }
-    },
-    "path-is-absolute": {
-      "version": "1.0.1",
-      "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
-      "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18="
-    },
-    "rimraf": {
-      "version": "3.0.2",
-      "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz",
-      "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==",
-      "requires": {
-        "glob": "^7.1.3"
-      }
-    },
-    "tmp": {
-      "version": "0.2.1",
-      "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.1.tgz",
-      "integrity": "sha512-76SUhtfqR2Ijn+xllcI5P1oyannHNHByD80W1q447gU3mp9G9PSpGdWmjUOHRDPiHYacIk66W7ubDTuPF3BEtQ==",
-      "requires": {
-        "rimraf": "^3.0.0"
-      }
-    },
-    "tmp-promise": {
-      "version": "3.0.3",
-      "resolved": "https://registry.npmjs.org/tmp-promise/-/tmp-promise-3.0.3.tgz",
-      "integrity": "sha512-RwM7MoPojPxsOBYnyd2hy0bxtIlVrihNs9pj5SUvY8Zz1sQcQG2tG1hSr8PDxfgEB8RNKDhqbIlroIarSNDNsQ==",
-      "requires": {
-        "tmp": "^0.2.0"
-      }
-    },
     "tunnel": {
       "version": "0.0.6",
       "resolved": "https://registry.npmjs.org/tunnel/-/tunnel-0.0.6.tgz",
@@ -329,11 +96,6 @@
       "version": "8.3.2",
       "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz",
       "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg=="
-    },
-    "wrappy": {
-      "version": "1.0.2",
-      "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
-      "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8="
-    }
+    }
   }
 }

@@ -1,6 +1,6 @@
 {
   "name": "@actions/artifact",
-  "version": "1.1.1",
+  "version": "2.0.0",
   "preview": true,
   "description": "Actions artifact lib",
   "keywords": [
@@ -37,13 +37,10 @@
     "url": "https://github.com/actions/toolkit/issues"
   },
   "dependencies": {
-    "@actions/core": "^1.9.1",
-    "@actions/http-client": "^2.0.1",
-    "tmp": "^0.2.1",
-    "tmp-promise": "^3.0.2"
+    "@actions/core": "^1.10.0",
+    "@actions/http-client": "^2.1.0"
   },
   "devDependencies": {
-    "@types/tmp": "^0.2.1",
-    "typescript": "^3.8.3"
+    "typescript": "^3.9.10"
   }
 }

@@ -1,20 +0,0 @@
import {UploadOptions} from './internal/upload-options'
import {UploadResponse} from './internal/upload-response'
import {DownloadOptions} from './internal/download-options'
import {DownloadResponse} from './internal/download-response'
import {ArtifactClient, DefaultArtifactClient} from './internal/artifact-client'

export {
  ArtifactClient,
  UploadResponse,
  UploadOptions,
  DownloadResponse,
  DownloadOptions
}

/**
 * Constructs an ArtifactClient
 */
export function create(): ArtifactClient {
  return DefaultArtifactClient.create()
}
@@ -0,0 +1,12 @@
import {ArtifactClient, Client} from './internal/client'
import {UploadOptions} from './internal/upload/upload-options'
import {UploadResponse} from './internal/upload/upload-response'

/**
 * Exported functionality that we want to expose for any users of @actions/artifact
 */
export {ArtifactClient, UploadOptions, UploadResponse}

export function create(): ArtifactClient {
  return Client.create()
}
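
A sketch of how a consumer would call this new entry point (the file paths are hypothetical, and an async context is assumed):

```ts
import {create} from '@actions/artifact'

async function run(): Promise<void> {
  const client = create()
  // artifact name, file list, and the root directory the paths are rooted against
  await client.uploadArtifact('my-artifact', ['dist/output.txt'], 'dist')
}
```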

@@ -1,51 +0,0 @@
/**
 * Mocks default limits for easier testing
 */
export function getUploadFileConcurrency(): number {
  return 1
}

export function getUploadChunkConcurrency(): number {
  return 1
}

export function getUploadChunkSize(): number {
  return 4 * 1024 * 1024 // 4 MB Chunks
}

export function getRetryLimit(): number {
  return 2
}

export function getRetryMultiplier(): number {
  return 1.5
}

export function getInitialRetryIntervalInMilliseconds(): number {
  return 10
}

export function getDownloadFileConcurrency(): number {
  return 1
}

/**
 * Mocks the 'ACTIONS_RUNTIME_TOKEN', 'ACTIONS_RUNTIME_URL' and 'GITHUB_RUN_ID' env variables
 * that are only available from a node context on the runner. This allows for tests to run
 * locally without the env variables actually being set
 */
export function getRuntimeToken(): string {
  return 'totally-valid-token'
}

export function getRuntimeUrl(): string {
  return 'https://www.example.com/'
}

export function getWorkFlowRunId(): string {
  return '15'
}

export function getRetentionDays(): string | undefined {
  return '45'
}

@@ -1,282 +0,0 @@
import * as core from '@actions/core'
import {
  UploadSpecification,
  getUploadSpecification
} from './upload-specification'
import {UploadHttpClient} from './upload-http-client'
import {UploadResponse} from './upload-response'
import {UploadOptions} from './upload-options'
import {DownloadOptions} from './download-options'
import {DownloadResponse} from './download-response'
import {
  createDirectoriesForArtifact,
  createEmptyFilesForArtifact
} from './utils'
import {checkArtifactName} from './path-and-artifact-name-validation'
import {DownloadHttpClient} from './download-http-client'
import {getDownloadSpecification} from './download-specification'
import {getWorkSpaceDirectory} from './config-variables'
import {normalize, resolve} from 'path'

export interface ArtifactClient {
  /**
   * Uploads an artifact
   *
   * @param name the name of the artifact, required
   * @param files a list of absolute or relative paths that denote what files should be uploaded
   * @param rootDirectory an absolute or relative file path that denotes the root parent directory of the files being uploaded
   * @param options extra options for customizing the upload behavior
   * @returns single UploadInfo object
   */
  uploadArtifact(
    name: string,
    files: string[],
    rootDirectory: string,
    options?: UploadOptions
  ): Promise<UploadResponse>

  /**
   * Downloads a single artifact associated with a run
   *
   * @param name the name of the artifact being downloaded
   * @param path optional path that denotes where the artifact will be downloaded to
   * @param options extra options that allow for the customization of the download behavior
   */
  downloadArtifact(
    name: string,
    path?: string,
    options?: DownloadOptions
  ): Promise<DownloadResponse>

  /**
   * Downloads all artifacts associated with a run. Because there are multiple artifacts being downloaded, a folder will be created for each one in the specified or default directory
   * @param path optional path that denotes where the artifacts will be downloaded to
   */
  downloadAllArtifacts(path?: string): Promise<DownloadResponse[]>
}

export class DefaultArtifactClient implements ArtifactClient {
  /**
   * Constructs a DefaultArtifactClient
   */
  static create(): DefaultArtifactClient {
    return new DefaultArtifactClient()
  }

  /**
   * Uploads an artifact
   */
  async uploadArtifact(
    name: string,
    files: string[],
    rootDirectory: string,
    options?: UploadOptions | undefined
  ): Promise<UploadResponse> {
    core.info(
      `Starting artifact upload
For more detailed logs during the artifact upload process, enable step-debugging: https://docs.github.com/actions/monitoring-and-troubleshooting-workflows/enabling-debug-logging#enabling-step-debug-logging`
    )
    checkArtifactName(name)

    // Get specification for the files being uploaded
    const uploadSpecification: UploadSpecification[] = getUploadSpecification(
      name,
      rootDirectory,
      files
    )
    const uploadResponse: UploadResponse = {
      artifactName: name,
      artifactItems: [],
      size: 0,
      failedItems: []
    }

    const uploadHttpClient = new UploadHttpClient()

    if (uploadSpecification.length === 0) {
      core.warning(`No files found that can be uploaded`)
    } else {
      // Create an entry for the artifact in the file container
      const response = await uploadHttpClient.createArtifactInFileContainer(
        name,
        options
      )
      if (!response.fileContainerResourceUrl) {
        core.debug(response.toString())
        throw new Error(
          'No URL provided by the Artifact Service to upload an artifact to'
        )
      }

      core.debug(`Upload Resource URL: ${response.fileContainerResourceUrl}`)
      core.info(
        `Container for artifact "${name}" successfully created. Starting upload of file(s)`
      )

      // Upload each of the files that were found concurrently
      const uploadResult = await uploadHttpClient.uploadArtifactToFileContainer(
        response.fileContainerResourceUrl,
        uploadSpecification,
        options
      )

      // Update the size of the artifact to indicate we are done uploading
      // The uncompressed size is used for display when downloading a zip of the artifact from the UI
      core.info(
        `File upload process has finished. Finalizing the artifact upload`
      )
      await uploadHttpClient.patchArtifactSize(uploadResult.totalSize, name)

      if (uploadResult.failedItems.length > 0) {
        core.info(
          `Upload finished. There were ${uploadResult.failedItems.length} items that failed to upload`
        )
      } else {
        core.info(
          `Artifact has been finalized. All files have been successfully uploaded!`
        )
      }

      core.info(
        `
The raw size of all the files that were specified for upload is ${uploadResult.totalSize} bytes
The size of all the files that were uploaded is ${uploadResult.uploadSize} bytes. This takes into account any gzip compression used to reduce the upload size, time and storage

Note: The size of downloaded zips can differ significantly from the reported size. For more information see: https://github.com/actions/upload-artifact#zipped-artifact-downloads \r\n`
      )

      uploadResponse.artifactItems = uploadSpecification.map(
        item => item.absoluteFilePath
      )
      uploadResponse.size = uploadResult.uploadSize
      uploadResponse.failedItems = uploadResult.failedItems
    }
    return uploadResponse
  }

  async downloadArtifact(
    name: string,
    path?: string | undefined,
    options?: DownloadOptions | undefined
  ): Promise<DownloadResponse> {
    const downloadHttpClient = new DownloadHttpClient()

    const artifacts = await downloadHttpClient.listArtifacts()
    if (artifacts.count === 0) {
      throw new Error(
        `Unable to find any artifacts for the associated workflow`
      )
    }

    const artifactToDownload = artifacts.value.find(artifact => {
      return artifact.name === name
    })
    if (!artifactToDownload) {
      throw new Error(`Unable to find an artifact with the name: ${name}`)
    }

    const items = await downloadHttpClient.getContainerItems(
      artifactToDownload.name,
      artifactToDownload.fileContainerResourceUrl
    )

    if (!path) {
      path = getWorkSpaceDirectory()
    }
    path = normalize(path)
    path = resolve(path)

    // During upload, empty directories are rejected by the remote server so there should be no artifacts that consist of only empty directories
    const downloadSpecification = getDownloadSpecification(
      name,
      items.value,
      path,
      options?.createArtifactFolder || false
    )

    if (downloadSpecification.filesToDownload.length === 0) {
      core.info(
        `No downloadable files were found for the artifact: ${artifactToDownload.name}`
      )
    } else {
      // Create all necessary directories recursively before starting any download
      await createDirectoriesForArtifact(
        downloadSpecification.directoryStructure
      )
      core.info('Directory structure has been set up for the artifact')
      await createEmptyFilesForArtifact(
        downloadSpecification.emptyFilesToCreate
      )
      await downloadHttpClient.downloadSingleArtifact(
        downloadSpecification.filesToDownload
      )
    }

    return {
      artifactName: name,
      downloadPath: downloadSpecification.rootDownloadLocation
    }
  }

  async downloadAllArtifacts(
    path?: string | undefined
  ): Promise<DownloadResponse[]> {
    const downloadHttpClient = new DownloadHttpClient()

    const response: DownloadResponse[] = []
    const artifacts = await downloadHttpClient.listArtifacts()
    if (artifacts.count === 0) {
      core.info('Unable to find any artifacts for the associated workflow')
      return response
    }

    if (!path) {
      path = getWorkSpaceDirectory()
    }
    path = normalize(path)
    path = resolve(path)

    let downloadedArtifacts = 0
    while (downloadedArtifacts < artifacts.count) {
      const currentArtifactToDownload = artifacts.value[downloadedArtifacts]
      downloadedArtifacts += 1
      core.info(
        `starting download of artifact ${currentArtifactToDownload.name} : ${downloadedArtifacts}/${artifacts.count}`
      )

      // Get container entries for the specific artifact
      const items = await downloadHttpClient.getContainerItems(
        currentArtifactToDownload.name,
        currentArtifactToDownload.fileContainerResourceUrl
      )

      const downloadSpecification = getDownloadSpecification(
        currentArtifactToDownload.name,
        items.value,
        path,
        true
      )
      if (downloadSpecification.filesToDownload.length === 0) {
        core.info(
          `No downloadable files were found for any artifact ${currentArtifactToDownload.name}`
        )
      } else {
        await createDirectoriesForArtifact(
          downloadSpecification.directoryStructure
        )
        await createEmptyFilesForArtifact(
          downloadSpecification.emptyFilesToCreate
        )
        await downloadHttpClient.downloadSingleArtifact(
          downloadSpecification.filesToDownload
        )
      }

      response.push({
        artifactName: currentArtifactToDownload.name,
        downloadPath: downloadSpecification.rootDownloadLocation
      })
    }
    return response
  }
}

@@ -0,0 +1,44 @@
import {UploadOptions} from './upload/upload-options'
import {UploadResponse} from './upload/upload-response'
import {uploadArtifact} from './upload/upload-artifact'

export interface ArtifactClient {
  /**
   * Uploads an artifact
   *
   * @param name the name of the artifact, required
   * @param files a list of absolute or relative paths that denote what files should be uploaded
   * @param rootDirectory an absolute or relative file path that denotes the root parent directory of the files being uploaded
   * @param options extra options for customizing the upload behavior
   * @returns single UploadInfo object
   */
  uploadArtifact(
    name: string,
    files: string[],
    rootDirectory: string,
    options?: UploadOptions
  ): Promise<UploadResponse>

  // TODO Download functionality
}

export class Client implements ArtifactClient {
  /**
   * Constructs a Client
   */
  static create(): Client {
    return new Client()
  }

  /**
   * Uploads an artifact
   */
  async uploadArtifact(
    name: string,
    files: string[],
    rootDirectory: string,
    options?: UploadOptions | undefined
  ): Promise<UploadResponse> {
    return uploadArtifact(name, files, rootDirectory, options)
  }
}

@@ -1,67 +0,0 @@
// The number of concurrent uploads that happens at the same time
export function getUploadFileConcurrency(): number {
  return 2
}

// When uploading large files that can't be uploaded with a single http call, this controls
// the chunk size that is used during upload
export function getUploadChunkSize(): number {
  return 8 * 1024 * 1024 // 8 MB Chunks
}

// The maximum number of retries that can be attempted before an upload or download fails
export function getRetryLimit(): number {
  return 5
}

// With exponential backoff, the larger the retry count, the larger the wait time before another attempt
// The retry multiplier controls by how much the backOff time increases depending on the number of retries
export function getRetryMultiplier(): number {
  return 1.5
}

// The initial wait time if an upload or download fails and a retry is being attempted for the first time
export function getInitialRetryIntervalInMilliseconds(): number {
  return 3000
}

// The number of concurrent downloads that happens at the same time
export function getDownloadFileConcurrency(): number {
  return 2
}

export function getRuntimeToken(): string {
  const token = process.env['ACTIONS_RUNTIME_TOKEN']
  if (!token) {
    throw new Error('Unable to get ACTIONS_RUNTIME_TOKEN env variable')
  }
  return token
}

export function getRuntimeUrl(): string {
  const runtimeUrl = process.env['ACTIONS_RUNTIME_URL']
  if (!runtimeUrl) {
    throw new Error('Unable to get ACTIONS_RUNTIME_URL env variable')
  }
  return runtimeUrl
}

export function getWorkFlowRunId(): string {
  const workFlowRunId = process.env['GITHUB_RUN_ID']
  if (!workFlowRunId) {
    throw new Error('Unable to get GITHUB_RUN_ID env variable')
  }
  return workFlowRunId
}

export function getWorkSpaceDirectory(): string {
  const workspaceDirectory = process.env['GITHUB_WORKSPACE']
  if (!workspaceDirectory) {
    throw new Error('Unable to get GITHUB_WORKSPACE env variable')
  }
  return workspaceDirectory
}

export function getRetentionDays(): string | undefined {
  return process.env['GITHUB_RETENTION_DAYS']
}
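
The retry knobs above plausibly combine along these lines; the actual helper (`getExponentialRetryTimeInMilliseconds`, defined in `utils`) is not part of this diff, so the formula below is an assumption for illustration only:

```ts
// Assumed reconstruction: the initial interval grown by the multiplier on
// each successive retry (retryCount starts at 1).
function exponentialBackoffMilliseconds(retryCount: number): number {
  return (
    getInitialRetryIntervalInMilliseconds() *
    Math.pow(getRetryMultiplier(), retryCount - 1)
  )
}
```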

@@ -1,76 +0,0 @@
export interface ArtifactResponse {
  containerId: string
  size: number
  signedContent: string
  fileContainerResourceUrl: string
  type: string
  name: string
  url: string
}

export interface CreateArtifactParameters {
  Type: string
  Name: string
  RetentionDays?: number
}

export interface PatchArtifactSize {
  Size: number
}

export interface PatchArtifactSizeSuccessResponse {
  containerId: number
  size: number
  signedContent: string
  type: string
  name: string
  url: string
  uploadUrl: string
}

export interface UploadResults {
  /**
   * The size in bytes of data that was transferred during the upload process to the actions backend service. This takes into account possible
   * gzip compression to reduce the amount of data that needs to be transferred
   */
  uploadSize: number

  /**
   * The raw size of the files that were specified for upload
   */
  totalSize: number

  /**
   * An array of files that failed to upload
   */
  failedItems: string[]
}

export interface ListArtifactsResponse {
  count: number
  value: ArtifactResponse[]
}

export interface QueryArtifactResponse {
  count: number
  value: ContainerEntry[]
}

export interface ContainerEntry {
  containerId: number
  scopeIdentifier: string
  path: string
  itemType: string
  status: string
  fileLength?: number
  fileEncoding?: number
  fileType?: number
  dateCreated: string
  dateLastModified: string
  createdBy: string
  lastModifiedBy: string
  itemLocation: string
  contentLocation: string
  fileId?: number
  contentId: string
}

@@ -1,317 +0,0 @@
/**
 * CRC64: cyclic redundancy check, 64-bits
 *
 * In order to validate that artifacts are not being corrupted over the wire, this redundancy check allows us to
 * validate that there was no corruption during transmission. The implementation here is based on Go's hash/crc64 pkg,
 * but without the slicing-by-8 optimization: https://cs.opensource.google/go/go/+/master:src/hash/crc64/crc64.go
 *
 * This implementation uses a pregenerated table based on 0x9A6C9329AC4BC9B5 as the polynomial, the same polynomial that
 * is used for Azure Storage: https://github.com/Azure/azure-storage-net/blob/cbe605f9faa01bfc3003d75fc5a16b2eaccfe102/Lib/Common/Core/Util/Crc64.cs#L27
 */

// when transpile target is >= ES2020 (after dropping node 12) these can be changed to bigint literals - ts(2737)
const PREGEN_POLY_TABLE = [
  BigInt('0x0000000000000000'),
  BigInt('0x7F6EF0C830358979'),
  BigInt('0xFEDDE190606B12F2'),
  BigInt('0x81B31158505E9B8B'),
  BigInt('0xC962E5739841B68F'),
  BigInt('0xB60C15BBA8743FF6'),
  BigInt('0x37BF04E3F82AA47D'),
  BigInt('0x48D1F42BC81F2D04'),
  BigInt('0xA61CECB46814FE75'),
  BigInt('0xD9721C7C5821770C'),
  BigInt('0x58C10D24087FEC87'),
  BigInt('0x27AFFDEC384A65FE'),
  BigInt('0x6F7E09C7F05548FA'),
  BigInt('0x1010F90FC060C183'),
  BigInt('0x91A3E857903E5A08'),
  BigInt('0xEECD189FA00BD371'),
  BigInt('0x78E0FF3B88BE6F81'),
  BigInt('0x078E0FF3B88BE6F8'),
  BigInt('0x863D1EABE8D57D73'),
  BigInt('0xF953EE63D8E0F40A'),
  BigInt('0xB1821A4810FFD90E'),
  BigInt('0xCEECEA8020CA5077'),
  BigInt('0x4F5FFBD87094CBFC'),
  BigInt('0x30310B1040A14285'),
  BigInt('0xDEFC138FE0AA91F4'),
  BigInt('0xA192E347D09F188D'),
  BigInt('0x2021F21F80C18306'),
  BigInt('0x5F4F02D7B0F40A7F'),
  BigInt('0x179EF6FC78EB277B'),
  BigInt('0x68F0063448DEAE02'),
  BigInt('0xE943176C18803589'),
  BigInt('0x962DE7A428B5BCF0'),
  BigInt('0xF1C1FE77117CDF02'),
  BigInt('0x8EAF0EBF2149567B'),
  BigInt('0x0F1C1FE77117CDF0'),
  BigInt('0x7072EF2F41224489'),
  BigInt('0x38A31B04893D698D'),
  BigInt('0x47CDEBCCB908E0F4'),
  BigInt('0xC67EFA94E9567B7F'),
  BigInt('0xB9100A5CD963F206'),
  BigInt('0x57DD12C379682177'),
  BigInt('0x28B3E20B495DA80E'),
  BigInt('0xA900F35319033385'),
  BigInt('0xD66E039B2936BAFC'),
  BigInt('0x9EBFF7B0E12997F8'),
  BigInt('0xE1D10778D11C1E81'),
  BigInt('0x606216208142850A'),
  BigInt('0x1F0CE6E8B1770C73'),
  BigInt('0x8921014C99C2B083'),
  BigInt('0xF64FF184A9F739FA'),
  BigInt('0x77FCE0DCF9A9A271'),
  BigInt('0x08921014C99C2B08'),
  BigInt('0x4043E43F0183060C'),
  BigInt('0x3F2D14F731B68F75'),
  BigInt('0xBE9E05AF61E814FE'),
  BigInt('0xC1F0F56751DD9D87'),
  BigInt('0x2F3DEDF8F1D64EF6'),
  BigInt('0x50531D30C1E3C78F'),
  BigInt('0xD1E00C6891BD5C04'),
  BigInt('0xAE8EFCA0A188D57D'),
  BigInt('0xE65F088B6997F879'),
  BigInt('0x9931F84359A27100'),
  BigInt('0x1882E91B09FCEA8B'),
  BigInt('0x67EC19D339C963F2'),
  BigInt('0xD75ADABD7A6E2D6F'),
  BigInt('0xA8342A754A5BA416'),
  BigInt('0x29873B2D1A053F9D'),
  BigInt('0x56E9CBE52A30B6E4'),
  BigInt('0x1E383FCEE22F9BE0'),
  BigInt('0x6156CF06D21A1299'),
  BigInt('0xE0E5DE5E82448912'),
  BigInt('0x9F8B2E96B271006B'),
  BigInt('0x71463609127AD31A'),
  BigInt('0x0E28C6C1224F5A63'),
  BigInt('0x8F9BD7997211C1E8'),
  BigInt('0xF0F5275142244891'),
  BigInt('0xB824D37A8A3B6595'),
  BigInt('0xC74A23B2BA0EECEC'),
  BigInt('0x46F932EAEA507767'),
  BigInt('0x3997C222DA65FE1E'),
  BigInt('0xAFBA2586F2D042EE'),
  BigInt('0xD0D4D54EC2E5CB97'),
  BigInt('0x5167C41692BB501C'),
  BigInt('0x2E0934DEA28ED965'),
  BigInt('0x66D8C0F56A91F461'),
  BigInt('0x19B6303D5AA47D18'),
  BigInt('0x980521650AFAE693'),
  BigInt('0xE76BD1AD3ACF6FEA'),
  BigInt('0x09A6C9329AC4BC9B'),
  BigInt('0x76C839FAAAF135E2'),
  BigInt('0xF77B28A2FAAFAE69'),
  BigInt('0x8815D86ACA9A2710'),
  BigInt('0xC0C42C4102850A14'),
  BigInt('0xBFAADC8932B0836D'),
  BigInt('0x3E19CDD162EE18E6'),
  BigInt('0x41773D1952DB919F'),
  BigInt('0x269B24CA6B12F26D'),
  BigInt('0x59F5D4025B277B14'),
  BigInt('0xD846C55A0B79E09F'),
  BigInt('0xA72835923B4C69E6'),
  BigInt('0xEFF9C1B9F35344E2'),
  BigInt('0x90973171C366CD9B'),
  BigInt('0x1124202993385610'),
  BigInt('0x6E4AD0E1A30DDF69'),
  BigInt('0x8087C87E03060C18'),
  BigInt('0xFFE938B633338561'),
  BigInt('0x7E5A29EE636D1EEA'),
  BigInt('0x0134D92653589793'),
  BigInt('0x49E52D0D9B47BA97'),
  BigInt('0x368BDDC5AB7233EE'),
  BigInt('0xB738CC9DFB2CA865'),
  BigInt('0xC8563C55CB19211C'),
  BigInt('0x5E7BDBF1E3AC9DEC'),
  BigInt('0x21152B39D3991495'),
  BigInt('0xA0A63A6183C78F1E'),
  BigInt('0xDFC8CAA9B3F20667'),
  BigInt('0x97193E827BED2B63'),
  BigInt('0xE877CE4A4BD8A21A'),
  BigInt('0x69C4DF121B863991'),
  BigInt('0x16AA2FDA2BB3B0E8'),
  BigInt('0xF86737458BB86399'),
  BigInt('0x8709C78DBB8DEAE0'),
  BigInt('0x06BAD6D5EBD3716B'),
  BigInt('0x79D4261DDBE6F812'),
  BigInt('0x3105D23613F9D516'),
  BigInt('0x4E6B22FE23CC5C6F'),
  BigInt('0xCFD833A67392C7E4'),
  BigInt('0xB0B6C36E43A74E9D'),
  BigInt('0x9A6C9329AC4BC9B5'),
  BigInt('0xE50263E19C7E40CC'),
  BigInt('0x64B172B9CC20DB47'),
  BigInt('0x1BDF8271FC15523E'),
  BigInt('0x530E765A340A7F3A'),
  BigInt('0x2C608692043FF643'),
  BigInt('0xADD397CA54616DC8'),
  BigInt('0xD2BD67026454E4B1'),
  BigInt('0x3C707F9DC45F37C0'),
  BigInt('0x431E8F55F46ABEB9'),
  BigInt('0xC2AD9E0DA4342532'),
  BigInt('0xBDC36EC59401AC4B'),
  BigInt('0xF5129AEE5C1E814F'),
  BigInt('0x8A7C6A266C2B0836'),
  BigInt('0x0BCF7B7E3C7593BD'),
  BigInt('0x74A18BB60C401AC4'),
  BigInt('0xE28C6C1224F5A634'),
  BigInt('0x9DE29CDA14C02F4D'),
  BigInt('0x1C518D82449EB4C6'),
  BigInt('0x633F7D4A74AB3DBF'),
  BigInt('0x2BEE8961BCB410BB'),
  BigInt('0x548079A98C8199C2'),
  BigInt('0xD53368F1DCDF0249'),
  BigInt('0xAA5D9839ECEA8B30'),
  BigInt('0x449080A64CE15841'),
  BigInt('0x3BFE706E7CD4D138'),
  BigInt('0xBA4D61362C8A4AB3'),
  BigInt('0xC52391FE1CBFC3CA'),
  BigInt('0x8DF265D5D4A0EECE'),
  BigInt('0xF29C951DE49567B7'),
  BigInt('0x732F8445B4CBFC3C'),
  BigInt('0x0C41748D84FE7545'),
  BigInt('0x6BAD6D5EBD3716B7'),
  BigInt('0x14C39D968D029FCE'),
  BigInt('0x95708CCEDD5C0445'),
  BigInt('0xEA1E7C06ED698D3C'),
  BigInt('0xA2CF882D2576A038'),
  BigInt('0xDDA178E515432941'),
  BigInt('0x5C1269BD451DB2CA'),
  BigInt('0x237C997575283BB3'),
  BigInt('0xCDB181EAD523E8C2'),
  BigInt('0xB2DF7122E51661BB'),
  BigInt('0x336C607AB548FA30'),
  BigInt('0x4C0290B2857D7349'),
  BigInt('0x04D364994D625E4D'),
  BigInt('0x7BBD94517D57D734'),
  BigInt('0xFA0E85092D094CBF'),
  BigInt('0x856075C11D3CC5C6'),
  BigInt('0x134D926535897936'),
  BigInt('0x6C2362AD05BCF04F'),
  BigInt('0xED9073F555E26BC4'),
  BigInt('0x92FE833D65D7E2BD'),
  BigInt('0xDA2F7716ADC8CFB9'),
  BigInt('0xA54187DE9DFD46C0'),
  BigInt('0x24F29686CDA3DD4B'),
  BigInt('0x5B9C664EFD965432'),
  BigInt('0xB5517ED15D9D8743'),
  BigInt('0xCA3F8E196DA80E3A'),
  BigInt('0x4B8C9F413DF695B1'),
  BigInt('0x34E26F890DC31CC8'),
  BigInt('0x7C339BA2C5DC31CC'),
  BigInt('0x035D6B6AF5E9B8B5'),
  BigInt('0x82EE7A32A5B7233E'),
  BigInt('0xFD808AFA9582AA47'),
  BigInt('0x4D364994D625E4DA'),
  BigInt('0x3258B95CE6106DA3'),
  BigInt('0xB3EBA804B64EF628'),
  BigInt('0xCC8558CC867B7F51'),
  BigInt('0x8454ACE74E645255'),
  BigInt('0xFB3A5C2F7E51DB2C'),
  BigInt('0x7A894D772E0F40A7'),
  BigInt('0x05E7BDBF1E3AC9DE'),
  BigInt('0xEB2AA520BE311AAF'),
  BigInt('0x944455E88E0493D6'),
  BigInt('0x15F744B0DE5A085D'),
  BigInt('0x6A99B478EE6F8124'),
  BigInt('0x224840532670AC20'),
  BigInt('0x5D26B09B16452559'),
  BigInt('0xDC95A1C3461BBED2'),
  BigInt('0xA3FB510B762E37AB'),
  BigInt('0x35D6B6AF5E9B8B5B'),
  BigInt('0x4AB846676EAE0222'),
  BigInt('0xCB0B573F3EF099A9'),
  BigInt('0xB465A7F70EC510D0'),
  BigInt('0xFCB453DCC6DA3DD4'),
  BigInt('0x83DAA314F6EFB4AD'),
  BigInt('0x0269B24CA6B12F26'),
  BigInt('0x7D0742849684A65F'),
  BigInt('0x93CA5A1B368F752E'),
  BigInt('0xECA4AAD306BAFC57'),
  BigInt('0x6D17BB8B56E467DC'),
  BigInt('0x12794B4366D1EEA5'),
  BigInt('0x5AA8BF68AECEC3A1'),
  BigInt('0x25C64FA09EFB4AD8'),
  BigInt('0xA4755EF8CEA5D153'),
  BigInt('0xDB1BAE30FE90582A'),
  BigInt('0xBCF7B7E3C7593BD8'),
  BigInt('0xC399472BF76CB2A1'),
  BigInt('0x422A5673A732292A'),
  BigInt('0x3D44A6BB9707A053'),
  BigInt('0x759552905F188D57'),
  BigInt('0x0AFBA2586F2D042E'),
  BigInt('0x8B48B3003F739FA5'),
  BigInt('0xF42643C80F4616DC'),
  BigInt('0x1AEB5B57AF4DC5AD'),
  BigInt('0x6585AB9F9F784CD4'),
  BigInt('0xE436BAC7CF26D75F'),
  BigInt('0x9B584A0FFF135E26'),
  BigInt('0xD389BE24370C7322'),
  BigInt('0xACE74EEC0739FA5B'),
  BigInt('0x2D545FB4576761D0'),
  BigInt('0x523AAF7C6752E8A9'),
  BigInt('0xC41748D84FE75459'),
  BigInt('0xBB79B8107FD2DD20'),
  BigInt('0x3ACAA9482F8C46AB'),
  BigInt('0x45A459801FB9CFD2'),
  BigInt('0x0D75ADABD7A6E2D6'),
  BigInt('0x721B5D63E7936BAF'),
  BigInt('0xF3A84C3BB7CDF024'),
  BigInt('0x8CC6BCF387F8795D'),
  BigInt('0x620BA46C27F3AA2C'),
  BigInt('0x1D6554A417C62355'),
  BigInt('0x9CD645FC4798B8DE'),
  BigInt('0xE3B8B53477AD31A7'),
  BigInt('0xAB69411FBFB21CA3'),
  BigInt('0xD407B1D78F8795DA'),
  BigInt('0x55B4A08FDFD90E51'),
  BigInt('0x2ADA5047EFEC8728')
]

export type CRC64DigestEncoding = 'hex' | 'base64' | 'buffer'

class CRC64 {
  private _crc: bigint

  constructor() {
    this._crc = BigInt(0)
  }

  update(data: Buffer | string): void {
    const buffer = typeof data === 'string' ? Buffer.from(data) : data
    let crc = CRC64.flip64Bits(this._crc)

    for (const dataByte of buffer) {
      const crcByte = Number(crc & BigInt(0xff))
      crc = PREGEN_POLY_TABLE[crcByte ^ dataByte] ^ (crc >> BigInt(8))
    }

    this._crc = CRC64.flip64Bits(crc)
  }

  digest(encoding?: CRC64DigestEncoding): string | Buffer {
    switch (encoding) {
      case 'hex':
        return this._crc.toString(16).toUpperCase()
      case 'base64':
        return this.toBuffer().toString('base64')
      default:
        return this.toBuffer()
    }
  }

  private toBuffer(): Buffer {
    return Buffer.from(
      [0, 8, 16, 24, 32, 40, 48, 56].map(s =>
        Number((this._crc >> BigInt(s)) & BigInt(0xff))
      )
    )
  }

  static flip64Bits(n: bigint): bigint {
    return (BigInt(1) << BigInt(64)) - BigInt(1) - n
  }
}

export default CRC64
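
A quick sketch of how the class above gets used:

```ts
const crc = new CRC64()
crc.update('hello ')
crc.update('world')
const checksum = crc.digest('hex') // uppercase hex string of the 64-bit CRC
```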

@@ -1,362 +0,0 @@
import * as fs from 'fs'
import * as core from '@actions/core'
import * as zlib from 'zlib'
import {
  getArtifactUrl,
  getDownloadHeaders,
  isSuccessStatusCode,
  isRetryableStatusCode,
  isThrottledStatusCode,
  getExponentialRetryTimeInMilliseconds,
  tryGetRetryAfterValueTimeInMilliseconds,
  displayHttpDiagnostics,
  getFileSize,
  rmFile,
  sleep
} from './utils'
import {URL} from 'url'
import {StatusReporter} from './status-reporter'
import {performance} from 'perf_hooks'
import {ListArtifactsResponse, QueryArtifactResponse} from './contracts'
import {HttpClientResponse} from '@actions/http-client'
import {HttpManager} from './http-manager'
import {DownloadItem} from './download-specification'
import {getDownloadFileConcurrency, getRetryLimit} from './config-variables'
import {IncomingHttpHeaders} from 'http'
import {retryHttpClientRequest} from './requestUtils'

export class DownloadHttpClient {
  // http manager is used for concurrent connections when downloading multiple files at once
  private downloadHttpManager: HttpManager
  private statusReporter: StatusReporter

  constructor() {
    this.downloadHttpManager = new HttpManager(
      getDownloadFileConcurrency(),
      '@actions/artifact-download'
    )
    // downloads are usually significantly faster than uploads so display status information every second
    this.statusReporter = new StatusReporter(1000)
  }

  /**
   * Gets a list of all artifacts that are in a specific container
   */
  async listArtifacts(): Promise<ListArtifactsResponse> {
    const artifactUrl = getArtifactUrl()

    // use the first client from the httpManager, `keep-alive` is not used so the connection will close immediately
    const client = this.downloadHttpManager.getClient(0)
    const headers = getDownloadHeaders('application/json')
    const response = await retryHttpClientRequest('List Artifacts', async () =>
      client.get(artifactUrl, headers)
    )
    const body: string = await response.readBody()
    return JSON.parse(body)
  }

  /**
   * Fetches a set of container items that describe the contents of an artifact
   * @param artifactName the name of the artifact
   * @param containerUrl the artifact container URL for the run
   */
  async getContainerItems(
    artifactName: string,
    containerUrl: string
  ): Promise<QueryArtifactResponse> {
    // the itemPath search parameter controls which containers will be returned
    const resourceUrl = new URL(containerUrl)
    resourceUrl.searchParams.append('itemPath', artifactName)

    // use the first client from the httpManager, `keep-alive` is not used so the connection will close immediately
    const client = this.downloadHttpManager.getClient(0)
    const headers = getDownloadHeaders('application/json')
    const response = await retryHttpClientRequest(
      'Get Container Items',
      async () => client.get(resourceUrl.toString(), headers)
    )
    const body: string = await response.readBody()
    return JSON.parse(body)
  }

  /**
   * Concurrently downloads all the files that are part of an artifact
   * @param downloadItems information about what items to download and where to save them
   */
  async downloadSingleArtifact(downloadItems: DownloadItem[]): Promise<void> {
    const DOWNLOAD_CONCURRENCY = getDownloadFileConcurrency()
    // limit the number of files downloaded at a single time
    core.debug(`Download file concurrency is set to ${DOWNLOAD_CONCURRENCY}`)
    const parallelDownloads = [...new Array(DOWNLOAD_CONCURRENCY).keys()]
    let currentFile = 0
    let downloadedFiles = 0

    core.info(
      `Total number of files that will be downloaded: ${downloadItems.length}`
    )

    this.statusReporter.setTotalNumberOfFilesToProcess(downloadItems.length)
    this.statusReporter.start()

    await Promise.all(
      parallelDownloads.map(async index => {
        while (currentFile < downloadItems.length) {
          const currentFileToDownload = downloadItems[currentFile]
          currentFile += 1

          const startTime = performance.now()
          await this.downloadIndividualFile(
            index,
            currentFileToDownload.sourceLocation,
            currentFileToDownload.targetPath
          )

          if (core.isDebug()) {
            core.debug(
              `File: ${++downloadedFiles}/${downloadItems.length}. ${
                currentFileToDownload.targetPath
              } took ${(performance.now() - startTime).toFixed(
                3
              )} milliseconds to finish downloading`
            )
          }

          this.statusReporter.incrementProcessedCount()
        }
      })
    )
      .catch(error => {
        throw new Error(`Unable to download the artifact: ${error}`)
      })
      .finally(() => {
        this.statusReporter.stop()
        // safety dispose all connections
        this.downloadHttpManager.disposeAndReplaceAllClients()
      })
  }

  /**
   * Downloads an individual file
   * @param httpClientIndex the index of the http client that is used to make all of the calls
   * @param artifactLocation origin location where a file will be downloaded from
   * @param downloadPath destination location for the file being downloaded
   */
  private async downloadIndividualFile(
    httpClientIndex: number,
    artifactLocation: string,
    downloadPath: string
  ): Promise<void> {
    let retryCount = 0
    const retryLimit = getRetryLimit()
    let destinationStream = fs.createWriteStream(downloadPath)
    const headers = getDownloadHeaders('application/json', true, true)

    // a single GET request is used to download a file
    const makeDownloadRequest = async (): Promise<HttpClientResponse> => {
      const client = this.downloadHttpManager.getClient(httpClientIndex)
      return await client.get(artifactLocation, headers)
    }

    // check the response headers to determine if the file was compressed using gzip
    const isGzip = (incomingHeaders: IncomingHttpHeaders): boolean => {
      return (
        'content-encoding' in incomingHeaders &&
        incomingHeaders['content-encoding'] === 'gzip'
      )
    }

    // Increments the current retry count and then checks if the retry limit has been reached
    // If there have been too many retries, fail so the download stops. If there is a retryAfterValue value provided,
    // it will be used
    const backOff = async (retryAfterValue?: number): Promise<void> => {
      retryCount++
      if (retryCount > retryLimit) {
        return Promise.reject(
          new Error(
            `Retry limit has been reached. Unable to download ${artifactLocation}`
          )
        )
      } else {
        this.downloadHttpManager.disposeAndReplaceClient(httpClientIndex)
        if (retryAfterValue) {
          // Back off by waiting the specified time denoted by the retry-after header
          core.info(
            `Backoff due to too many requests, retry #${retryCount}. Waiting for ${retryAfterValue} milliseconds before continuing the download`
          )
          await sleep(retryAfterValue)
        } else {
          // Back off using an exponential value that depends on the retry count
          const backoffTime = getExponentialRetryTimeInMilliseconds(retryCount)
          core.info(
            `Exponential backoff for retry #${retryCount}. Waiting for ${backoffTime} milliseconds before continuing the download`
          )
          await sleep(backoffTime)
        }
        core.info(
          `Finished backoff for retry #${retryCount}, continuing with download`
        )
      }
    }

    const isAllBytesReceived = (
      expected?: string,
      received?: number
    ): boolean => {
      // be lenient, if any input is missing, assume success, i.e. not truncated
      if (
        !expected ||
        !received ||
        process.env['ACTIONS_ARTIFACT_SKIP_DOWNLOAD_VALIDATION']
      ) {
        core.info('Skipping download validation.')
        return true
      }

      return parseInt(expected) === received
    }

    const resetDestinationStream = async (
      fileDownloadPath: string
    ): Promise<void> => {
      destinationStream.close()
      // await until file is created at downloadpath; node15 and up fs.createWriteStream had not created a file yet
      await new Promise<void>(resolve => {
        destinationStream.on('close', resolve)
        if (destinationStream.writableFinished) {
          resolve()
        }
      })
      await rmFile(fileDownloadPath)
      destinationStream = fs.createWriteStream(fileDownloadPath)
    }

    // keep trying to download a file until a retry limit has been reached
    while (retryCount <= retryLimit) {
      let response: HttpClientResponse
      try {
        response = await makeDownloadRequest()
      } catch (error) {
        // if an error is caught, it is usually indicative of a timeout so retry the download
        core.info('An error occurred while attempting to download a file')
        // eslint-disable-next-line no-console
        console.log(error)

        // increment the retryCount and use exponential backoff to wait before making the next request
        await backOff()
        continue
      }

      let forceRetry = false
      if (isSuccessStatusCode(response.message.statusCode)) {
        // The body contains the contents of the file however calling response.readBody() causes all the content to be converted to a string
        // which can cause some gzip encoded data to be lost
        // Instead of using response.readBody(), response.message is a readableStream that can be directly used to get the raw body contents
        try {
          const isGzipped = isGzip(response.message.headers)
          await this.pipeResponseToFile(response, destinationStream, isGzipped)

          if (
            isGzipped ||
            isAllBytesReceived(
              response.message.headers['content-length'],
              await getFileSize(downloadPath)
            )
          ) {
            return
          } else {
            forceRetry = true
          }
        } catch (error) {
          // retry on error, most likely streams were corrupted
          forceRetry = true
        }
      }

      if (forceRetry || isRetryableStatusCode(response.message.statusCode)) {
        core.info(
          `A ${response.message.statusCode} response code has been received while attempting to download an artifact`
        )
        resetDestinationStream(downloadPath)
        // if a throttled status code is received, try to get the retryAfter header value, else differ to standard exponential backoff
        isThrottledStatusCode(response.message.statusCode)
          ? await backOff(
              tryGetRetryAfterValueTimeInMilliseconds(response.message.headers)
            )
          : await backOff()
      } else {
        // Some unexpected response code, fail immediately and stop the download
        displayHttpDiagnostics(response)
        return Promise.reject(
          new Error(
            `Unexpected http ${response.message.statusCode} during download for ${artifactLocation}`
          )
        )
      }
    }
  }

  /**
   * Pipes the response from downloading an individual file to the appropriate destination stream while decoding gzip content if necessary
   * @param response the http response received when downloading a file
   * @param destinationStream the stream where the file should be written to
   * @param isGzip a boolean denoting if the content is compressed using gzip and if we need to decode it
   */
  async pipeResponseToFile(
    response: HttpClientResponse,
    destinationStream: fs.WriteStream,
    isGzip: boolean
  ): Promise<void> {
    await new Promise((resolve, reject) => {
      if (isGzip) {
        const gunzip = zlib.createGunzip()
        response.message
          .on('error', error => {
            core.info(
              `An error occurred while attempting to read the response stream`
            )
            gunzip.close()
            destinationStream.close()
            reject(error)
          })
          .pipe(gunzip)
          .on('error', error => {
            core.info(
              `An error occurred while attempting to decompress the response stream`
            )
            destinationStream.close()
            reject(error)
          })
          .pipe(destinationStream)
          .on('close', () => {
            resolve()
          })
          .on('error', error => {
            core.info(
              `An error occurred while writing a downloaded file to ${destinationStream.path}`
            )
            reject(error)
          })
      } else {
        response.message
          .on('error', error => {
            core.info(
              `An error occurred while attempting to read the response stream`
            )
            destinationStream.close()
            reject(error)
          })
          .pipe(destinationStream)
          .on('close', () => {
            resolve()
          })
          .on('error', error => {
            core.info(
              `An error occurred while writing a downloaded file to ${destinationStream.path}`
            )
            reject(error)
          })
      }
    })
    return
  }
}
|
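
// Editor's note: an illustrative standalone sketch of the truncation check above, not part of
// the original source. Gzip-encoded responses are exempt because their on-disk size
// legitimately differs from the Content-Length header; the env variable is an escape hatch.
function exampleIsAllBytesReceived(
  contentLength?: string,
  bytesOnDisk?: number
): boolean {
  if (!contentLength || !bytesOnDisk) {
    return true // be lenient when either input is missing
  }
  return parseInt(contentLength) === bytesOnDisk
}
// exampleIsAllBytesReceived('1024', 1024) -> true, the download is complete
// exampleIsAllBytesReceived('1024', 512)  -> false, the destination stream is reset and the download retried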

@@ -1,7 +0,0 @@
export interface DownloadOptions {
  /**
   * Specifies if a folder is created for the artifact that is downloaded (contents downloaded into this folder),
   * defaults to false if not specified
   */
  createArtifactFolder?: boolean
}

@@ -1,11 +0,0 @@
export interface DownloadResponse {
  /**
   * The name of the artifact that was downloaded
   */
  artifactName: string

  /**
   * The full path to where the artifact was downloaded
   */
  downloadPath: string
}

@@ -1,87 +0,0 @@
import * as path from 'path'
import {ContainerEntry} from './contracts'

export interface DownloadSpecification {
  // root download location for the artifact
  rootDownloadLocation: string

  // directories that need to be created for all the items in the artifact
  directoryStructure: string[]

  // empty files that are part of the artifact that don't require any downloading
  emptyFilesToCreate: string[]

  // individual files that need to be downloaded as part of the artifact
  filesToDownload: DownloadItem[]
}

export interface DownloadItem {
  // Url that denotes where to download the item from
  sourceLocation: string

  // Information about where the file should be downloaded to
  targetPath: string
}

/**
 * Creates a specification for a set of files that will be downloaded
 * @param artifactName the name of the artifact
 * @param artifactEntries a set of container entries that describe the files that make up an artifact
 * @param downloadPath the path where the artifact will be downloaded to
 * @param includeRootDirectory specifies if there should be an extra directory (denoted by the artifact name) where the artifact files should be downloaded to
 */
export function getDownloadSpecification(
  artifactName: string,
  artifactEntries: ContainerEntry[],
  downloadPath: string,
  includeRootDirectory: boolean
): DownloadSpecification {
  // use a set for the directory paths so that there are no duplicates
  const directories = new Set<string>()

  const specifications: DownloadSpecification = {
    rootDownloadLocation: includeRootDirectory
      ? path.join(downloadPath, artifactName)
      : downloadPath,
    directoryStructure: [],
    emptyFilesToCreate: [],
    filesToDownload: []
  }

  for (const entry of artifactEntries) {
    // Ignore artifacts in the container that don't begin with the same name
    if (
      entry.path.startsWith(`${artifactName}/`) ||
      entry.path.startsWith(`${artifactName}\\`)
    ) {
      // normalize all separators to the local OS
      const normalizedPathEntry = path.normalize(entry.path)
      // entry.path always starts with the artifact name; if includeRootDirectory is false, remove the name from the beginning of the path
      const filePath = path.join(
        downloadPath,
        includeRootDirectory
          ? normalizedPathEntry
          : normalizedPathEntry.replace(artifactName, '')
      )

      // A case-insensitive folder structure is maintained in the backend; not every folder is created, so the 'folder'
      // itemType cannot be relied upon. The files must be used to determine the directory structure
      if (entry.itemType === 'file') {
        // Get the directories that we need to create from the filePath for each individual file
        directories.add(path.dirname(filePath))
        if (entry.fileLength === 0) {
          // An empty file was uploaded, create the empty files locally so that no extra http calls are made
          specifications.emptyFilesToCreate.push(filePath)
        } else {
          specifications.filesToDownload.push({
            sourceLocation: entry.contentLocation,
            targetPath: filePath
          })
        }
      }
    }
  }

  specifications.directoryStructure = Array.from(directories)
  return specifications
}
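
// Editor's note: a minimal usage sketch, not part of the original source. The ContainerEntry
// values are hypothetical and only fill the fields the function reads; the POSIX-style output
// paths assume a Linux runner.
import {getDownloadSpecification} from './download-specification'
import {ContainerEntry} from './contracts'

const exampleEntries = [
  {path: 'my-artifact/file1.txt', itemType: 'file', fileLength: 12, contentLocation: 'https://example.test/1'},
  {path: 'my-artifact/empty.txt', itemType: 'file', fileLength: 0, contentLocation: 'https://example.test/2'}
] as unknown as ContainerEntry[]

const spec = getDownloadSpecification('my-artifact', exampleEntries, '/tmp/out', false)
// spec.rootDownloadLocation -> '/tmp/out' (no extra folder since includeRootDirectory is false)
// spec.emptyFilesToCreate   -> ['/tmp/out/empty.txt'] (created locally, no http call needed)
// spec.filesToDownload      -> [{sourceLocation: 'https://example.test/1', targetPath: '/tmp/out/file1.txt'}]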

@@ -1,35 +0,0 @@
import {HttpClient} from '@actions/http-client'
import {createHttpClient} from './utils'

/**
 * Used for managing http clients during either upload or download
 */
export class HttpManager {
  private clients: HttpClient[]
  private userAgent: string

  constructor(clientCount: number, userAgent: string) {
    if (clientCount < 1) {
      throw new Error('There must be at least one client')
    }
    this.userAgent = userAgent
    // create a distinct client per slot; Array.fill(createHttpClient(...)) would place the
    // same client instance in every slot, so disposing one index would break the others
    this.clients = Array.from({length: clientCount}, () =>
      createHttpClient(userAgent)
    )
  }

  getClient(index: number): HttpClient {
    return this.clients[index]
  }

  // client disposal is necessary if a keep-alive connection is used to properly close the connection
  // for more information see: https://github.com/actions/http-client/blob/04e5ad73cd3fd1f5610a32116b0759eddf6570d2/index.ts#L292
  disposeAndReplaceClient(index: number): void {
    this.clients[index].dispose()
    this.clients[index] = createHttpClient(this.userAgent)
  }

  disposeAndReplaceAllClients(): void {
    for (const [index] of this.clients.entries()) {
      this.disposeAndReplaceClient(index)
    }
  }
}
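
// Editor's note: a brief usage sketch, not part of the original source. One client is kept per
// concurrent upload/download slot so that a broken keep-alive connection can be disposed and
// replaced without affecting the other slots.
import {HttpManager} from './http-manager'

const manager = new HttpManager(2, '@actions/artifact-example')
const client = manager.getClient(0)
// ... use `client` for a series of keep-alive requests on slot 0 ...
// after an error on slot 0, dispose the broken connection and swap in a fresh client:
manager.disposeAndReplaceClient(0)
// at the very end, close every remaining keep-alive connection:
manager.disposeAndReplaceAllClients()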

@@ -1,82 +0,0 @@
import {info} from '@actions/core'

/**
 * Invalid characters that cannot be in the artifact name or an uploaded file. Will be rejected
 * from the server if attempted to be sent over. These characters are not allowed due to limitations with certain
 * file systems such as NTFS. To maintain platform-agnostic behavior, a character that is not supported by an
 * individual filesystem/platform is not supported on any filesystem/platform
 *
 * FilePaths can include characters such as \ and / which are not permitted in the artifact name alone
 */
const invalidArtifactFilePathCharacters = new Map<string, string>([
  ['"', ' Double quote "'],
  [':', ' Colon :'],
  ['<', ' Less than <'],
  ['>', ' Greater than >'],
  ['|', ' Vertical bar |'],
  ['*', ' Asterisk *'],
  ['?', ' Question mark ?'],
  ['\r', ' Carriage return \\r'],
  ['\n', ' Line feed \\n']
])

const invalidArtifactNameCharacters = new Map<string, string>([
  ...invalidArtifactFilePathCharacters,
  ['\\', ' Backslash \\'],
  ['/', ' Forward slash /']
])

/**
 * Scans the name of the artifact to make sure there are no illegal characters
 */
export function checkArtifactName(name: string): void {
  if (!name) {
    throw new Error(`Artifact name: ${name}, is incorrectly provided`)
  }

  for (const [
    invalidCharacterKey,
    errorMessageForCharacter
  ] of invalidArtifactNameCharacters) {
    if (name.includes(invalidCharacterKey)) {
      throw new Error(
        `Artifact name is not valid: ${name}. Contains the following character: ${errorMessageForCharacter}

Invalid characters include: ${Array.from(
          invalidArtifactNameCharacters.values()
        ).toString()}

These characters are not allowed in the artifact name due to limitations with certain file systems such as NTFS. To maintain file system agnostic behavior, these characters are intentionally not allowed to prevent potential problems with downloads on different file systems.`
      )
    }
  }

  info(`Artifact name is valid!`)
}

/**
 * Scans a file path to make sure there are no illegal characters
 */
export function checkArtifactFilePath(path: string): void {
  if (!path) {
    throw new Error(`Artifact path: ${path}, is incorrectly provided`)
  }

  for (const [
    invalidCharacterKey,
    errorMessageForCharacter
  ] of invalidArtifactFilePathCharacters) {
    if (path.includes(invalidCharacterKey)) {
      throw new Error(
        `Artifact path is not valid: ${path}. Contains the following character: ${errorMessageForCharacter}

Invalid characters include: ${Array.from(
          invalidArtifactFilePathCharacters.values()
        ).toString()}

The following characters are not allowed in files that are uploaded due to limitations with certain file systems such as NTFS. To maintain file system agnostic behavior, these characters are intentionally not allowed to prevent potential problems with downloads on different file systems.
`
      )
    }
  }
}
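
// Editor's note: a small sketch of the validation behavior above, not part of the original source.
import {checkArtifactName, checkArtifactFilePath} from './path-and-artifact-name-validation'

checkArtifactName('my-artifact') // passes and logs "Artifact name is valid!"
checkArtifactFilePath('dir/file.txt') // passes, / is allowed inside file paths
try {
  checkArtifactName('my/artifact') // throws, / is not allowed in the artifact name itself
} catch (error) {
  // the error message names the offending character and lists every invalid character
}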

@@ -1,79 +0,0 @@
import {HttpClientResponse} from '@actions/http-client'
import {
  isRetryableStatusCode,
  isSuccessStatusCode,
  sleep,
  getExponentialRetryTimeInMilliseconds,
  displayHttpDiagnostics
} from './utils'
import * as core from '@actions/core'
import {getRetryLimit} from './config-variables'

export async function retry(
  name: string,
  operation: () => Promise<HttpClientResponse>,
  customErrorMessages: Map<number, string>,
  maxAttempts: number
): Promise<HttpClientResponse> {
  let response: HttpClientResponse | undefined = undefined
  let statusCode: number | undefined = undefined
  let isRetryable = false
  let errorMessage = ''
  let customErrorInformation: string | undefined = undefined
  let attempt = 1

  while (attempt <= maxAttempts) {
    try {
      response = await operation()
      statusCode = response.message.statusCode

      if (isSuccessStatusCode(statusCode)) {
        return response
      }

      // Extra error information that we want to display if a particular response code is hit
      if (statusCode) {
        customErrorInformation = customErrorMessages.get(statusCode)
      }

      isRetryable = isRetryableStatusCode(statusCode)
      errorMessage = `Artifact service responded with ${statusCode}`
    } catch (error) {
      isRetryable = true
      errorMessage = error.message
    }

    if (!isRetryable) {
      core.info(`${name} - Error is not retryable`)
      if (response) {
        displayHttpDiagnostics(response)
      }
      break
    }

    core.info(
      `${name} - Attempt ${attempt} of ${maxAttempts} failed with error: ${errorMessage}`
    )

    await sleep(getExponentialRetryTimeInMilliseconds(attempt))
    attempt++
  }

  if (response) {
    displayHttpDiagnostics(response)
  }

  if (customErrorInformation) {
    throw Error(`${name} failed: ${customErrorInformation}`)
  }
  throw Error(`${name} failed: ${errorMessage}`)
}

export async function retryHttpClientRequest(
  name: string,
  method: () => Promise<HttpClientResponse>,
  customErrorMessages: Map<number, string> = new Map(),
  maxAttempts = getRetryLimit()
): Promise<HttpClientResponse> {
  return await retry(name, method, customErrorMessages, maxAttempts)
}
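
// Editor's note: a minimal usage sketch, not part of the original source. The URL and client
// setup are hypothetical; retryHttpClientRequest retries retryable status codes and thrown
// errors with exponential backoff, and maps status codes to friendlier messages.
import {HttpClient} from '@actions/http-client'
import {retryHttpClientRequest} from './requestUtils'

async function fetchWithRetry(): Promise<string> {
  const client = new HttpClient('example-agent')
  const response = await retryHttpClientRequest(
    'Example GET',
    async () => client.get('https://example.test/resource'),
    new Map([[403, 'Access to the resource is forbidden']])
  )
  return response.readBody()
}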

@@ -1,78 +0,0 @@
import {info} from '@actions/core'

/**
 * Status Reporter that displays information about the progress/status of an artifact that is being uploaded or downloaded
 *
 * The display frequency can be adjusted using the displayFrequencyInMilliseconds variable;
 * the total status of the upload/download gets displayed according to this value.
 * If there is a large file that is being uploaded, extra information about the individual status can also be displayed using the updateLargeFileStatus function
 */

export class StatusReporter {
  private totalNumberOfFilesToProcess = 0
  private processedCount = 0
  private displayFrequencyInMilliseconds: number
  private largeFiles = new Map<string, string>()
  private totalFileStatus: NodeJS.Timeout | undefined

  constructor(displayFrequencyInMilliseconds: number) {
    this.totalFileStatus = undefined
    this.displayFrequencyInMilliseconds = displayFrequencyInMilliseconds
  }

  setTotalNumberOfFilesToProcess(fileTotal: number): void {
    this.totalNumberOfFilesToProcess = fileTotal
    this.processedCount = 0
  }

  start(): void {
    // displays information about the total upload/download status
    this.totalFileStatus = setInterval(() => {
      // display 1 decimal place without any rounding
      const percentage = this.formatPercentage(
        this.processedCount,
        this.totalNumberOfFilesToProcess
      )
      info(
        `Total file count: ${
          this.totalNumberOfFilesToProcess
        } ---- Processed file #${this.processedCount} (${percentage.slice(
          0,
          percentage.indexOf('.') + 2
        )}%)`
      )
    }, this.displayFrequencyInMilliseconds)
  }

  // if there is a large file that is being uploaded in chunks, this is used to display extra information about the status of the upload
  updateLargeFileStatus(
    fileName: string,
    chunkStartIndex: number,
    chunkEndIndex: number,
    totalUploadFileSize: number
  ): void {
    // display 1 decimal place without any rounding
    const percentage = this.formatPercentage(chunkEndIndex, totalUploadFileSize)
    info(
      `Uploaded ${fileName} (${percentage.slice(
        0,
        percentage.indexOf('.') + 2
      )}%) bytes ${chunkStartIndex}:${chunkEndIndex}`
    )
  }

  stop(): void {
    if (this.totalFileStatus) {
      clearInterval(this.totalFileStatus)
    }
  }

  incrementProcessedCount(): void {
    this.processedCount++
  }

  private formatPercentage(numerator: number, denominator: number): string {
    // toFixed() rounds, so use extra precision to display accurate information even though 4 decimal places are not displayed
    return ((numerator / denominator) * 100).toFixed(4).toString()
  }
}
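
// Editor's note: a small usage sketch, not part of the original source.
import {StatusReporter} from './status-reporter'

const reporter = new StatusReporter(10000) // print overall progress every 10 seconds
reporter.setTotalNumberOfFilesToProcess(3)
reporter.start()
reporter.incrementProcessedCount() // 1/3 processed; the next interval tick prints "(33.3%)"
// for files uploaded in multiple chunks, per-chunk progress can also be reported:
reporter.updateLargeFileStatus('big-file.bin', 0, 4194303, 16777216) // "Uploaded big-file.bin (25.0%) bytes 0:4194303"
reporter.stop() // always stop, otherwise the interval keeps the process alive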

@@ -1,89 +0,0 @@
import * as fs from 'fs'
import * as zlib from 'zlib'
import {promisify} from 'util'
const stat = promisify(fs.stat)

/**
 * GZipping certain files that are already compressed will likely not yield further size reductions. Creating large temporary gzip
 * files would just waste a lot of time before ultimately being discarded (especially for very large files).
 * If any of these types of files are encountered then on-disk gzip creation will be skipped and the original file will be uploaded as-is
 */
const gzipExemptFileExtensions = [
  '.gz', // GZIP
  '.gzip', // GZIP
  '.tgz', // GZIP
  '.taz', // GZIP
  '.Z', // COMPRESS
  '.taZ', // COMPRESS
  '.bz2', // BZIP2
  '.tbz', // BZIP2
  '.tbz2', // BZIP2
  '.tz2', // BZIP2
  '.lz', // LZIP
  '.lzma', // LZMA
  '.tlz', // LZMA
  '.lzo', // LZOP
  '.xz', // XZ
  '.txz', // XZ
  '.zst', // ZSTD
  '.zstd', // ZSTD
  '.tzst', // ZSTD
  '.zip', // ZIP
  '.7z' // 7ZIP
]

/**
 * Creates a Gzip compressed file of an original file at the provided temporary filepath location
 * @param {string} originalFilePath filepath of whatever will be compressed. The original file will be unmodified
 * @param {string} tempFilePath the location of where the Gzip file will be created
 * @returns the size of gzip file that gets created
 */
export async function createGZipFileOnDisk(
  originalFilePath: string,
  tempFilePath: string
): Promise<number> {
  for (const gzipExemptExtension of gzipExemptFileExtensions) {
    if (originalFilePath.endsWith(gzipExemptExtension)) {
      // return a really large number so that the original file gets uploaded
      return Number.MAX_SAFE_INTEGER
    }
  }

  return new Promise((resolve, reject) => {
    const inputStream = fs.createReadStream(originalFilePath)
    const gzip = zlib.createGzip()
    const outputStream = fs.createWriteStream(tempFilePath)
    inputStream.pipe(gzip).pipe(outputStream)
    outputStream.on('finish', async () => {
      // wait for the stream to finish before calculating the size, which is needed as part of the Content-Length header when starting an upload
      const size = (await stat(tempFilePath)).size
      resolve(size)
    })
    outputStream.on('error', error => {
      // eslint-disable-next-line no-console
      console.log(error)
      reject(error)
    })
  })
}

/**
 * Creates a GZip file in memory using a buffer. Should be used for smaller files to reduce disk I/O
 * @param originalFilePath the path to the original file that is being GZipped
 * @returns a buffer with the GZip file
 */
export async function createGZipFileInBuffer(
  originalFilePath: string
): Promise<Buffer> {
  // avoid an async promise executor; read the gzip stream into a buffer using async iterators,
  // see https://github.com/nodejs/readable-stream/issues/403#issuecomment-479069043
  const inputStream = fs.createReadStream(originalFilePath)
  const gzip = zlib.createGzip()
  inputStream.pipe(gzip)
  const chunks = []
  for await (const chunk of gzip) {
    chunks.push(chunk)
  }
  return Buffer.concat(chunks)
}
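
// Editor's note: a short sketch of how the two helpers above are chosen, not part of the
// original source; the 64k threshold and tmp-promise usage mirror upload-http-client.ts.
import * as tmp from 'tmp-promise'
import {createGZipFileOnDisk, createGZipFileInBuffer} from './upload-gzip'

async function gzippedUploadSize(filePath: string, fileSize: number): Promise<number> {
  if (fileSize < 65536) {
    // small file: compress in memory to avoid disk I/O
    const buffer = await createGZipFileInBuffer(filePath)
    return buffer.byteLength
  }
  // large file: compress to a temporary file on disk; for exempt extensions such as .zip or
  // .gz this returns Number.MAX_SAFE_INTEGER, which makes the caller upload the original file
  const tempFile = await tmp.file() // remember to call tempFile.cleanup() when done
  return createGZipFileOnDisk(filePath, tempFile.path)
}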

@@ -1,567 +0,0 @@
import * as fs from 'fs'
import * as core from '@actions/core'
import * as tmp from 'tmp-promise'
import * as stream from 'stream'
import {
  ArtifactResponse,
  CreateArtifactParameters,
  PatchArtifactSize,
  UploadResults
} from './contracts'
import {
  digestForStream,
  getArtifactUrl,
  getContentRange,
  getUploadHeaders,
  isRetryableStatusCode,
  isSuccessStatusCode,
  isThrottledStatusCode,
  displayHttpDiagnostics,
  getExponentialRetryTimeInMilliseconds,
  tryGetRetryAfterValueTimeInMilliseconds,
  getProperRetention,
  sleep
} from './utils'
import {
  getUploadChunkSize,
  getUploadFileConcurrency,
  getRetryLimit,
  getRetentionDays
} from './config-variables'
import {promisify} from 'util'
import {URL} from 'url'
import {performance} from 'perf_hooks'
import {StatusReporter} from './status-reporter'
import {HttpCodes, HttpClientResponse} from '@actions/http-client'
import {HttpManager} from './http-manager'
import {UploadSpecification} from './upload-specification'
import {UploadOptions} from './upload-options'
import {createGZipFileOnDisk, createGZipFileInBuffer} from './upload-gzip'
import {retryHttpClientRequest} from './requestUtils'
const stat = promisify(fs.stat)

export class UploadHttpClient {
  private uploadHttpManager: HttpManager
  private statusReporter: StatusReporter

  constructor() {
    this.uploadHttpManager = new HttpManager(
      getUploadFileConcurrency(),
      '@actions/artifact-upload'
    )
    this.statusReporter = new StatusReporter(10000)
  }

  /**
   * Creates a file container for the new artifact in the remote blob storage/file service
   * @param {string} artifactName Name of the artifact being created
   * @returns The response from the Artifact Service if the file container was successfully created
   */
  async createArtifactInFileContainer(
    artifactName: string,
    options?: UploadOptions | undefined
  ): Promise<ArtifactResponse> {
    const parameters: CreateArtifactParameters = {
      Type: 'actions_storage',
      Name: artifactName
    }

    // calculate retention period
    if (options && options.retentionDays) {
      const maxRetentionStr = getRetentionDays()
      parameters.RetentionDays = getProperRetention(
        options.retentionDays,
        maxRetentionStr
      )
    }

    const data: string = JSON.stringify(parameters, null, 2)
    const artifactUrl = getArtifactUrl()

    // use the first client from the httpManager, `keep-alive` is not used so the connection will close immediately
    const client = this.uploadHttpManager.getClient(0)
    const headers = getUploadHeaders('application/json', false)

    // Extra information to display when a particular HTTP code is returned
    // If a 403 is returned when trying to create a file container, the customer has exceeded
    // their storage quota so no new artifact containers can be created
    const customErrorMessages: Map<number, string> = new Map([
      [
        HttpCodes.Forbidden,
        'Artifact storage quota has been hit. Unable to upload any new artifacts'
      ],
      [
        HttpCodes.BadRequest,
        `The artifact name ${artifactName} is not valid. Request URL ${artifactUrl}`
      ]
    ])

    const response = await retryHttpClientRequest(
      'Create Artifact Container',
      async () => client.post(artifactUrl, data, headers),
      customErrorMessages
    )
    const body: string = await response.readBody()
    return JSON.parse(body)
  }
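
  // Editor's note (illustrative, not part of the original source): for an artifact named
  // 'my-artifact' with options.retentionDays = 5, the serialized POST body created above is:
  //
  //   {
  //     "Type": "actions_storage",
  //     "Name": "my-artifact",
  //     "RetentionDays": 5
  //   }
  //
  // and it is sent to getArtifactUrl() with Content-Type: application/json; the parsed JSON
  // response describes the newly created file container.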

  /**
   * Concurrently upload all of the files in chunks
   * @param {string} uploadUrl Base Url for the artifact that was created
   * @param {UploadSpecification[]} filesToUpload A list of information about the files being uploaded
   * @returns The size of all the files uploaded in bytes
   */
  async uploadArtifactToFileContainer(
    uploadUrl: string,
    filesToUpload: UploadSpecification[],
    options?: UploadOptions
  ): Promise<UploadResults> {
    const FILE_CONCURRENCY = getUploadFileConcurrency()
    const MAX_CHUNK_SIZE = getUploadChunkSize()
    core.debug(
      `File Concurrency: ${FILE_CONCURRENCY}, and Chunk Size: ${MAX_CHUNK_SIZE}`
    )

    const parameters: UploadFileParameters[] = []
    // by default, file uploads will continue if there is an error unless specified differently in the options
    let continueOnError = true
    if (options) {
      if (options.continueOnError === false) {
        continueOnError = false
      }
    }

    // prepare the necessary parameters to upload all the files
    for (const file of filesToUpload) {
      const resourceUrl = new URL(uploadUrl)
      resourceUrl.searchParams.append('itemPath', file.uploadFilePath)
      parameters.push({
        file: file.absoluteFilePath,
        resourceUrl: resourceUrl.toString(),
        maxChunkSize: MAX_CHUNK_SIZE,
        continueOnError
      })
    }

    const parallelUploads = [...new Array(FILE_CONCURRENCY).keys()]
    const failedItemsToReport: string[] = []
    let currentFile = 0
    let completedFiles = 0
    let uploadFileSize = 0
    let totalFileSize = 0
    let abortPendingFileUploads = false

    this.statusReporter.setTotalNumberOfFilesToProcess(filesToUpload.length)
    this.statusReporter.start()

    // only allow a certain number of files to be uploaded at once, this is done to reduce potential errors
    await Promise.all(
      parallelUploads.map(async index => {
        while (currentFile < filesToUpload.length) {
          const currentFileParameters = parameters[currentFile]
          currentFile += 1
          if (abortPendingFileUploads) {
            failedItemsToReport.push(currentFileParameters.file)
            continue
          }

          const startTime = performance.now()
          const uploadFileResult = await this.uploadFileAsync(
            index,
            currentFileParameters
          )

          if (core.isDebug()) {
            core.debug(
              `File: ${++completedFiles}/${filesToUpload.length}. ${
                currentFileParameters.file
              } took ${(performance.now() - startTime).toFixed(
                3
              )} milliseconds to finish upload`
            )
          }

          uploadFileSize += uploadFileResult.successfulUploadSize
          totalFileSize += uploadFileResult.totalSize
          if (uploadFileResult.isSuccess === false) {
            failedItemsToReport.push(currentFileParameters.file)
            if (!continueOnError) {
              // fail fast
              core.error(`aborting artifact upload`)
              abortPendingFileUploads = true
            }
          }
          this.statusReporter.incrementProcessedCount()
        }
      })
    )

    this.statusReporter.stop()
    // done uploading, safety dispose all connections
    this.uploadHttpManager.disposeAndReplaceAllClients()

    core.info(`Total size of all the files uploaded is ${uploadFileSize} bytes`)
    return {
      uploadSize: uploadFileSize,
      totalSize: totalFileSize,
      failedItems: failedItemsToReport
    }
  }
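
  // Editor's note (sketch, not part of the original source): the map/while construct above is a
  // simple worker pool. Standalone, the pattern looks like the function below; it is safe
  // without locks because the read-and-increment of the shared index happens synchronously
  // before any await:
  //
  //   async function workerPool<T>(
  //     items: T[],
  //     concurrency: number,
  //     work: (item: T) => Promise<void>
  //   ): Promise<void> {
  //     let next = 0
  //     const workers = [...new Array(concurrency).keys()].map(async () => {
  //       while (next < items.length) {
  //         const item = items[next]
  //         next += 1
  //         await work(item)
  //       }
  //     })
  //     await Promise.all(workers)
  //   }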

  /**
   * Asynchronously uploads a file. The file is compressed and uploaded using GZip if it is determined to save space.
   * If the upload file is bigger than the max chunk size it will be uploaded via multiple calls
   * @param {number} httpClientIndex The index of the httpClient that is being used to make all of the calls
   * @param {UploadFileParameters} parameters Information about the file that needs to be uploaded
   * @returns The size of the file that was uploaded in bytes along with any failed uploads
   */
  private async uploadFileAsync(
    httpClientIndex: number,
    parameters: UploadFileParameters
  ): Promise<UploadFileResult> {
    const fileStat: fs.Stats = await stat(parameters.file)
    const totalFileSize = fileStat.size
    const isFIFO = fileStat.isFIFO()
    let offset = 0
    let isUploadSuccessful = true
    let failedChunkSizes = 0
    let uploadFileSize = 0
    let isGzip = true

    // If the file being uploaded is less than 64k in size, an in-memory buffer is used to create the GZip file
    // to increase throughput and to minimize disk I/O. Named pipes report a file size of zero, so in that case
    // the file is not read into memory.
    if (!isFIFO && totalFileSize < 65536) {
      core.debug(
        `${parameters.file} is less than 64k in size. Creating a gzip file in-memory to potentially reduce the upload size`
      )
      const buffer = await createGZipFileInBuffer(parameters.file)

      // An open stream is needed in the event of a failure and we need to retry. If a NodeJS.ReadableStream is directly passed in,
      // it will not properly get reset to the start of the stream if a chunk upload needs to be retried
      let openUploadStream: () => NodeJS.ReadableStream

      if (totalFileSize < buffer.byteLength) {
        // compression did not help with reducing the size, use a readable stream from the original file for upload
        core.debug(
          `The gzip file created for ${parameters.file} did not help with reducing the size of the file. The original file will be uploaded as-is`
        )
        openUploadStream = () => fs.createReadStream(parameters.file)
        isGzip = false
        uploadFileSize = totalFileSize
      } else {
        // create a readable stream using a PassThrough stream that is both readable and writable
        core.debug(
          `A gzip file created for ${parameters.file} helped with reducing the size of the original file. The file will be uploaded using gzip.`
        )
        openUploadStream = () => {
          const passThrough = new stream.PassThrough()
          passThrough.end(buffer)
          return passThrough
        }
        uploadFileSize = buffer.byteLength
      }

      const result = await this.uploadChunk(
        httpClientIndex,
        parameters.resourceUrl,
        openUploadStream,
        0,
        uploadFileSize - 1,
        uploadFileSize,
        isGzip,
        totalFileSize
      )

      if (!result) {
        // chunk failed to upload
        isUploadSuccessful = false
        failedChunkSizes += uploadFileSize
        core.warning(`Aborting upload for ${parameters.file} due to failure`)
      }

      return {
        isSuccess: isUploadSuccessful,
        successfulUploadSize: uploadFileSize - failedChunkSizes,
        totalSize: totalFileSize
      }
    } else {
      // the file that is being uploaded is greater than 64k in size, so a temporary file gets created on disk using the
      // npm tmp-promise package, and this file is used to create a GZipped file
      const tempFile = await tmp.file()
      core.debug(
        `${parameters.file} is greater than 64k in size. Creating a gzip file on-disk ${tempFile.path} to potentially reduce the upload size`
      )

      // create a GZip file of the original file being uploaded, the original file should not be modified in any way
      uploadFileSize = await createGZipFileOnDisk(
        parameters.file,
        tempFile.path
      )

      let uploadFilePath = tempFile.path

      // compression did not help with size reduction, use the original file for upload and delete the temp GZip file
      // for named pipes totalFileSize is zero, this assumes compression did help
      if (!isFIFO && totalFileSize < uploadFileSize) {
        core.debug(
          `The gzip file created for ${parameters.file} did not help with reducing the size of the file. The original file will be uploaded as-is`
        )
        uploadFileSize = totalFileSize
        uploadFilePath = parameters.file
        isGzip = false
      } else {
        core.debug(
          `The gzip file created for ${parameters.file} is smaller than the original file. The file will be uploaded using gzip.`
        )
      }

      let abortFileUpload = false
      // upload only a single chunk at a time
      while (offset < uploadFileSize) {
        const chunkSize = Math.min(
          uploadFileSize - offset,
          parameters.maxChunkSize
        )

        const startChunkIndex = offset
        const endChunkIndex = offset + chunkSize - 1
        offset += parameters.maxChunkSize

        if (abortFileUpload) {
          // if we don't want to continue in the event of an error, any pending upload chunks will be marked as failed
          failedChunkSizes += chunkSize
          continue
        }

        const result = await this.uploadChunk(
          httpClientIndex,
          parameters.resourceUrl,
          () =>
            fs.createReadStream(uploadFilePath, {
              start: startChunkIndex,
              end: endChunkIndex,
              autoClose: false
            }),
          startChunkIndex,
          endChunkIndex,
          uploadFileSize,
          isGzip,
          totalFileSize
        )

        if (!result) {
          // Chunk failed to upload, report as failed and do not continue uploading any more chunks for the file. It is possible that part of a chunk was
          // successfully uploaded so the server may report a different size for what was uploaded
          isUploadSuccessful = false
          failedChunkSizes += chunkSize
          core.warning(`Aborting upload for ${parameters.file} due to failure`)
          abortFileUpload = true
        } else {
          // if an individual file is greater than 8MB (1024*1024*8) in size, display extra information about the upload status
          if (uploadFileSize > 8388608) {
            this.statusReporter.updateLargeFileStatus(
              parameters.file,
              startChunkIndex,
              endChunkIndex,
              uploadFileSize
            )
          }
        }
      }

      // Delete the temporary file that was created as part of the upload. If the temp file does not get manually deleted by
      // calling cleanup, it gets removed when the node process exits. For more info see: https://www.npmjs.com/package/tmp-promise#about
      core.debug(`deleting temporary gzip file ${tempFile.path}`)
      await tempFile.cleanup()

      return {
        isSuccess: isUploadSuccessful,
        successfulUploadSize: uploadFileSize - failedChunkSizes,
        totalSize: totalFileSize
      }
    }
  }
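
  // Editor's note (worked example, not part of the original source): with uploadFileSize of
  // 10485760 bytes (10 MB) and maxChunkSize of 4194304 bytes (4 MB), the loop above issues
  // three sequential chunks with inclusive byte ranges:
  //   chunk 1: start 0,       end 4194303   (4 MB)
  //   chunk 2: start 4194304, end 8388607   (4 MB)
  //   chunk 3: start 8388608, end 10485759  (remaining 2 MB, clamped by Math.min)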

  /**
   * Uploads a chunk of an individual file to the specified resourceUrl. If the upload fails and the status code
   * indicates a retryable status, the chunk upload is attempted again
   * @param {number} httpClientIndex The index of the httpClient being used to make all the necessary calls
   * @param {string} resourceUrl Url of the resource that the chunk will be uploaded to
   * @param {NodeJS.ReadableStream} openStream Stream of the file that will be uploaded
   * @param {number} start Starting byte index of file that the chunk belongs to
   * @param {number} end Ending byte index of file that the chunk belongs to
   * @param {number} uploadFileSize Total size of the file in bytes that is being uploaded
   * @param {boolean} isGzip Denotes if we are uploading a Gzip compressed stream
   * @param {number} totalFileSize Original total size of the file that is being uploaded
   * @returns if the chunk was successfully uploaded
   */
  private async uploadChunk(
    httpClientIndex: number,
    resourceUrl: string,
    openStream: () => NodeJS.ReadableStream,
    start: number,
    end: number,
    uploadFileSize: number,
    isGzip: boolean,
    totalFileSize: number
  ): Promise<boolean> {
    // open a new stream and read it to compute the digest
    const digest = await digestForStream(openStream())

    // prepare all the necessary headers before making any http call
    const headers = getUploadHeaders(
      'application/octet-stream',
      true,
      isGzip,
      totalFileSize,
      end - start + 1,
      getContentRange(start, end, uploadFileSize),
      digest
    )

    const uploadChunkRequest = async (): Promise<HttpClientResponse> => {
      const client = this.uploadHttpManager.getClient(httpClientIndex)
      return await client.sendStream('PUT', resourceUrl, openStream(), headers)
    }

    let retryCount = 0
    const retryLimit = getRetryLimit()

    // Increments the current retry count and then checks if the retry limit has been reached
    // If there have been too many retries, fail so the upload stops
    const incrementAndCheckRetryLimit = (
      response?: HttpClientResponse
    ): boolean => {
      retryCount++
      if (retryCount > retryLimit) {
        if (response) {
          displayHttpDiagnostics(response)
        }
        core.info(
          `Retry limit has been reached for chunk at offset ${start} to ${resourceUrl}`
        )
        return true
      }
      return false
    }

    const backOff = async (retryAfterValue?: number): Promise<void> => {
      this.uploadHttpManager.disposeAndReplaceClient(httpClientIndex)
      if (retryAfterValue) {
        core.info(
          `Backoff due to too many requests, retry #${retryCount}. Waiting for ${retryAfterValue} milliseconds before continuing the upload`
        )
        await sleep(retryAfterValue)
      } else {
        const backoffTime = getExponentialRetryTimeInMilliseconds(retryCount)
        core.info(
          `Exponential backoff for retry #${retryCount}. Waiting for ${backoffTime} milliseconds before continuing the upload at offset ${start}`
        )
        await sleep(backoffTime)
      }
      core.info(
        `Finished backoff for retry #${retryCount}, continuing with upload`
      )
      return
    }

    // allow for failed chunks to be retried multiple times
    while (retryCount <= retryLimit) {
      let response: HttpClientResponse

      try {
        response = await uploadChunkRequest()
      } catch (error) {
        // if an error is caught, it is usually indicative of a timeout so retry the upload
        core.info(
          `An error has been caught by http-client index ${httpClientIndex}, retrying the upload`
        )
        // eslint-disable-next-line no-console
        console.log(error)

        if (incrementAndCheckRetryLimit()) {
          return false
        }
        await backOff()
        continue
      }

      // Always read the body of the response. There is potential for a resource leak if the body is not read, which will
      // result in the connection remaining open along with unintended consequences when trying to dispose of the client
      await response.readBody()

      if (isSuccessStatusCode(response.message.statusCode)) {
        return true
      } else if (isRetryableStatusCode(response.message.statusCode)) {
        core.info(
          `A ${response.message.statusCode} status code has been received, will attempt to retry the upload`
        )
        if (incrementAndCheckRetryLimit(response)) {
          return false
        }
        isThrottledStatusCode(response.message.statusCode)
          ? await backOff(
              tryGetRetryAfterValueTimeInMilliseconds(response.message.headers)
            )
          : await backOff()
      } else {
        core.error(
          `Unexpected response. Unable to upload chunk to ${resourceUrl}`
        )
        displayHttpDiagnostics(response)
        return false
      }
    }
    return false
  }

  /**
   * Updates the size of the artifact from -1 which was initially set when the container was first created for the artifact.
   * Updating the size indicates that we are done uploading all the contents of the artifact
   */
  async patchArtifactSize(size: number, artifactName: string): Promise<void> {
    const resourceUrl = new URL(getArtifactUrl())
    resourceUrl.searchParams.append('artifactName', artifactName)

    const parameters: PatchArtifactSize = {Size: size}
    const data: string = JSON.stringify(parameters, null, 2)
    core.debug(`URL is ${resourceUrl.toString()}`)

    // use the first client from the httpManager, `keep-alive` is not used so the connection will close immediately
    const client = this.uploadHttpManager.getClient(0)
    const headers = getUploadHeaders('application/json', false)

    // Extra information to display when a particular HTTP code is returned
    const customErrorMessages: Map<number, string> = new Map([
      [
        HttpCodes.NotFound,
        `An Artifact with the name ${artifactName} was not found`
      ]
    ])

    // TODO retry for all possible response codes; the artifact upload is pretty much complete at this point, so we should try to finish it at all costs
    const response = await retryHttpClientRequest(
      'Finalize artifact upload',
      async () => client.patch(resourceUrl.toString(), data, headers),
      customErrorMessages
    )
    await response.readBody()
    core.debug(
      `Artifact ${artifactName} has been successfully uploaded, total size in bytes: ${size}`
    )
  }
}

interface UploadFileParameters {
  file: string
  resourceUrl: string
  maxChunkSize: number
  continueOnError: boolean
}

interface UploadFileResult {
  isSuccess: boolean
  successfulUploadSize: number
  totalSize: number
}

@@ -1,35 +0,0 @@
export interface UploadOptions {
  /**
   * Indicates if the artifact upload should continue if a file or chunk fails to upload from any error.
   * If there is an error during upload, a partial artifact will always be associated and available for
   * download at the end. The size reported will be the amount of storage that the user or org will be
   * charged for the partial artifact. Defaults to true if not specified
   *
   * If set to false, and an error is encountered, all other uploads will stop and any files or chunks
   * that were queued will not be attempted to be uploaded. The partial artifact available will only
   * include files and chunks up until the failure
   *
   * If set to true and an error is encountered, the failed file will be skipped and ignored and all
   * other queued files will be attempted to be uploaded. The partial artifact at the end will have all
   * files with the exception of the problematic file(s)/chunk(s) that failed to upload
   *
   */
  continueOnError?: boolean

  /**
   * Duration after which artifact will expire in days.
   *
   * By default artifact expires after 90 days:
   * https://docs.github.com/en/actions/configuring-and-managing-workflows/persisting-workflow-data-using-artifacts#downloading-and-deleting-artifacts-after-a-workflow-run-is-complete
   *
   * Use this option to override the default expiry.
   *
   * Min value: 1
   * Max value: 90 unless changed by repository setting
   *
   * If this is set to a greater value than the retention settings allowed, the retention on artifacts
   * will be reduced to match the max value allowed on server, and the upload process will continue. An
   * input of 0 assumes default retention setting.
   */
  retentionDays?: number
}

@@ -1,22 +0,0 @@
export interface UploadResponse {
  /**
   * The name of the artifact that was uploaded
   */
  artifactName: string

  /**
   * A list of all items that are meant to be uploaded as part of the artifact
   */
  artifactItems: string[]

  /**
   * Total size of the artifact in bytes that was uploaded
   */
  size: number

  /**
   * A list of items that were not uploaded as part of the artifact (includes queued items that were not uploaded if
   * continueOnError is set to false). This is a subset of artifactItems.
   */
  failedItems: string[]
}

@@ -1,94 +0,0 @@
import * as fs from 'fs'
import {debug} from '@actions/core'
import {join, normalize, resolve} from 'path'
import {checkArtifactFilePath} from './path-and-artifact-name-validation'

export interface UploadSpecification {
  absoluteFilePath: string
  uploadFilePath: string
}

/**
 * Creates a specification that describes how each file that is part of the artifact will be uploaded
 * @param artifactName the name of the artifact being uploaded. Used during upload to denote where the artifact is stored on the server
 * @param rootDirectory an absolute file path that denotes the path that should be removed from the beginning of each artifact file
 * @param artifactFiles a list of absolute file paths that denote what should be uploaded as part of the artifact
 */
export function getUploadSpecification(
  artifactName: string,
  rootDirectory: string,
  artifactFiles: string[]
): UploadSpecification[] {
  // artifact name was checked earlier on, no need to check again
  const specifications: UploadSpecification[] = []

  if (!fs.existsSync(rootDirectory)) {
    throw new Error(`Provided rootDirectory ${rootDirectory} does not exist`)
  }
  if (!fs.statSync(rootDirectory).isDirectory()) {
    throw new Error(
      `Provided rootDirectory ${rootDirectory} is not a valid directory`
    )
  }
  // Normalize and resolve, this allows for either absolute or relative paths to be used
  rootDirectory = normalize(rootDirectory)
  rootDirectory = resolve(rootDirectory)

  /*
     Example to demonstrate behavior

     Input:
       artifactName: my-artifact
       rootDirectory: '/home/user/files/plz-upload'
       artifactFiles: [
         '/home/user/files/plz-upload/file1.txt',
         '/home/user/files/plz-upload/file2.txt',
         '/home/user/files/plz-upload/dir/file3.txt'
       ]

     Output:
       specifications: [
         ['/home/user/files/plz-upload/file1.txt', 'my-artifact/file1.txt'],
         ['/home/user/files/plz-upload/file2.txt', 'my-artifact/file2.txt'],
         ['/home/user/files/plz-upload/dir/file3.txt', 'my-artifact/dir/file3.txt']
       ]
  */
  for (let file of artifactFiles) {
    if (!fs.existsSync(file)) {
      throw new Error(`File ${file} does not exist`)
    }
    if (!fs.statSync(file).isDirectory()) {
      // Normalize and resolve, this allows for either absolute or relative paths to be used
      file = normalize(file)
      file = resolve(file)
      if (!file.startsWith(rootDirectory)) {
        throw new Error(
          `The rootDirectory: ${rootDirectory} is not a parent directory of the file: ${file}`
        )
      }

      // Check for forbidden characters in file paths that will be rejected during upload
      const uploadPath = file.replace(rootDirectory, '')
      checkArtifactFilePath(uploadPath)

      /*
        uploadFilePath denotes where the file will be uploaded in the file container on the server. During a run, if multiple artifacts are uploaded, they will all
        be saved in the same container. The artifact name is used as the root directory in the container to separate and distinguish uploaded artifacts

        path.join handles all the following cases and would return 'artifact-name/file-to-upload.txt'
          join('artifact-name/', 'file-to-upload.txt')
          join('artifact-name/', '/file-to-upload.txt')
          join('artifact-name', 'file-to-upload.txt')
          join('artifact-name', '/file-to-upload.txt')
      */
      specifications.push({
        absoluteFilePath: file,
        uploadFilePath: join(artifactName, uploadPath)
      })
    } else {
      // Directories are rejected by the server during upload
      debug(`Removing ${file} from rawSearchResults because it is a directory`)
    }
  }
  return specifications
}

@@ -0,0 +1,18 @@
import {UploadOptions} from './upload-options'
import {UploadResponse} from './upload-response'

export async function uploadArtifact(
  name: string,
  files: string[], // eslint-disable-line @typescript-eslint/no-unused-vars
  rootDirectory: string, // eslint-disable-line @typescript-eslint/no-unused-vars
  options?: UploadOptions | undefined // eslint-disable-line @typescript-eslint/no-unused-vars
): Promise<UploadResponse> {
  // TODO - Implement upload functionality

  const uploadResponse: UploadResponse = {
    artifactName: name,
    size: 0
  }

  return uploadResponse
}
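
// Editor's note: a usage sketch for the new stub above, not part of the original source. The
// call shape is taken directly from the signature; actual upload behavior is still TODO.
import {uploadArtifact} from './upload-artifact'

async function example(): Promise<void> {
  const response = await uploadArtifact(
    'my-artifact',
    ['/home/user/files/plz-upload/file1.txt'],
    '/home/user/files/plz-upload'
  )
  // currently always {artifactName: 'my-artifact', size: 0} until the TODO is implemented
  console.log(response.artifactName, response.size)
}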

@@ -0,0 +1,18 @@
export interface UploadOptions {
  /**
   * Duration after which artifact will expire in days.
   *
   * By default artifact expires after 90 days:
   * https://docs.github.com/en/actions/configuring-and-managing-workflows/persisting-workflow-data-using-artifacts#downloading-and-deleting-artifacts-after-a-workflow-run-is-complete
   *
   * Use this option to override the default expiry.
   *
   * Min value: 1
   * Max value: 90 unless changed by repository setting
   *
   * If this is set to a greater value than the retention settings allowed, the retention on artifacts
   * will be reduced to match the max value allowed on server, and the upload process will continue. An
   * input of 0 assumes default retention setting.
   */
  retentionDays?: number
}

@@ -0,0 +1,11 @@
export interface UploadResponse {
  /**
   * The name of the artifact that was uploaded
   */
  artifactName: string

  /**
   * Total size of the artifact that was uploaded in bytes
   */
  size: number
}

@@ -1,325 +0,0 @@
import crypto from 'crypto'
import {promises as fs} from 'fs'
import {IncomingHttpHeaders, OutgoingHttpHeaders} from 'http'
import {debug, info, warning} from '@actions/core'
import {HttpCodes, HttpClient, HttpClientResponse} from '@actions/http-client'
import {BearerCredentialHandler} from '@actions/http-client/lib/auth'
import {
  getRuntimeToken,
  getRuntimeUrl,
  getWorkFlowRunId,
  getRetryMultiplier,
  getInitialRetryIntervalInMilliseconds
} from './config-variables'
import CRC64 from './crc64'

/**
 * Returns a retry time in milliseconds that exponentially gets larger
 * depending on the amount of retries that have been attempted
 */
export function getExponentialRetryTimeInMilliseconds(
  retryCount: number
): number {
  if (retryCount < 0) {
    throw new Error('RetryCount should not be negative')
  } else if (retryCount === 0) {
    return getInitialRetryIntervalInMilliseconds()
  }

  const minTime =
    getInitialRetryIntervalInMilliseconds() * getRetryMultiplier() * retryCount
  const maxTime = minTime * getRetryMultiplier()

  // returns a random number between the minTime (inclusive) and the maxTime (exclusive)
  return Math.trunc(Math.random() * (maxTime - minTime) + minTime)
}
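
// Editor's note (worked example, not part of the original source): assuming the defaults
// getInitialRetryIntervalInMilliseconds() === 3000 and getRetryMultiplier() === 1.5 (both are
// configurable through environment variables, so these values are an assumption):
//   retryCount 0 -> exactly 3000 ms
//   retryCount 1 -> random value in [4500, 6750) ms    (3000 * 1.5 * 1, times 1.5 for the max)
//   retryCount 2 -> random value in [9000, 13500) ms
//   retryCount 3 -> random value in [13500, 20250) ms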
/**
|
||||
* Parses a env variable that is a number
|
||||
*/
|
||||
export function parseEnvNumber(key: string): number | undefined {
|
||||
const value = Number(process.env[key])
|
||||
if (Number.isNaN(value) || value < 0) {
|
||||
return undefined
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
/**
|
||||
* Various utility functions to help with the necessary API calls
|
||||
*/
|
||||
export function getApiVersion(): string {
|
||||
return '6.0-preview'
|
||||
}
|
||||
|
||||
export function isSuccessStatusCode(statusCode?: number): boolean {
|
||||
if (!statusCode) {
|
||||
return false
|
||||
}
|
||||
return statusCode >= 200 && statusCode < 300
|
||||
}
|
||||
|
||||
export function isForbiddenStatusCode(statusCode?: number): boolean {
|
||||
if (!statusCode) {
|
||||
return false
|
||||
}
|
||||
return statusCode === HttpCodes.Forbidden
|
||||
}
|
||||
|
||||
export function isRetryableStatusCode(statusCode: number | undefined): boolean {
|
||||
if (!statusCode) {
|
||||
return false
|
||||
}
|
||||
|
||||
const retryableStatusCodes = [
|
||||
HttpCodes.BadGateway,
|
||||
HttpCodes.GatewayTimeout,
|
||||
HttpCodes.InternalServerError,
|
||||
HttpCodes.ServiceUnavailable,
|
||||
HttpCodes.TooManyRequests,
|
||||
413 // Payload Too Large
|
||||
]
|
||||
return retryableStatusCodes.includes(statusCode)
|
||||
}
|
||||
|
||||
export function isThrottledStatusCode(statusCode?: number): boolean {
|
||||
if (!statusCode) {
|
||||
return false
|
||||
}
|
||||
return statusCode === HttpCodes.TooManyRequests
|
||||
}
|
||||
|
||||
/**
|
||||
* Attempts to get the retry-after value from a set of http headers. The retry time
|
||||
* is originally denoted in seconds, so if present, it is converted to milliseconds
|
||||
* @param headers all the headers received when making an http call
|
||||
*/
|
||||
export function tryGetRetryAfterValueTimeInMilliseconds(
|
||||
headers: IncomingHttpHeaders
|
||||
): number | undefined {
|
||||
if (headers['retry-after']) {
|
||||
const retryTime = Number(headers['retry-after'])
|
||||
if (!isNaN(retryTime)) {
|
||||
info(`Retry-After header is present with a value of ${retryTime}`)
|
||||
return retryTime * 1000
|
||||
}
|
||||
info(
|
||||
`Returned retry-after header value: ${retryTime} is non-numeric and cannot be used`
|
||||
)
|
||||
return undefined
|
||||
}
|
||||
info(
|
||||
`No retry-after header was found. Dumping all headers for diagnostic purposes`
|
||||
)
|
||||
// eslint-disable-next-line no-console
|
||||
console.log(headers)
|
||||
return undefined
|
||||
}
|
||||
|
||||
export function getContentRange(
|
||||
start: number,
|
||||
end: number,
|
||||
total: number
|
||||
): string {
|
||||
// Format: `bytes start-end/fileSize
|
||||
// start and end are inclusive
|
||||
// For a 200 byte chunk starting at byte 0:
|
||||
// Content-Range: bytes 0-199/200
|
||||
return `bytes ${start}-${end}/${total}`
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets all the necessary headers when downloading an artifact
|
||||
* @param {string} contentType the type of content being uploaded
|
||||
* @param {boolean} isKeepAlive is the same connection being used to make multiple calls
|
||||
* @param {boolean} acceptGzip can we accept a gzip encoded response
|
||||
* @param {string} acceptType the type of content that we can accept
|
||||
* @returns appropriate headers to make a specific http call during artifact download
|
||||
*/
|
||||
export function getDownloadHeaders(
|
||||
contentType: string,
|
||||
isKeepAlive?: boolean,
|
||||
acceptGzip?: boolean
|
||||
): OutgoingHttpHeaders {
|
||||
const requestOptions: OutgoingHttpHeaders = {}
|
||||
|
||||
if (contentType) {
|
||||
requestOptions['Content-Type'] = contentType
|
||||
}
|
||||
if (isKeepAlive) {
|
||||
requestOptions['Connection'] = 'Keep-Alive'
|
||||
// keep alive for at least 10 seconds before closing the connection
|
||||
requestOptions['Keep-Alive'] = '10'
|
||||
}
|
||||
if (acceptGzip) {
|
||||
// if we are expecting a response with gzip encoding, it should be using an octet-stream in the accept header
|
||||
requestOptions['Accept-Encoding'] = 'gzip'
|
||||
requestOptions[
|
||||
'Accept'
|
||||
] = `application/octet-stream;api-version=${getApiVersion()}`
|
||||
} else {
|
||||
// default to application/json if we are not working with gzip content
|
||||
requestOptions['Accept'] = `application/json;api-version=${getApiVersion()}`
|
||||
}
|
||||
|
||||
return requestOptions
|
||||
}
|
||||
|
||||
/**
 * Sets all the necessary headers when uploading an artifact
 * @param {string} contentType the type of content being uploaded
 * @param {boolean} isKeepAlive is the same connection being used to make multiple calls
 * @param {boolean} isGzip is the connection being used to upload GZip compressed content
 * @param {number} uncompressedLength the original size of the content if something is being uploaded that has been compressed
 * @param {number} contentLength the length of the content that is being uploaded
 * @param {string} contentRange the range of the content that is being uploaded
 * @param {StreamDigest} digest the CRC64 and MD5 digests of the content being uploaded
 * @returns appropriate headers to make a specific http call during artifact upload
 */
export function getUploadHeaders(
  contentType: string,
  isKeepAlive?: boolean,
  isGzip?: boolean,
  uncompressedLength?: number,
  contentLength?: number,
  contentRange?: string,
  digest?: StreamDigest
): OutgoingHttpHeaders {
  const requestOptions: OutgoingHttpHeaders = {}
  requestOptions['Accept'] = `application/json;api-version=${getApiVersion()}`
  if (contentType) {
    requestOptions['Content-Type'] = contentType
  }
  if (isKeepAlive) {
    requestOptions['Connection'] = 'Keep-Alive'
    // keep alive for at least 10 seconds before closing the connection
    requestOptions['Keep-Alive'] = '10'
  }
  if (isGzip) {
    requestOptions['Content-Encoding'] = 'gzip'
    requestOptions['x-tfs-filelength'] = uncompressedLength
  }
  if (contentLength) {
    requestOptions['Content-Length'] = contentLength
  }
  if (contentRange) {
    requestOptions['Content-Range'] = contentRange
  }
  if (digest) {
    requestOptions['x-actions-results-crc64'] = digest.crc64
    requestOptions['x-actions-results-md5'] = digest.md5
  }

  return requestOptions
}

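// A minimal sketch for one gzip-compressed chunk upload, assuming an 8 MiB
// chunk that compressed down to 4 MiB; all sizes are example values and
// `exampleGzipUploadHeaders` is a hypothetical helper. The content range
// describing the uncompressed offsets is also an assumption of this sketch.
export function exampleGzipUploadHeaders(): OutgoingHttpHeaders {
  const uncompressedLength = 8388608 // chunk size before compression
  const compressedLength = 4194304 // bytes actually sent over the wire
  return getUploadHeaders(
    'application/octet-stream',
    true, // reuse the connection across chunks
    true, // sets Content-Encoding: gzip and x-tfs-filelength
    uncompressedLength,
    compressedLength,
    getContentRange(0, uncompressedLength - 1, uncompressedLength)
  )
}
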
export function createHttpClient(userAgent: string): HttpClient {
  return new HttpClient(userAgent, [
    new BearerCredentialHandler(getRuntimeToken())
  ])
}

export function getArtifactUrl(): string {
  const artifactUrl = `${getRuntimeUrl()}_apis/pipelines/workflows/${getWorkFlowRunId()}/artifacts?api-version=${getApiVersion()}`
  debug(`Artifact Url: ${artifactUrl}`)
  return artifactUrl
}

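// A minimal end-to-end sketch: create an artifact container by POSTing to the
// artifact URL with the bearer-authenticated client. The payload shape here is
// an assumption for illustration, not a documented contract, and
// `exampleCreateContainer` is a hypothetical helper.
export async function exampleCreateContainer(
  name: string
): Promise<HttpClientResponse> {
  const client = createHttpClient('@actions/artifact-example')
  const data = JSON.stringify({Type: 'actions_storage', Name: name})
  return client.post(getArtifactUrl(), data, getUploadHeaders('application/json'))
}
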
/**
 * Displays diagnostic information about an http call that may have gone wrong during
 * either upload or download. The HttpClientResponse object returned by the actions http
 * client is very large and most of it (the TLSSocket, the Readable state, etc.) is not
 * useful for diagnostics, so only the status code, status message and headers are logged.
 */
export function displayHttpDiagnostics(response: HttpClientResponse): void {
  info(
    `##### Begin Diagnostic HTTP information #####
Status Code: ${response.message.statusCode}
Status Message: ${response.message.statusMessage}
Header Information: ${JSON.stringify(response.message.headers, undefined, 2)}
###### End Diagnostic HTTP information ######`
  )
}

export async function createDirectoriesForArtifact(
  directories: string[]
): Promise<void> {
  for (const directory of directories) {
    await fs.mkdir(directory, {
      recursive: true
    })
  }
}

export async function createEmptyFilesForArtifact(
  emptyFilesToCreate: string[]
): Promise<void> {
  for (const filePath of emptyFilesToCreate) {
    await (await fs.open(filePath, 'w')).close()
  }
}

export async function getFileSize(filePath: string): Promise<number> {
  const stats = await fs.stat(filePath)
  debug(
    `${filePath} size:(${stats.size}) blksize:(${stats.blksize}) blocks:(${stats.blocks})`
  )
  return stats.size
}

export async function rmFile(filePath: string): Promise<void> {
  await fs.unlink(filePath)
}

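// A sketch of how these helpers might be combined when a downloaded artifact
// is restored to disk: directories first, then explicit zero-byte files since
// nothing is streamed for them. The specification shape below is hypothetical.
interface ExampleDownloadSpec {
  directoryStructure: string[]
  emptyFilesToDownload: string[]
}

export async function examplePrepareDownloadLocations(
  spec: ExampleDownloadSpec
): Promise<void> {
  await createDirectoriesForArtifact(spec.directoryStructure)
  await createEmptyFilesForArtifact(spec.emptyFilesToDownload)
}
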
export function getProperRetention(
  retentionInput: number,
  retentionSetting: string | undefined
): number {
  // reject zero and negative values to match the stated minimum of 1 day
  if (retentionInput < 1) {
    throw new Error('Invalid retention, minimum value is 1.')
  }

  let retention = retentionInput
  if (retentionSetting) {
    const maxRetention = parseInt(retentionSetting)
    if (!isNaN(maxRetention) && maxRetention < retention) {
      warning(
        `Retention days exceeds the maximum allowed by the repository setting, reducing retention to ${maxRetention} days`
      )
      retention = maxRetention
    }
  }
  return retention
}

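// A worked sketch: a workflow requesting 90 days in a repository whose limit
// (exposed via the GITHUB_RETENTION_DAYS environment variable) is 30 gets
// clamped to 30 and a warning is logged. `exampleRetention` is hypothetical.
export function exampleRetention(): number {
  return getProperRetention(90, '30') // returns 30 and emits a warning
}
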
export async function sleep(milliseconds: number): Promise<void> {
  return new Promise(resolve => setTimeout(resolve, milliseconds))
}

export interface StreamDigest {
  crc64: string
  md5: string
}

export async function digestForStream(
  stream: NodeJS.ReadableStream
): Promise<StreamDigest> {
  return new Promise((resolve, reject) => {
    const crc64 = new CRC64()
    const md5 = crypto.createHash('md5')
    stream
      .on('data', data => {
        crc64.update(data)
        md5.update(data)
      })
      .on('end', () =>
        resolve({
          crc64: crc64.digest('base64') as string,
          md5: md5.digest('base64')
        })
      )
      .on('error', reject)
  })
}

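// A minimal usage sketch, assuming a file is digested before upload so the
// x-actions-results-* headers can be populated; `exampleDigestFile` is a
// hypothetical helper that loads the callback 'fs' module lazily so this
// sketch does not have to touch the file's import block.
export async function exampleDigestFile(
  filePath: string
): Promise<StreamDigest> {
  const {createReadStream} = await import('fs')
  return digestForStream(createReadStream(filePath))
}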