
Updates to @actions/artifact package (#367)

* GZip implementation

* Optimizations and cleanup

* Update tests

* More test updates

* Update packages/artifact/src/internal-utils.ts

Co-Authored-By: Josh Gross <joshmgross@github.com>

* Clarification around Upload Paths

* Refactor to make http clients classes

* GZip fixes

* Documentation around compression

* More detailed status information during large uploads

* Pretty format

* Percentage updates without rounding

* Fix edge cases with formatting numbers

* Update packages/artifact/src/internal-utils.ts

Co-Authored-By: Josh Gross <joshmgross@github.com>

* Cleanup

* Small reorg with status reporter

* PR Feedback

* Cleanup + Simplification

* Test Cleanup

* Mock updates

* More cleanup

* Format fixes

* Overhaul to the http-manager

* Fix tests

* Promisify stats

* Documentation around implementation

* Improvements to documentation

* PR Feedback

* Remove Downloading multiple artifacts concurrently

Co-authored-by: Josh Gross <joshmgross@github.com>
pull/381/head
Konrad Pabjan 2020-03-12 14:50:27 +01:00 committed by GitHub
parent 5859d7172e
commit dffb5572a9
30 changed files with 1252 additions and 574 deletions

View File

@ -196,4 +196,6 @@ Each artifact will have the same `DownloadResponse` as if it was individually do
## Additional Documentation
Check out [additional-information](additional-information.md) for extra documentation.
Check out [additional-information](docs/additional-information.md) for extra documentation around usage, restrictions and behavior.
Check out [implementation-details](docs/implementation-details.md) for extra information about the implementation of this package.

View File

@ -3,3 +3,10 @@
### 0.1.0
- Initial release
### 0.1.1
- Fixes to TCP connections not closing
- GZip file compression to speed up downloads
- Improved logging and output
- Extra documentation

View File

@ -1,8 +1,8 @@
import * as path from 'path'
import * as core from '@actions/core'
import {URL} from 'url'
import {getDownloadSpecification} from '../src/internal-download-specification'
import {ContainerEntry} from '../src/internal-contracts'
import {getDownloadSpecification} from '../src/internal/download-specification'
import {ContainerEntry} from '../src/internal/contracts'
const artifact1Name = 'my-artifact'
const artifact2Name = 'my-artifact-extra'

View File

@ -3,17 +3,17 @@ import * as http from 'http'
import * as io from '../../io/src/io'
import * as net from 'net'
import * as path from 'path'
import * as configVariables from '../src/internal-config-variables'
import * as configVariables from '../src/internal/config-variables'
import {HttpClient, HttpClientResponse} from '@actions/http-client'
import * as downloadClient from '../src/internal-download-http-client'
import {DownloadHttpClient} from '../src/internal/download-http-client'
import {
ListArtifactsResponse,
QueryArtifactResponse
} from '../src/internal-contracts'
} from '../src/internal/contracts'
const root = path.join(__dirname, '_temp', 'artifact-download')
jest.mock('../src/internal-config-variables')
jest.mock('../src/internal/config-variables')
jest.mock('@actions/http-client')
describe('Download Tests', () => {
@ -32,7 +32,8 @@ describe('Download Tests', () => {
*/
it('List Artifacts - Success', async () => {
setupSuccessfulListArtifactsResponse()
const artifacts = await downloadClient.listArtifacts()
const downloadHttpClient = new DownloadHttpClient()
const artifacts = await downloadHttpClient.listArtifacts()
expect(artifacts.count).toEqual(2)
const artifactNames = artifacts.value.map(item => item.name)
@ -58,7 +59,8 @@ describe('Download Tests', () => {
it('List Artifacts - Failure', async () => {
setupFailedResponse()
expect(downloadClient.listArtifacts()).rejects.toThrow(
const downloadHttpClient = new DownloadHttpClient()
expect(downloadHttpClient.listArtifacts()).rejects.toThrow(
'Unable to list artifacts for the run'
)
})
@ -68,7 +70,8 @@ describe('Download Tests', () => {
*/
it('Container Items - Success', async () => {
setupSuccessfulContainerItemsResponse()
const response = await downloadClient.getContainerItems(
const downloadHttpClient = new DownloadHttpClient()
const response = await downloadHttpClient.getContainerItems(
'artifact-name',
configVariables.getRuntimeUrl()
)
@ -93,8 +96,9 @@ describe('Download Tests', () => {
it('Container Items - Failure', async () => {
setupFailedResponse()
const downloadHttpClient = new DownloadHttpClient()
expect(
downloadClient.getContainerItems(
downloadHttpClient.getContainerItems(
'artifact-name',
configVariables.getRuntimeUrl()
)

View File

@ -2,7 +2,7 @@ import * as io from '../../io/src/io'
import * as path from 'path'
import {promises as fs} from 'fs'
import * as core from '@actions/core'
import {getUploadSpecification} from '../src/internal-upload-specification'
import {getUploadSpecification} from '../src/internal/upload-specification'
const artifactName = 'my-artifact'
const root = path.join(__dirname, '_temp', 'upload-specification')

View File

@ -2,16 +2,16 @@ import * as http from 'http'
import * as io from '../../io/src/io'
import * as net from 'net'
import * as path from 'path'
import * as uploadHttpClient from '../src/internal-upload-http-client'
import {UploadHttpClient} from '../src/internal/upload-http-client'
import * as core from '@actions/core'
import {promises as fs} from 'fs'
import {getRuntimeUrl} from '../src/internal-config-variables'
import {getRuntimeUrl} from '../src/internal/config-variables'
import {HttpClient, HttpClientResponse} from '@actions/http-client'
import {
ArtifactResponse,
PatchArtifactSizeSuccessResponse
} from '../src/internal-contracts'
import {UploadSpecification} from '../src/internal-upload-specification'
} from '../src/internal/contracts'
import {UploadSpecification} from '../src/internal/upload-specification'
const root = path.join(__dirname, '_temp', 'artifact-upload')
const file1Path = path.join(root, 'file1.txt')
@ -26,7 +26,7 @@ let file3Size = 0
let file4Size = 0
let file5Size = 0
jest.mock('../src/internal-config-variables')
jest.mock('../src/internal/config-variables')
jest.mock('@actions/http-client')
describe('Upload Tests', () => {
@ -75,6 +75,7 @@ describe('Upload Tests', () => {
*/
it('Create Artifact - Success', async () => {
const artifactName = 'valid-artifact-name'
const uploadHttpClient = new UploadHttpClient()
const response = await uploadHttpClient.createArtifactInFileContainer(
artifactName
)
@ -93,6 +94,7 @@ describe('Upload Tests', () => {
it('Create Artifact - Failure', async () => {
const artifactName = 'invalid-artifact-name'
const uploadHttpClient = new UploadHttpClient()
expect(
uploadHttpClient.createArtifactInFileContainer(artifactName)
).rejects.toEqual(
@ -137,12 +139,13 @@ describe('Upload Tests', () => {
const expectedTotalSize =
file1Size + file2Size + file3Size + file4Size + file5Size
const uploadUrl = `${getRuntimeUrl()}_apis/resources/Containers/13`
const uploadHttpClient = new UploadHttpClient()
const uploadResult = await uploadHttpClient.uploadArtifactToFileContainer(
uploadUrl,
uploadSpecification
)
expect(uploadResult.failedItems.length).toEqual(0)
expect(uploadResult.size).toEqual(expectedTotalSize)
expect(uploadResult.uploadSize).toEqual(expectedTotalSize)
})
it('Upload Artifact - Failed Single File Upload', async () => {
@ -154,12 +157,13 @@ describe('Upload Tests', () => {
]
const uploadUrl = `${getRuntimeUrl()}_apis/resources/Containers/13`
const uploadHttpClient = new UploadHttpClient()
const uploadResult = await uploadHttpClient.uploadArtifactToFileContainer(
uploadUrl,
uploadSpecification
)
expect(uploadResult.failedItems.length).toEqual(1)
expect(uploadResult.size).toEqual(0)
expect(uploadResult.uploadSize).toEqual(0)
})
it('Upload Artifact - Partial Upload Continue On Error', async () => {
@ -189,13 +193,14 @@ describe('Upload Tests', () => {
const expectedPartialSize = file1Size + file2Size + file4Size + file5Size
const uploadUrl = `${getRuntimeUrl()}_apis/resources/Containers/13`
const uploadHttpClient = new UploadHttpClient()
const uploadResult = await uploadHttpClient.uploadArtifactToFileContainer(
uploadUrl,
uploadSpecification,
{continueOnError: true}
)
expect(uploadResult.failedItems.length).toEqual(1)
expect(uploadResult.size).toEqual(expectedPartialSize)
expect(uploadResult.uploadSize).toEqual(expectedPartialSize)
})
it('Upload Artifact - Partial Upload Fail Fast', async () => {
@ -225,13 +230,14 @@ describe('Upload Tests', () => {
const expectedPartialSize = file1Size + file2Size + file3Size
const uploadUrl = `${getRuntimeUrl()}_apis/resources/Containers/13`
const uploadHttpClient = new UploadHttpClient()
const uploadResult = await uploadHttpClient.uploadArtifactToFileContainer(
uploadUrl,
uploadSpecification,
{continueOnError: false}
)
expect(uploadResult.failedItems.length).toEqual(2)
expect(uploadResult.size).toEqual(expectedPartialSize)
expect(uploadResult.uploadSize).toEqual(expectedPartialSize)
})
it('Upload Artifact - Failed upload with no options', async () => {
@ -261,12 +267,13 @@ describe('Upload Tests', () => {
const expectedPartialSize = file1Size + file2Size + file3Size + file5Size
const uploadUrl = `${getRuntimeUrl()}_apis/resources/Containers/13`
const uploadHttpClient = new UploadHttpClient()
const uploadResult = await uploadHttpClient.uploadArtifactToFileContainer(
uploadUrl,
uploadSpecification
)
expect(uploadResult.failedItems.length).toEqual(1)
expect(uploadResult.size).toEqual(expectedPartialSize)
expect(uploadResult.uploadSize).toEqual(expectedPartialSize)
})
it('Upload Artifact - Failed upload with empty options', async () => {
@ -296,25 +303,28 @@ describe('Upload Tests', () => {
const expectedPartialSize = file1Size + file2Size + file3Size + file5Size
const uploadUrl = `${getRuntimeUrl()}_apis/resources/Containers/13`
const uploadHttpClient = new UploadHttpClient()
const uploadResult = await uploadHttpClient.uploadArtifactToFileContainer(
uploadUrl,
uploadSpecification,
{}
)
expect(uploadResult.failedItems.length).toEqual(1)
expect(uploadResult.size).toEqual(expectedPartialSize)
expect(uploadResult.uploadSize).toEqual(expectedPartialSize)
})
/**
* Artifact Association Tests
*/
it('Associate Artifact - Success', async () => {
const uploadHttpClient = new UploadHttpClient()
expect(async () => {
uploadHttpClient.patchArtifactSize(130, 'my-artifact')
}).not.toThrow()
})
it('Associate Artifact - Not Found', async () => {
const uploadHttpClient = new UploadHttpClient()
expect(
uploadHttpClient.patchArtifactSize(100, 'non-existent-artifact')
).rejects.toThrow(
@ -323,6 +333,7 @@ describe('Upload Tests', () => {
})
it('Associate Artifact - Error', async () => {
const uploadHttpClient = new UploadHttpClient()
expect(
uploadHttpClient.patchArtifactSize(-2, 'my-artifact')
).rejects.toThrow('Unable to finish uploading artifact my-artifact')

View File

@ -1,12 +1,12 @@
import * as fs from 'fs'
import * as io from '../../io/src/io'
import * as path from 'path'
import * as utils from '../src/internal-utils'
import * as utils from '../src/internal/utils'
import * as core from '@actions/core'
import {HttpCodes} from '@actions/http-client'
import {getRuntimeUrl, getWorkFlowRunId} from '../src/internal-config-variables'
import {getRuntimeUrl, getWorkFlowRunId} from '../src/internal/config-variables'
jest.mock('../src/internal-config-variables')
jest.mock('../src/internal/config-variables')
describe('Utils', () => {
beforeAll(() => {
@ -49,6 +49,36 @@ describe('Utils', () => {
}
})
it('Check Artifact File Path for any invalid characters', () => {
const invalidNames = [
'some/invalid"artifact/path',
'some/invalid:artifact/path',
'some/invalid<artifact/path',
'some/invalid>artifact/path',
'some/invalid|artifact/path',
'some/invalid*artifact/path',
'some/invalid?artifact/path',
'some/invalid artifact/path',
''
]
for (const invalidName of invalidNames) {
expect(() => {
utils.checkArtifactFilePath(invalidName)
}).toThrow()
}
const validNames = [
'my/perfectly-normal/artifact-path',
'my/perfectly\\Normal/Artifact-path',
'm¥/ñðrmål/Är†ï£å¢†'
]
for (const validName of validNames) {
expect(() => {
utils.checkArtifactFilePath(validName)
}).not.toThrow()
}
})
it('Test constructing artifact URL', () => {
const runtimeUrl = getRuntimeUrl()
const runId = getWorkFlowRunId()
@ -61,13 +91,25 @@ describe('Utils', () => {
it('Test constructing headers with all optional parameters', () => {
const type = 'application/json'
const size = 24
const uncompressedLength = 100
const range = 'bytes 0-199/200'
const options = utils.getRequestOptions(type, size, range)
expect(Object.keys(options).length).toEqual(4)
const options = utils.getRequestOptions(
type,
true,
true,
uncompressedLength,
size,
range
)
expect(Object.keys(options).length).toEqual(8)
expect(options['Accept']).toEqual(
`${type};api-version=${utils.getApiVersion()}`
)
expect(options['Content-Type']).toEqual(type)
expect(options['Connection']).toEqual('Keep-Alive')
expect(options['Keep-Alive']).toEqual('10')
expect(options['Content-Encoding']).toEqual('gzip')
expect(options['x-tfs-filelength']).toEqual(uncompressedLength)
expect(options['Content-Length']).toEqual(size)
expect(options['Content-Range']).toEqual(range)
})

View File

@ -43,3 +43,11 @@ const uploadResult = await artifactClient.uploadArtifact(artifactName, files, ro
During upload, each file is uploaded concurrently in 4MB chunks using a separate HTTPS connection per file. Chunked uploads are used so that in the event of a failure (which is entirely possible because the internet is not perfect), the upload can be retried. If there is an error, a retry will be attempted after a certain period of time.
Uploading will generally be faster if there are fewer, larger files rather than many smaller ones. Depending on the types and quantities of files being uploaded, it might be beneficial to separately compress and archive everything into a single archive (using something like `tar` or `zip`) before starting an artifact upload to speed things up, as in the sketch below.
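For example, a minimal sketch of pre-archiving, assuming a `tar` binary on the runner and the `create()` export of this package; the directory and archive names are illustrative:

```ts
import * as artifact from '@actions/artifact'
import {execSync} from 'child_process'

// bundle many small files into one archive so only a single file is uploaded
execSync('tar -czf my-files.tar.gz dist')

const artifactClient = artifact.create()
const uploadResult = await artifactClient.uploadArtifact(
  'my-artifact',
  ['my-files.tar.gz'],
  '.'
)
```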
## Is my artifact compressed?
GZip is used internally to compress individual files before starting an upload. Compression helps reduce the total amount of data that must be uploaded and stored while also speeding up uploads (this performance benefit is especially significant on self-hosted runners). If GZip does not reduce the size of the file being uploaded, the original file is uploaded as-is.
Compression using GZip also helps speed up artifact downloads as part of a workflow. Header information is used to determine if an individual file was uploaded using GZip and, if necessary, decompression is applied.
When downloading an artifact from the GitHub UI (this differs from downloading an artifact during a workflow), a single Zip file is dynamically created that contains all of the files uploaded as part of an artifact. Any files that were uploaded using GZip will be decompressed on the server before being added to the Zip file with the remaining files.
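As an illustration of the size check described above, a hedged sketch (these helper names are not the package's internal API):

```ts
import * as fs from 'fs'
import * as zlib from 'zlib'
import {promisify} from 'util'

const gzipAsync = promisify(zlib.gzip)

// compress a file in memory and report whether GZip actually saved space;
// if it did not, the original file should be uploaded as-is
async function gzipSavesSpace(filePath: string): Promise<boolean> {
  const original = await fs.promises.readFile(filePath)
  const compressed = await gzipAsync(original)
  return compressed.byteLength < original.byteLength
}
```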

View File

@ -0,0 +1,43 @@
# Implementation Details
## Proxy support
This package uses the `@actions/http-client` NPM package internally, which supports proxied requests out of the box.
## HttpManager
### `keep-alive` header
When an HTTP call is made to upload or download an individual file, the server will close the HTTP connection after the upload/download is complete and respond with a header indicating `Connection: close`.
[HTTP closed connection header information](https://tools.ietf.org/html/rfc2616#section-14.10)
TCP connections are sometimes not immediately closed by the Node client (Windows, for example, might hold on to a port for an extra period of time before actually releasing it), and a large number of closed connections can cause port exhaustion before ports are released and become available again.
VMs hosted by GitHub Actions have 1024 available ports, so uploading 1000+ files very quickly can cause port exhaustion if connections get closed immediately. This can lead to strange undefined behavior and timeouts.
In order for connections to not close immediately, the `keep-alive` header is used to indicate to the server that the connection should stay open. If a `keep-alive` header is used, the connection needs to be disposed of by calling `dispose()` on the `HttpClient` (a short sketch follows the links below).
[`keep-alive` header information](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Keep-Alive)
[@actions/http-client client disposal](https://github.com/actions/http-client/blob/04e5ad73cd3fd1f5610a32116b0759eddf6570d2/index.ts#L292)
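A minimal sketch of a keep-alive request with `@actions/http-client` (the URL and user agent are illustrative; the `Keep-Alive: 10` value matches what this package sends):

```ts
import {HttpClient} from '@actions/http-client'

const client = new HttpClient('my-user-agent')

// ask the server to keep the TCP connection open between requests
const response = await client.get('https://example.com/resource', {
  Connection: 'Keep-Alive',
  'Keep-Alive': '10'
})
await response.readBody()

// a client that used keep-alive must be explicitly disposed
client.dispose()
```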
### Multiple HTTP clients
During an artifact upload or download, files are concurrently uploaded or downloaded using `async/await`. When an error or retry is encountered, the `HttpClient` that made the call is disposed of and a new one is created. If a single `HttpClient` were used for all HTTP calls and it had to be disposed, it could inadvertently affect other calls happening concurrently.
Any other concurrent uploads or downloads should be left untouched. Because of this, each concurrent upload or download gets its own `HttpClient`. The `http-manager` is used to manage all available clients, and each concurrent upload or download maintains an `httpClientIndex` that keeps track of which client should be used (and potentially disposed and recycled if necessary), as sketched below.
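A sketch of that recycling pattern, assuming the `HttpManager` class added elsewhere in this change:

```ts
import {HttpManager} from './http-manager'

// each concurrent lane owns an index into the client pool; on a retryable
// failure only that lane's client is recycled, leaving other in-flight
// calls on their own connections untouched
async function retryOnLane(
  manager: HttpManager,
  httpClientIndex: number,
  url: string
): Promise<void> {
  manager.disposeAndReplaceClient(httpClientIndex)
  const freshClient = manager.getClient(httpClientIndex)
  const response = await freshClient.get(url)
  await response.readBody()
}
```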
### Potential resource leaks
When an HTTP response is received, it consists of two parts:
- `message`
- `body`
The `message` contains information such as the response code and headers, and it is available immediately. The body, however, is not available immediately; it can be read by calling `await response.readBody()`.
TCP connections consist of an input and output buffer to manage what is sent and received across a connection. If the body is not read (even if its contents are not needed), the buffers can stay in use even after `dispose()` gets called on the `HttpClient`. The buffers get released automatically after a certain period of time, but in order for them to be explicitly cleared, `readBody()` is always called, as in the sketch below.
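A minimal sketch of that rule (the function name is illustrative):

```ts
import {HttpClient} from '@actions/http-client'

// read only the status code, but still drain the body so the TCP buffers
// backing the connection are released before dispose() is called
async function getStatusCodeAndDrain(
  client: HttpClient,
  url: string
): Promise<number | undefined> {
  const response = await client.get(url)
  const statusCode = response.message.statusCode
  await response.readBody() // contents unused; the read frees the buffers
  return statusCode
}
```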
### Non-concurrent calls
Neither `upload-http-client` nor `download-http-client` instantiates its own HTTP clients (the `HttpManager` has that responsibility). If an HTTP call has to be made that does not require the `keep-alive` header (such as when calling `listArtifacts` or `patchArtifactSize`), the first `HttpClient` in the `HttpManager` is used. The number of available clients is equal to the upload or download concurrency, and there will always be at least one available.

View File

@ -12,10 +12,116 @@
"tunnel": "0.0.6"
}
},
"@types/tmp": {
"version": "0.1.0",
"resolved": "https://registry.npmjs.org/@types/tmp/-/tmp-0.1.0.tgz",
"integrity": "sha512-6IwZ9HzWbCq6XoQWhxLpDjuADodH/MKXRUIDFudvgjcVdjFknvmR+DNsoUeer4XPrEnrZs04Jj+kfV9pFsrhmA=="
},
"balanced-match": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz",
"integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c="
},
"brace-expansion": {
"version": "1.1.11",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
"integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
"requires": {
"balanced-match": "^1.0.0",
"concat-map": "0.0.1"
}
},
"concat-map": {
"version": "0.0.1",
"resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
"integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s="
},
"fs.realpath": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
"integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8="
},
"glob": {
"version": "7.1.6",
"resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz",
"integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==",
"requires": {
"fs.realpath": "^1.0.0",
"inflight": "^1.0.4",
"inherits": "2",
"minimatch": "^3.0.4",
"once": "^1.3.0",
"path-is-absolute": "^1.0.0"
}
},
"inflight": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
"integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=",
"requires": {
"once": "^1.3.0",
"wrappy": "1"
}
},
"inherits": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
},
"minimatch": {
"version": "3.0.4",
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz",
"integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==",
"requires": {
"brace-expansion": "^1.1.7"
}
},
"once": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
"integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=",
"requires": {
"wrappy": "1"
}
},
"path-is-absolute": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
"integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18="
},
"rimraf": {
"version": "2.7.1",
"resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz",
"integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==",
"requires": {
"glob": "^7.1.3"
}
},
"tmp": {
"version": "0.1.0",
"resolved": "https://registry.npmjs.org/tmp/-/tmp-0.1.0.tgz",
"integrity": "sha512-J7Z2K08jbGcdA1kkQpJSqLF6T0tdQqpR2pnSUXsIchbPdTI9v3e85cLW0d6WDhwuAleOV71j2xWs8qMPfK7nKw==",
"requires": {
"rimraf": "^2.6.3"
}
},
"tmp-promise": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/tmp-promise/-/tmp-promise-2.0.2.tgz",
"integrity": "sha512-zl71nFWjPKW2KXs+73gEk8RmqvtAeXPxhWDkTUoa3MSMkjq3I+9OeknjF178MQoMYsdqL730hfzvNfEkePxq9Q==",
"requires": {
"tmp": "0.1.0"
}
},
"tunnel": {
"version": "0.0.6",
"resolved": "https://registry.npmjs.org/tunnel/-/tunnel-0.0.6.tgz",
"integrity": "sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg=="
},
"wrappy": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
"integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8="
}
}
}

View File

@ -37,6 +37,9 @@
},
"dependencies": {
"@actions/core": "^1.2.1",
"@actions/http-client": "^1.0.6"
"@actions/http-client": "^1.0.6",
"@types/tmp": "^0.1.0",
"tmp": "^0.1.0",
"tmp-promise": "^2.0.2"
}
}

View File

@ -1,5 +1,16 @@
import {ArtifactClient, DefaultArtifactClient} from './internal-artifact-client'
export {ArtifactClient}
import {UploadOptions} from './internal/upload-options'
import {UploadResponse} from './internal/upload-response'
import {DownloadOptions} from './internal/download-options'
import {DownloadResponse} from './internal/download-response'
import {ArtifactClient, DefaultArtifactClient} from './internal/artifact-client'
export {
ArtifactClient,
UploadResponse,
UploadOptions,
DownloadResponse,
DownloadOptions
}
/**
* Constructs an ArtifactClient

View File

@ -1,133 +0,0 @@
import * as fs from 'fs'
import {
createHttpClient,
getArtifactUrl,
getRequestOptions,
isSuccessStatusCode,
isRetryableStatusCode
} from './internal-utils'
import {URL} from 'url'
import {
ListArtifactsResponse,
QueryArtifactResponse
} from './internal-contracts'
import {IHttpClientResponse} from '@actions/http-client/interfaces'
import {HttpClient} from '@actions/http-client'
import {DownloadItem} from './internal-download-specification'
import {getDownloadFileConcurrency} from './internal-config-variables'
import {warning} from '@actions/core'
/**
* Gets a list of all artifacts that are in a specific container
*/
export async function listArtifacts(): Promise<ListArtifactsResponse> {
const artifactUrl = getArtifactUrl()
const client = createHttpClient()
const requestOptions = getRequestOptions('application/json')
const rawResponse = await client.get(artifactUrl, requestOptions)
const body: string = await rawResponse.readBody()
if (isSuccessStatusCode(rawResponse.message.statusCode) && body) {
return JSON.parse(body)
}
// eslint-disable-next-line no-console
console.log(rawResponse)
throw new Error(`Unable to list artifacts for the run`)
}
/**
* Fetches a set of container items that describe the contents of an artifact
* @param artifactName the name of the artifact
* @param containerUrl the artifact container URL for the run
*/
export async function getContainerItems(
artifactName: string,
containerUrl: string
): Promise<QueryArtifactResponse> {
// The itemPath search parameter controls which containers will be returned
const resourceUrl = new URL(containerUrl)
resourceUrl.searchParams.append('itemPath', artifactName)
const client = createHttpClient()
const rawResponse = await client.get(resourceUrl.toString())
const body: string = await rawResponse.readBody()
if (isSuccessStatusCode(rawResponse.message.statusCode) && body) {
return JSON.parse(body)
}
// eslint-disable-next-line no-console
console.log(rawResponse)
throw new Error(`Unable to get ContainersItems from ${resourceUrl}`)
}
/**
* Concurrently downloads all the files that are part of an artifact
* @param downloadItems information about what items to download and where to save them
*/
export async function downloadSingleArtifact(
downloadItems: DownloadItem[]
): Promise<void> {
const DOWNLOAD_CONCURRENCY = getDownloadFileConcurrency()
// Limit the number of files downloaded at a single time
const parallelDownloads = [...new Array(DOWNLOAD_CONCURRENCY).keys()]
const client = createHttpClient()
let downloadedFiles = 0
await Promise.all(
parallelDownloads.map(async () => {
while (downloadedFiles < downloadItems.length) {
const currentFileToDownload = downloadItems[downloadedFiles]
downloadedFiles += 1
await downloadIndividualFile(
client,
currentFileToDownload.sourceLocation,
currentFileToDownload.targetPath
)
}
})
)
}
/**
* Downloads an individual file
* @param client http client that will be used to make the necessary calls
* @param artifactLocation origin location where a file will be downloaded from
* @param downloadPath destination location for the file being downloaded
*/
export async function downloadIndividualFile(
client: HttpClient,
artifactLocation: string,
downloadPath: string
): Promise<void> {
const stream = fs.createWriteStream(downloadPath)
const response = await client.get(artifactLocation)
if (isSuccessStatusCode(response.message.statusCode)) {
await pipeResponseToStream(response, stream)
} else if (isRetryableStatusCode(response.message.statusCode)) {
warning(
`Received http ${response.message.statusCode} during file download, will retry ${artifactLocation} after 10 seconds`
)
await new Promise(resolve => setTimeout(resolve, 10000))
const retryResponse = await client.get(artifactLocation)
if (isSuccessStatusCode(retryResponse.message.statusCode)) {
await pipeResponseToStream(response, stream)
} else {
// eslint-disable-next-line no-console
console.log(retryResponse)
throw new Error(`Unable to download ${artifactLocation}`)
}
} else {
// eslint-disable-next-line no-console
console.log(response)
throw new Error(`Unable to download ${artifactLocation}`)
}
}
export async function pipeResponseToStream(
response: IHttpClientResponse,
stream: NodeJS.WritableStream
): Promise<void> {
return new Promise(resolve => {
response.message.pipe(stream).on('close', () => {
resolve()
})
})
}

View File

@ -1,322 +0,0 @@
import {debug, warning, info} from '@actions/core'
import {HttpClientResponse, HttpClient} from '@actions/http-client/index'
import {IHttpClientResponse} from '@actions/http-client/interfaces'
import {
ArtifactResponse,
CreateArtifactParameters,
PatchArtifactSize,
UploadResults
} from './internal-contracts'
import * as fs from 'fs'
import {UploadSpecification} from './internal-upload-specification'
import {UploadOptions} from './internal-upload-options'
import {URL} from 'url'
import {
createHttpClient,
getArtifactUrl,
getContentRange,
getRequestOptions,
isRetryableStatusCode,
isSuccessStatusCode
} from './internal-utils'
import {
getUploadChunkConcurrency,
getUploadChunkSize,
getUploadFileConcurrency
} from './internal-config-variables'
/**
* Creates a file container for the new artifact in the remote blob storage/file service
* @param {string} artifactName Name of the artifact being created
* @returns The response from the Artifact Service if the file container was successfully created
*/
export async function createArtifactInFileContainer(
artifactName: string
): Promise<ArtifactResponse> {
const parameters: CreateArtifactParameters = {
Type: 'actions_storage',
Name: artifactName
}
const data: string = JSON.stringify(parameters, null, 2)
const artifactUrl = getArtifactUrl()
const client = createHttpClient()
const requestOptions = getRequestOptions('application/json')
const rawResponse = await client.post(artifactUrl, data, requestOptions)
const body: string = await rawResponse.readBody()
if (isSuccessStatusCode(rawResponse.message.statusCode) && body) {
return JSON.parse(body)
} else {
// eslint-disable-next-line no-console
console.log(rawResponse)
throw new Error(
`Unable to create a container for the artifact ${artifactName}`
)
}
}
/**
* Concurrently upload all of the files in chunks
* @param {string} uploadUrl Base Url for the artifact that was created
* @param {SearchResult[]} filesToUpload A list of information about the files being uploaded
* @returns The size of all the files uploaded in bytes
*/
export async function uploadArtifactToFileContainer(
uploadUrl: string,
filesToUpload: UploadSpecification[],
options?: UploadOptions
): Promise<UploadResults> {
const client = createHttpClient()
const FILE_CONCURRENCY = getUploadFileConcurrency()
const CHUNK_CONCURRENCY = getUploadChunkConcurrency()
const MAX_CHUNK_SIZE = getUploadChunkSize()
debug(
`File Concurrency: ${FILE_CONCURRENCY}, Chunk Concurrency: ${CHUNK_CONCURRENCY} and Chunk Size: ${MAX_CHUNK_SIZE}`
)
const parameters: UploadFileParameters[] = []
// by default, file uploads will continue if there is an error unless specified differently in the options
let continueOnError = true
if (options) {
if (options.continueOnError === false) {
continueOnError = false
}
}
// Prepare the necessary parameters to upload all the files
for (const file of filesToUpload) {
const resourceUrl = new URL(uploadUrl)
resourceUrl.searchParams.append('itemPath', file.uploadFilePath)
parameters.push({
file: file.absoluteFilePath,
resourceUrl: resourceUrl.toString(),
restClient: client,
concurrency: CHUNK_CONCURRENCY,
maxChunkSize: MAX_CHUNK_SIZE,
continueOnError
})
}
const parallelUploads = [...new Array(FILE_CONCURRENCY).keys()]
const failedItemsToReport: string[] = []
let uploadedFiles = 0
let fileSizes = 0
let abortPendingFileUploads = false
// Only allow a certain amount of files to be uploaded at once, this is done to reduce potential errors
await Promise.all(
parallelUploads.map(async () => {
while (uploadedFiles < filesToUpload.length) {
const currentFileParameters = parameters[uploadedFiles]
uploadedFiles += 1
if (abortPendingFileUploads) {
failedItemsToReport.push(currentFileParameters.file)
continue
}
const uploadFileResult = await uploadFileAsync(currentFileParameters)
fileSizes += uploadFileResult.successfulUploadSize
if (uploadFileResult.isSuccess === false) {
failedItemsToReport.push(currentFileParameters.file)
if (!continueOnError) {
// Existing uploads will be able to finish however all pending uploads will fail fast
abortPendingFileUploads = true
}
}
}
})
)
info(`Total size of all the files uploaded is ${fileSizes} bytes`)
return {
size: fileSizes,
failedItems: failedItemsToReport
}
}
/**
* Asynchronously uploads a file. If the file is bigger than the max chunk size it will be uploaded via multiple calls
* @param {UploadFileParameters} parameters Information about the file that needs to be uploaded
* @returns The size of the file that was uploaded in bytes along with any failed uploads
*/
async function uploadFileAsync(
parameters: UploadFileParameters
): Promise<UploadFileResult> {
const fileSize: number = fs.statSync(parameters.file).size
const parallelUploads = [...new Array(parameters.concurrency).keys()]
let offset = 0
let isUploadSuccessful = true
let failedChunkSizes = 0
let abortFileUpload = false
await Promise.all(
parallelUploads.map(async () => {
while (offset < fileSize) {
const chunkSize = Math.min(fileSize - offset, parameters.maxChunkSize)
if (abortFileUpload) {
// if we don't want to continue on error, any pending upload chunk will be marked as failed
failedChunkSizes += chunkSize
continue
}
const start = offset
const end = offset + chunkSize - 1
offset += parameters.maxChunkSize
const chunk: NodeJS.ReadableStream = fs.createReadStream(
parameters.file,
{
start,
end,
autoClose: false
}
)
const result = await uploadChunk(
parameters.restClient,
parameters.resourceUrl,
chunk,
start,
end,
fileSize
)
if (!result) {
/**
* Chunk failed to upload, report as failed and do not continue uploading any more chunks for the file. It is possible that part of a chunk was
* successfully uploaded so the server may report a different size for what was uploaded
**/
isUploadSuccessful = false
failedChunkSizes += chunkSize
warning(`Aborting upload for ${parameters.file} due to failure`)
abortFileUpload = true
}
}
})
)
return {
isSuccess: isUploadSuccessful,
successfulUploadSize: fileSize - failedChunkSizes
}
}
/**
* Uploads a chunk of an individual file to the specified resourceUrl. If the upload fails and the status code
* indicates a retryable status, we try to upload the chunk as well
* @param {HttpClient} restClient RestClient that will be making the appropriate HTTP call
* @param {string} resourceUrl Url of the resource that the chunk will be uploaded to
* @param {NodeJS.ReadableStream} data Stream of the file that will be uploaded
* @param {number} start Starting byte index of file that the chunk belongs to
* @param {number} end Ending byte index of file that the chunk belongs to
* @param {number} totalSize Total size of the file in bytes that is being uploaded
* @returns if the chunk was successfully uploaded
*/
async function uploadChunk(
restClient: HttpClient,
resourceUrl: string,
data: NodeJS.ReadableStream,
start: number,
end: number,
totalSize: number
): Promise<boolean> {
info(
`Uploading chunk of size ${end -
start +
1} bytes at offset ${start} with content range: ${getContentRange(
start,
end,
totalSize
)}`
)
const requestOptions = getRequestOptions(
'application/octet-stream',
totalSize,
getContentRange(start, end, totalSize)
)
const uploadChunkRequest = async (): Promise<IHttpClientResponse> => {
return await restClient.sendStream('PUT', resourceUrl, data, requestOptions)
}
const response = await uploadChunkRequest()
if (isSuccessStatusCode(response.message.statusCode)) {
debug(
`Chunk for ${start}:${end} was successfully uploaded to ${resourceUrl}`
)
return true
} else if (isRetryableStatusCode(response.message.statusCode)) {
info(
`Received http ${response.message.statusCode} during chunk upload, will retry at offset ${start} after 10 seconds.`
)
await new Promise(resolve => setTimeout(resolve, 10000))
const retryResponse = await uploadChunkRequest()
if (isSuccessStatusCode(retryResponse.message.statusCode)) {
return true
} else {
info(`Unable to upload chunk even after retrying`)
// eslint-disable-next-line no-console
console.log(response)
return false
}
}
// Upload must have failed spectacularly somehow, log full result for diagnostic purposes
// eslint-disable-next-line no-console
console.log(response)
return false
}
/**
* Updates the size of the artifact from -1 which was initially set when the container was first created for the artifact.
* Updating the size indicates that we are done uploading all the contents of the artifact. A server side check will be run
* to check that the artifact size is correct for billing purposes
*/
export async function patchArtifactSize(
size: number,
artifactName: string
): Promise<void> {
const client = createHttpClient()
const requestOptions = getRequestOptions('application/json')
const resourceUrl = new URL(getArtifactUrl())
resourceUrl.searchParams.append('artifactName', artifactName)
const parameters: PatchArtifactSize = {Size: size}
const data: string = JSON.stringify(parameters, null, 2)
debug(`URL is ${resourceUrl.toString()}`)
const rawResponse: HttpClientResponse = await client.patch(
resourceUrl.toString(),
data,
requestOptions
)
const body: string = await rawResponse.readBody()
if (isSuccessStatusCode(rawResponse.message.statusCode)) {
debug(
`Artifact ${artifactName} has been successfully uploaded, total size ${size}`
)
debug(body)
} else if (rawResponse.message.statusCode === 404) {
throw new Error(`An Artifact with the name ${artifactName} was not found`)
} else {
// eslint-disable-next-line no-console
console.log(body)
throw new Error(`Unable to finish uploading artifact ${artifactName}`)
}
}
interface UploadFileParameters {
file: string
resourceUrl: string
restClient: HttpClient
concurrency: number
maxChunkSize: number
continueOnError: boolean
}
interface UploadFileResult {
isSuccess: boolean
successfulUploadSize: number
}

View File

@ -12,6 +12,19 @@ export function getUploadChunkConcurrency(): number {
export function getUploadChunkSize(): number {
return 4 * 1024 * 1024 // 4 MB Chunks
}
export function getUploadRetryCount(): number {
return 1
}
export function getRetryWaitTimeInMilliseconds(): number {
return 1
}
export function getDownloadFileConcurrency(): number {
return 1
}
/**
* Mocks the 'ACTIONS_RUNTIME_TOKEN', 'ACTIONS_RUNTIME_URL' and 'GITHUB_RUN_ID' env variables
* that are only available from a node context on the runner. This allows for tests to run

View File

@ -2,31 +2,18 @@ import * as core from '@actions/core'
import {
UploadSpecification,
getUploadSpecification
} from './internal-upload-specification'
import {
createArtifactInFileContainer,
uploadArtifactToFileContainer,
patchArtifactSize
} from './internal-upload-http-client'
import {UploadResponse} from './internal-upload-response'
import {UploadOptions} from './internal-upload-options'
import {DownloadOptions} from './internal-download-options'
import {DownloadResponse} from './internal-download-response'
import {checkArtifactName, createDirectoriesForArtifact} from './internal-utils'
import {
listArtifacts,
downloadSingleArtifact,
getContainerItems
} from './internal-download-http-client'
import {getDownloadSpecification} from './internal-download-specification'
import {
getWorkSpaceDirectory,
getDownloadArtifactConcurrency
} from './internal-config-variables'
} from './upload-specification'
import {UploadHttpClient} from './upload-http-client'
import {UploadResponse} from './upload-response'
import {UploadOptions} from './upload-options'
import {DownloadOptions} from './download-options'
import {DownloadResponse} from './download-response'
import {checkArtifactName, createDirectoriesForArtifact} from './utils'
import {DownloadHttpClient} from './download-http-client'
import {getDownloadSpecification} from './download-specification'
import {getWorkSpaceDirectory} from './config-variables'
import {normalize, resolve} from 'path'
export {UploadResponse, UploadOptions, DownloadResponse, DownloadOptions}
export interface ArtifactClient {
/**
* Uploads an artifact
@ -96,11 +83,15 @@ export class DefaultArtifactClient implements ArtifactClient {
failedItems: []
}
const uploadHttpClient = new UploadHttpClient()
if (uploadSpecification.length === 0) {
core.warning(`No files found that can be uploaded`)
} else {
// Create an entry for the artifact in the file container
const response = await createArtifactInFileContainer(name)
const response = await uploadHttpClient.createArtifactInFileContainer(
name
)
if (!response.fileContainerResourceUrl) {
core.debug(response.toString())
throw new Error(
@ -110,23 +101,24 @@ export class DefaultArtifactClient implements ArtifactClient {
core.debug(`Upload Resource URL: ${response.fileContainerResourceUrl}`)
// Upload each of the files that were found concurrently
const uploadResult = await uploadArtifactToFileContainer(
const uploadResult = await uploadHttpClient.uploadArtifactToFileContainer(
response.fileContainerResourceUrl,
uploadSpecification,
options
)
// Update the size of the artifact to indicate we are done uploading
await patchArtifactSize(uploadResult.size, name)
// The uncompressed size is used for display when downloading a zip of the artifact from the UI
await uploadHttpClient.patchArtifactSize(uploadResult.totalSize, name)
core.info(
`Finished uploading artifact ${name}. Reported size is ${uploadResult.size} bytes. There were ${uploadResult.failedItems.length} items that failed to upload`
`Finished uploading artifact ${name}. Reported size is ${uploadResult.uploadSize} bytes. There were ${uploadResult.failedItems.length} items that failed to upload`
)
uploadResponse.artifactItems = uploadSpecification.map(
item => item.absoluteFilePath
)
uploadResponse.size = uploadResult.size
uploadResponse.size = uploadResult.uploadSize
uploadResponse.failedItems = uploadResult.failedItems
}
return uploadResponse
@ -137,7 +129,9 @@ export class DefaultArtifactClient implements ArtifactClient {
path?: string | undefined,
options?: DownloadOptions | undefined
): Promise<DownloadResponse> {
const artifacts = await listArtifacts()
const downloadHttpClient = new DownloadHttpClient()
const artifacts = await downloadHttpClient.listArtifacts()
if (artifacts.count === 0) {
throw new Error(
`Unable to find any artifacts for the associated workflow`
@ -151,7 +145,7 @@ export class DefaultArtifactClient implements ArtifactClient {
throw new Error(`Unable to find an artifact with the name: ${name}`)
}
const items = await getContainerItems(
const items = await downloadHttpClient.getContainerItems(
artifactToDownload.name,
artifactToDownload.fileContainerResourceUrl
)
@ -179,7 +173,9 @@ export class DefaultArtifactClient implements ArtifactClient {
await createDirectoriesForArtifact(
downloadSpecification.directoryStructure
)
await downloadSingleArtifact(downloadSpecification.filesToDownload)
await downloadHttpClient.downloadSingleArtifact(
downloadSpecification.filesToDownload
)
}
return {
@ -191,8 +187,10 @@ export class DefaultArtifactClient implements ArtifactClient {
async downloadAllArtifacts(
path?: string | undefined
): Promise<DownloadResponse[]> {
const downloadHttpClient = new DownloadHttpClient()
const response: DownloadResponse[] = []
const artifacts = await listArtifacts()
const artifacts = await downloadHttpClient.listArtifacts()
if (artifacts.count === 0) {
core.info('Unable to find any artifacts for the associated workflow')
return response
@ -204,26 +202,21 @@ export class DefaultArtifactClient implements ArtifactClient {
path = normalize(path)
path = resolve(path)
const ARTIFACT_CONCURRENCY = getDownloadArtifactConcurrency()
const parallelDownloads = [...new Array(ARTIFACT_CONCURRENCY).keys()]
let downloadedArtifacts = 0
await Promise.all(
parallelDownloads.map(async () => {
while (downloadedArtifacts < artifacts.count) {
const currentArtifactToDownload = artifacts.value[downloadedArtifacts]
downloadedArtifacts += 1
// Get container entries for the specific artifact
const items = await getContainerItems(
const items = await downloadHttpClient.getContainerItems(
currentArtifactToDownload.name,
currentArtifactToDownload.fileContainerResourceUrl
)
// Promise.All is not correctly inferring that 'path' is no longer possibly undefined: https://github.com/microsoft/TypeScript/issues/34925
const downloadSpecification = getDownloadSpecification(
currentArtifactToDownload.name,
items.value,
path!, // eslint-disable-line @typescript-eslint/no-non-null-assertion
path,
true
)
if (downloadSpecification.filesToDownload.length === 0) {
@ -234,7 +227,9 @@ export class DefaultArtifactClient implements ArtifactClient {
await createDirectoriesForArtifact(
downloadSpecification.directoryStructure
)
await downloadSingleArtifact(downloadSpecification.filesToDownload)
await downloadHttpClient.downloadSingleArtifact(
downloadSpecification.filesToDownload
)
}
response.push({
@ -242,8 +237,6 @@ export class DefaultArtifactClient implements ArtifactClient {
downloadPath: downloadSpecification.rootDownloadLocation
})
}
})
)
return response
}
}

View File

@ -2,21 +2,20 @@ export function getUploadFileConcurrency(): number {
return 2
}
export function getUploadChunkConcurrency(): number {
return 1
}
export function getUploadChunkSize(): number {
return 4 * 1024 * 1024 // 4 MB Chunks
}
export function getDownloadFileConcurrency(): number {
return 2
export function getUploadRetryCount(): number {
return 3
}
export function getDownloadArtifactConcurrency(): number {
// when downloading all artifact at once, this is number of concurrent artifacts being downloaded
return 1
export function getRetryWaitTimeInMilliseconds(): number {
return 10000
}
export function getDownloadFileConcurrency(): number {
return 2
}
export function getRuntimeToken(): string {

View File

@ -28,7 +28,8 @@ export interface PatchArtifactSizeSuccessResponse {
}
export interface UploadResults {
size: number
uploadSize: number
totalSize: number
failedItems: string[]
}

View File

@ -0,0 +1,189 @@
import * as fs from 'fs'
import * as zlib from 'zlib'
import {
getArtifactUrl,
getRequestOptions,
isSuccessStatusCode,
isRetryableStatusCode,
createHttpClient
} from './utils'
import {URL} from 'url'
import {ListArtifactsResponse, QueryArtifactResponse} from './contracts'
import {IHttpClientResponse} from '@actions/http-client/interfaces'
import {HttpManager} from './http-manager'
import {DownloadItem} from './download-specification'
import {
getDownloadFileConcurrency,
getRetryWaitTimeInMilliseconds
} from './config-variables'
import {warning} from '@actions/core'
import {IncomingHttpHeaders} from 'http'
export class DownloadHttpClient {
// the http manager is used for concurrent connections when downloading multiple files at once
private downloadHttpManager: HttpManager
constructor() {
this.downloadHttpManager = new HttpManager(getDownloadFileConcurrency())
}
/**
* Gets a list of all artifacts that are in a specific container
*/
async listArtifacts(): Promise<ListArtifactsResponse> {
const artifactUrl = getArtifactUrl()
// use the first client from the httpManager, `keep-alive` is not used so the connection will close immediately
const client = this.downloadHttpManager.getClient(0)
const requestOptions = getRequestOptions('application/json')
const rawResponse = await client.get(artifactUrl, requestOptions)
const body: string = await rawResponse.readBody()
if (isSuccessStatusCode(rawResponse.message.statusCode) && body) {
return JSON.parse(body)
}
// eslint-disable-next-line no-console
console.log(rawResponse)
throw new Error(`Unable to list artifacts for the run`)
}
/**
* Fetches a set of container items that describe the contents of an artifact
* @param artifactName the name of the artifact
* @param containerUrl the artifact container URL for the run
*/
async getContainerItems(
artifactName: string,
containerUrl: string
): Promise<QueryArtifactResponse> {
// the itemPath search parameter controls which containers will be returned
const resourceUrl = new URL(containerUrl)
resourceUrl.searchParams.append('itemPath', artifactName)
// no concurrent calls so a single httpClient without the http-manager is sufficient
const client = createHttpClient()
// no keep-alive header, client disposal is not necessary
const requestOptions = getRequestOptions('application/json')
const rawResponse = await client.get(resourceUrl.toString(), requestOptions)
const body: string = await rawResponse.readBody()
if (isSuccessStatusCode(rawResponse.message.statusCode) && body) {
return JSON.parse(body)
}
// eslint-disable-next-line no-console
console.log(rawResponse)
throw new Error(`Unable to get ContainersItems from ${resourceUrl}`)
}
/**
* Concurrently downloads all the files that are part of an artifact
* @param downloadItems information about what items to download and where to save them
*/
async downloadSingleArtifact(downloadItems: DownloadItem[]): Promise<void> {
const DOWNLOAD_CONCURRENCY = getDownloadFileConcurrency()
// limit the number of files downloaded at a single time
const parallelDownloads = [...new Array(DOWNLOAD_CONCURRENCY).keys()]
let downloadedFiles = 0
await Promise.all(
parallelDownloads.map(async index => {
while (downloadedFiles < downloadItems.length) {
const currentFileToDownload = downloadItems[downloadedFiles]
downloadedFiles += 1
await this.downloadIndividualFile(
index,
currentFileToDownload.sourceLocation,
currentFileToDownload.targetPath
)
}
})
)
// done downloading, safely dispose of all connections
this.downloadHttpManager.disposeAndReplaceAllClients()
}
/**
* Downloads an individual file
* @param httpClientIndex the index of the http client that is used to make all of the calls
* @param artifactLocation origin location where a file will be downloaded from
* @param downloadPath destination location for the file being downloaded
*/
private async downloadIndividualFile(
httpClientIndex: number,
artifactLocation: string,
downloadPath: string
): Promise<void> {
const stream = fs.createWriteStream(downloadPath)
const client = this.downloadHttpManager.getClient(httpClientIndex)
const requestOptions = getRequestOptions('application/octet-stream', true)
const response = await client.get(artifactLocation, requestOptions)
// check the response headers to determine if the file was compressed using gzip
const isGzip = (headers: IncomingHttpHeaders): boolean => {
return (
'content-encoding' in headers && headers['content-encoding'] === 'gzip'
)
}
if (isSuccessStatusCode(response.message.statusCode)) {
await this.pipeResponseToStream(
response,
stream,
isGzip(response.message.headers)
)
} else if (isRetryableStatusCode(response.message.statusCode)) {
warning(
`Received http ${response.message.statusCode} during file download, will retry ${artifactLocation} after 10 seconds`
)
// if an error is encountered, dispose of the http connection, and create a new one
this.downloadHttpManager.disposeAndReplaceClient(httpClientIndex)
await new Promise(resolve =>
setTimeout(resolve, getRetryWaitTimeInMilliseconds())
)
// the original client was disposed, so fetch its replacement before retrying
const retryClient = this.downloadHttpManager.getClient(httpClientIndex)
const retryResponse = await retryClient.get(artifactLocation)
if (isSuccessStatusCode(retryResponse.message.statusCode)) {
await this.pipeResponseToStream(
retryResponse,
stream,
isGzip(retryResponse.message.headers)
)
} else {
// eslint-disable-next-line no-console
console.log(retryResponse)
throw new Error(`Unable to download ${artifactLocation}`)
}
} else {
// eslint-disable-next-line no-console
console.log(response)
throw new Error(`Unable to download ${artifactLocation}`)
}
}
/**
* Pipes the response from downloading an individual file to the appropriate stream
* @param response the http response received when downloading a file
* @param stream the stream where the file should be written to
* @param isGzip whether the response needs to be decompressed
*/
private async pipeResponseToStream(
response: IHttpClientResponse,
stream: NodeJS.WritableStream,
isGzip: boolean
): Promise<void> {
return new Promise(resolve => {
if (isGzip) {
// pipe the response into gunzip to decompress
const gunzip = zlib.createGunzip()
response.message
.pipe(gunzip)
.pipe(stream)
.on('close', () => {
resolve()
})
} else {
response.message.pipe(stream).on('close', () => {
resolve()
})
}
})
}
}
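For reference, a hedged usage sketch of the class above (top-level `await` for brevity):

```ts
const downloadHttpClient = new DownloadHttpClient()

// list the artifacts for the run, then query the entries of the first one
const artifacts = await downloadHttpClient.listArtifacts()
if (artifacts.count > 0) {
  const firstArtifact = artifacts.value[0]
  const items = await downloadHttpClient.getContainerItems(
    firstArtifact.name,
    firstArtifact.fileContainerResourceUrl
  )
  console.log(`${firstArtifact.name} contains ${items.value.length} entries`)
}
```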

View File

@ -1,5 +1,5 @@
import * as path from 'path'
import {ContainerEntry} from './internal-contracts'
import {ContainerEntry} from './contracts'
export interface DownloadSpecification {
// root download location for the artifact

View File

@ -0,0 +1,33 @@
import {HttpClient} from '@actions/http-client/index'
import {createHttpClient} from './utils'
/**
* Used for managing http clients during either upload or download
*/
export class HttpManager {
private clients: HttpClient[]
constructor(clientCount: number) {
if (clientCount < 1) {
throw new Error('There must be at least one client')
}
// create a distinct client per slot; fill() would reuse a single shared instance
this.clients = Array.from({length: clientCount}, () => createHttpClient())
}
getClient(index: number): HttpClient {
return this.clients[index]
}
// client disposal is necessary if a keep-alive connection is used to properly close the connection
// for more information see: https://github.com/actions/http-client/blob/04e5ad73cd3fd1f5610a32116b0759eddf6570d2/index.ts#L292
disposeAndReplaceClient(index: number): void {
this.clients[index].dispose()
this.clients[index] = createHttpClient()
}
disposeAndReplaceAllClients(): void {
for (const [index] of this.clients.entries()) {
this.disposeAndReplaceClient(index)
}
}
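A brief usage sketch (the client count of 4 is illustrative):

```ts
import {HttpManager} from './http-manager'

const manager = new HttpManager(4)

// index 0 doubles as the client for one-off calls that do not send
// the keep-alive header
const response = await manager.getClient(0).get('https://example.com')
await response.readBody()

// after an error on lane 2, recycle only that lane's connection
manager.disposeAndReplaceClient(2)

// when all work is done, dispose every remaining connection
manager.disposeAndReplaceAllClients()
```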
}

View File

@ -0,0 +1,53 @@
import * as fs from 'fs'
import * as zlib from 'zlib'
import {promisify} from 'util'
const stat = promisify(fs.stat)
/**
* Creates a Gzip compressed file of an original file at the provided temporary filepath location
* @param {string} originalFilePath filepath of whatever will be compressed. The original file will be unmodified
* @param {string} tempFilePath the location of where the Gzip file will be created
* @returns the size of gzip file that gets created
*/
export async function createGZipFileOnDisk(
originalFilePath: string,
tempFilePath: string
): Promise<number> {
return new Promise((resolve, reject) => {
const inputStream = fs.createReadStream(originalFilePath)
const gzip = zlib.createGzip()
const outputStream = fs.createWriteStream(tempFilePath)
inputStream.pipe(gzip).pipe(outputStream)
outputStream.on('finish', async () => {
// wait for stream to finish before calculating the size which is needed as part of the Content-Length header when starting an upload
const size = (await stat(tempFilePath)).size
resolve(size)
})
outputStream.on('error', error => {
// eslint-disable-next-line no-console
console.log(error)
reject(error)
})
})
}
/**
* Creates a GZip file in memory using a buffer. Should be used for smaller files to reduce disk I/O
* @param originalFilePath the path to the original file that is being GZipped
* @returns a buffer with the GZip file
*/
export async function createGZipFileInBuffer(
originalFilePath: string
): Promise<Buffer> {
return new Promise(async resolve => {
const inputStream = fs.createReadStream(originalFilePath)
const gzip = zlib.createGzip()
inputStream.pipe(gzip)
// read the stream into a buffer using experimental async iterators, see https://github.com/nodejs/readable-stream/issues/403#issuecomment-479069043
const chunks = []
for await (const chunk of gzip) {
chunks.push(chunk)
}
resolve(Buffer.concat(chunks))
})
}
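A hedged sketch of choosing between the two helpers above; the 64KB threshold is illustrative (not taken from this change) and `tmp-promise` comes from the new dependencies:

```ts
import * as fs from 'fs'
import * as tmp from 'tmp-promise'

// gzip small files in memory to avoid disk I/O; send larger files
// through a temporary file to avoid buffering the whole payload
async function gzipForUpload(originalFilePath: string): Promise<void> {
  const originalSize = (await fs.promises.stat(originalFilePath)).size
  if (originalSize < 64 * 1024) {
    const gzipBuffer = await createGZipFileInBuffer(originalFilePath)
    console.log(`in-memory gzip produced ${gzipBuffer.byteLength} bytes`)
  } else {
    const tempFile = await tmp.file()
    const gzipSize = await createGZipFileOnDisk(originalFilePath, tempFile.path)
    console.log(`on-disk gzip produced ${gzipSize} bytes at ${tempFile.path}`)
  }
}
```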

View File

@ -0,0 +1,465 @@
import * as fs from 'fs'
import * as tmp from 'tmp-promise'
import * as stream from 'stream'
import {
ArtifactResponse,
CreateArtifactParameters,
PatchArtifactSize,
UploadResults
} from './contracts'
import {
getArtifactUrl,
getContentRange,
getRequestOptions,
isRetryableStatusCode,
isSuccessStatusCode
} from './utils'
import {
getUploadChunkSize,
getUploadFileConcurrency,
getUploadRetryCount,
getRetryWaitTimeInMilliseconds
} from './config-variables'
import {promisify} from 'util'
import {URL} from 'url'
import {performance} from 'perf_hooks'
import {UploadStatusReporter} from './upload-status-reporter'
import {debug, warning, info} from '@actions/core'
import {HttpClientResponse} from '@actions/http-client/index'
import {IHttpClientResponse} from '@actions/http-client/interfaces'
import {HttpManager} from './http-manager'
import {UploadSpecification} from './upload-specification'
import {UploadOptions} from './upload-options'
import {createGZipFileOnDisk, createGZipFileInBuffer} from './upload-gzip'
const stat = promisify(fs.stat)
export class UploadHttpClient {
private uploadHttpManager: HttpManager
private statusReporter: UploadStatusReporter
constructor() {
this.uploadHttpManager = new HttpManager(getUploadFileConcurrency())
this.statusReporter = new UploadStatusReporter()
}
/**
* Creates a file container for the new artifact in the remote blob storage/file service
* @param {string} artifactName Name of the artifact being created
* @returns The response from the Artifact Service if the file container was successfully created
*/
async createArtifactInFileContainer(
artifactName: string
): Promise<ArtifactResponse> {
const parameters: CreateArtifactParameters = {
Type: 'actions_storage',
Name: artifactName
}
const data: string = JSON.stringify(parameters, null, 2)
const artifactUrl = getArtifactUrl()
// use the first client from the httpManager, `keep-alive` is not used so the connection will close immediately
const client = this.uploadHttpManager.getClient(0)
const requestOptions = getRequestOptions('application/json', false, false)
const rawResponse = await client.post(artifactUrl, data, requestOptions)
const body: string = await rawResponse.readBody()
if (isSuccessStatusCode(rawResponse.message.statusCode) && body) {
return JSON.parse(body)
} else {
// eslint-disable-next-line no-console
console.log(rawResponse)
throw new Error(
`Unable to create a container for the artifact ${artifactName}`
)
}
}
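// For reference, the serialized POST body for an artifact named 'my-artifact' looks like:
//   {
//     "Type": "actions_storage",
//     "Name": "my-artifact"
//   }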
/**
* Concurrently upload all of the files in chunks
* @param {string} uploadUrl Base Url for the artifact that was created
* @param {UploadSpecification[]} filesToUpload A list of information about the files being uploaded
* @returns The uploaded size of all the files in bytes along with a list of any files that failed to upload
*/
async uploadArtifactToFileContainer(
uploadUrl: string,
filesToUpload: UploadSpecification[],
options?: UploadOptions
): Promise<UploadResults> {
const FILE_CONCURRENCY = getUploadFileConcurrency()
const MAX_CHUNK_SIZE = getUploadChunkSize()
debug(
`File Concurrency: ${FILE_CONCURRENCY}, and Chunk Size: ${MAX_CHUNK_SIZE}`
)
const parameters: UploadFileParameters[] = []
// by default, file uploads will continue if there is an error unless specified differently in the options
let continueOnError = true
if (options) {
if (options.continueOnError === false) {
continueOnError = false
}
}
// prepare the necessary parameters to upload all the files
for (const file of filesToUpload) {
const resourceUrl = new URL(uploadUrl)
resourceUrl.searchParams.append('itemPath', file.uploadFilePath)
parameters.push({
file: file.absoluteFilePath,
resourceUrl: resourceUrl.toString(),
maxChunkSize: MAX_CHUNK_SIZE,
continueOnError
})
}
const parallelUploads = [...new Array(FILE_CONCURRENCY).keys()]
const failedItemsToReport: string[] = []
let currentFile = 0
let completedFiles = 0
let uploadFileSize = 0
let totalFileSize = 0
let abortPendingFileUploads = false
this.statusReporter.setTotalNumberOfFilesToUpload(filesToUpload.length)
this.statusReporter.start()
// only allow a certain number of files to be uploaded at once to reduce potential errors
await Promise.all(
parallelUploads.map(async index => {
while (currentFile < filesToUpload.length) {
const currentFileParameters = parameters[currentFile]
currentFile += 1
if (abortPendingFileUploads) {
failedItemsToReport.push(currentFileParameters.file)
continue
}
const startTime = performance.now()
const uploadFileResult = await this.uploadFileAsync(
index,
currentFileParameters
)
debug(
`File: ${++completedFiles}/${filesToUpload.length}. ${
currentFileParameters.file
} took ${(performance.now() - startTime).toFixed(
3
)} milliseconds to finish upload`
)
uploadFileSize += uploadFileResult.successfullUploadSize
totalFileSize += uploadFileResult.totalSize
if (uploadFileResult.isSuccess === false) {
failedItemsToReport.push(currentFileParameters.file)
if (!continueOnError) {
// existing uploads will be able to finish however all pending uploads will fail fast
abortPendingFileUploads = true
}
}
this.statusReporter.incrementProcessedCount()
}
})
)
this.statusReporter.stop()
// done uploading, safely dispose of all remaining connections
this.uploadHttpManager.disposeAndReplaceAllClients()
info(`Total size of all the files uploaded is ${uploadFileSize} bytes`)
return {
uploadSize: uploadFileSize,
totalSize: totalFileSize,
failedItems: failedItemsToReport
}
}
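// To illustrate the worker-pool pattern above: with FILE_CONCURRENCY = 2 and 5 files,
// two async workers loop over the shared `currentFile` cursor. Because the read and
// increment of `currentFile` happen synchronously (no `await` in between), no two
// workers can claim the same file; worker 0 might process files 0, 2 and 4 while
// worker 1 processes files 1 and 3. If a file fails while continueOnError is false,
// in-flight uploads finish but every unclaimed file is pushed to failedItemsToReport.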
/**
* Asynchronously uploads a file. The file is GZip compressed during upload if compression reduces its size.
* If the upload file is bigger than the max chunk size it will be uploaded via multiple calls
* @param {number} httpClientIndex The index of the httpClient that is being used to make all of the calls
* @param {UploadFileParameters} parameters Information about the file that needs to be uploaded
* @returns The size of the file that was uploaded in bytes along with any failed uploads
*/
private async uploadFileAsync(
httpClientIndex: number,
parameters: UploadFileParameters
): Promise<UploadFileResult> {
const totalFileSize: number = (await stat(parameters.file)).size
let offset = 0
let isUploadSuccessful = true
let failedChunkSizes = 0
let uploadFileSize = 0
let isGzip = true
// the file that is being uploaded is less than 64k in size, so to increase throughput and to minimize disk I/O
// for creating a new GZip file, an in-memory buffer is used for compression
if (totalFileSize < 65536) {
const buffer = await createGZipFileInBuffer(parameters.file)
let uploadStream: NodeJS.ReadableStream
if (totalFileSize < buffer.byteLength) {
// compression did not help with reducing the size, use a readable stream from the original file for upload
uploadStream = fs.createReadStream(parameters.file)
isGzip = false
uploadFileSize = totalFileSize
} else {
// create a readable stream using a PassThrough stream that is both readable and writable
const passThrough = new stream.PassThrough()
passThrough.end(buffer)
uploadStream = passThrough
uploadFileSize = buffer.byteLength
}
const result = await this.uploadChunk(
httpClientIndex,
parameters.resourceUrl,
uploadStream,
0,
uploadFileSize - 1,
uploadFileSize,
isGzip,
totalFileSize
)
if (!result) {
// chunk failed to upload
isUploadSuccessful = false
failedChunkSizes += uploadFileSize
warning(`Aborting upload for ${parameters.file} due to failure`)
}
return {
isSuccess: isUploadSuccessful,
successfullUploadSize: uploadFileSize - failedChunkSizes,
totalSize: totalFileSize
}
} else {
// the file that is being uploaded is greater than 64k in size, so a temporary file gets created on disk using the
// npm tmp-promise package and the GZip compressed contents get written to that file
return tmp
.file()
.then(async tmpFile => {
// create a GZip file of the original file being uploaded, the original file should not be modified in any way
uploadFileSize = await createGZipFileOnDisk(
parameters.file,
tmpFile.path
)
let uploadFilePath = tmpFile.path
// compression did not help with size reduction, use the original file for upload and delete the temp GZip file
if (totalFileSize < uploadFileSize) {
uploadFileSize = totalFileSize
uploadFilePath = parameters.file
isGzip = false
await tmpFile.cleanup()
}
let abortFileUpload = false
// upload only a single chunk at a time
while (offset < uploadFileSize) {
const chunkSize = Math.min(
uploadFileSize - offset,
parameters.maxChunkSize
)
if (abortFileUpload) {
// if we don't want to continue in the event of an error, any pending upload chunks will be marked as failed
failedChunkSizes += chunkSize
// advance the offset so the loop terminates instead of spinning on the same chunk
offset += chunkSize
continue
}
// if an individual file is greater than 100MB (1024*1024*100) in size, display extra information about the upload status
if (uploadFileSize > 104857600) {
this.statusReporter.updateLargeFileStatus(
parameters.file,
offset,
uploadFileSize
)
}
const start = offset
const end = offset + chunkSize - 1
offset += parameters.maxChunkSize
const result = await this.uploadChunk(
httpClientIndex,
parameters.resourceUrl,
fs.createReadStream(uploadFilePath, {
start,
end,
autoClose: false
}),
start,
end,
uploadFileSize,
isGzip,
totalFileSize
)
if (!result) {
// Chunk failed to upload, report as failed and do not continue uploading any more chunks for the file. It is possible that part of a chunk was
// successfully uploaded so the server may report a different size for what was uploaded
isUploadSuccessful = false
failedChunkSizes += chunkSize
warning(`Aborting upload for ${parameters.file} due to failure`)
abortFileUpload = true
}
}
})
.then(
async (): Promise<UploadFileResult> => {
// only after the file upload is complete and the temporary file is handled, return the UploadResult
return {
isSuccess: isUploadSuccessful,
successfullUploadSize: uploadFileSize - failedChunkSizes,
totalSize: totalFileSize
}
}
)
}
}
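// A worked example of the chunking above: for uploadFileSize = 10 bytes and
// maxChunkSize = 4, the loop issues three uploads with (start, end) pairs
// (0, 3), (4, 7) and (8, 9), i.e. Content-Range headers
// 'bytes 0-3/10', 'bytes 4-7/10' and 'bytes 8-9/10'.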
/**
* Uploads a chunk of an individual file to the specified resourceUrl. If the upload fails and the status code
* indicates a retryable error, the chunk upload is retried until the retry limit is reached
* @param {number} httpClientIndex The index of the httpClient being used to make all the necessary calls
* @param {string} resourceUrl Url of the resource that the chunk will be uploaded to
* @param {NodeJS.ReadableStream} data Stream of the file that will be uploaded
* @param {number} start Starting byte index of file that the chunk belongs to
* @param {number} end Ending byte index of file that the chunk belongs to
* @param {number} uploadFileSize Total size of the file in bytes that is being uploaded
* @param {boolean} isGzip Denotes if we are uploading a Gzip compressed stream
* @param {number} totalFileSize Original total size of the file that is being uploaded
* @returns whether or not the chunk was successfully uploaded
*/
private async uploadChunk(
httpClientIndex: number,
resourceUrl: string,
data: NodeJS.ReadableStream,
start: number,
end: number,
uploadFileSize: number,
isGzip: boolean,
totalFileSize: number
): Promise<boolean> {
// prepare all the necessary headers before making any http call
const requestOptions = getRequestOptions(
'application/octet-stream',
true,
isGzip,
totalFileSize,
end - start + 1,
getContentRange(start, end, uploadFileSize)
)
const uploadChunkRequest = async (): Promise<IHttpClientResponse> => {
const client = this.uploadHttpManager.getClient(httpClientIndex)
return await client.sendStream('PUT', resourceUrl, data, requestOptions)
}
let retryCount = 0
const retryLimit = getUploadRetryCount()
// allow for failed chunks to be retried multiple times
while (retryCount <= retryLimit) {
try {
const response = await uploadChunkRequest()
// Always read the body of the response. There is potential for a resource leak if the body is not read which will
// result in the connection remaining open along with unintended consequences when trying to dispose of the client
await response.readBody()
if (isSuccessStatusCode(response.message.statusCode)) {
return true
} else if (isRetryableStatusCode(response.message.statusCode)) {
retryCount++
if (retryCount > retryLimit) {
info(
`Retry limit has been reached for chunk at offset ${start} to ${resourceUrl}`
)
return false
} else {
info(
`HTTP ${response.message.statusCode} during chunk upload, will retry at offset ${start} after ${getRetryWaitTimeInMilliseconds()} milliseconds. Retry count #${retryCount}. URL ${resourceUrl}`
)
this.uploadHttpManager.disposeAndReplaceClient(httpClientIndex)
await new Promise(resolve =>
setTimeout(resolve, getRetryWaitTimeInMilliseconds())
)
}
} else {
info(`#ERROR# Unable to upload chunk to ${resourceUrl}`)
// eslint-disable-next-line no-console
console.log(response)
return false
}
} catch (error) {
// eslint-disable-next-line no-console
console.log(error)
retryCount++
if (retryCount > retryLimit) {
info(
`Retry limit has been reached for chunk at offset ${start} to ${resourceUrl}`
)
return false
} else {
info(`Retrying chunk upload after encountering an error`)
this.uploadHttpManager.disposeAndReplaceClient(httpClientIndex)
await new Promise(resolve =>
setTimeout(resolve, getRetryWaitTimeInMilliseconds())
)
}
}
}
return false
}
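// A worked retry timeline, assuming getUploadRetryCount() returns 2: attempt 1 fails
// with a retryable status (retryCount becomes 1), the client at httpClientIndex is
// disposed and replaced, and after the wait a 2nd attempt runs; if that also fails
// (retryCount 2 <= retryLimit 2) a 3rd attempt runs; a 3rd failure pushes retryCount
// to 3 > retryLimit, so false is returned. Non-retryable statuses fail immediately.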
/**
* Updates the size of the artifact from -1, the placeholder value set when the file container was first created for the artifact.
* Updating the size indicates that we are done uploading all the contents of the artifact
*/
async patchArtifactSize(size: number, artifactName: string): Promise<void> {
const requestOptions = getRequestOptions('application/json', false, false)
const resourceUrl = new URL(getArtifactUrl())
resourceUrl.searchParams.append('artifactName', artifactName)
const parameters: PatchArtifactSize = {Size: size}
const data: string = JSON.stringify(parameters, null, 2)
debug(`URL is ${resourceUrl.toString()}`)
// use the first client from the httpManager, `keep-alive` is not used so the connection will close immediately
const client = this.uploadHttpManager.getClient(0)
const rawResponse: HttpClientResponse = await client.patch(
resourceUrl.toString(),
data,
requestOptions
)
const body: string = await rawResponse.readBody()
if (isSuccessStatusCode(rawResponse.message.statusCode)) {
debug(
`Artifact ${artifactName} has been successfully uploaded, total size ${size}`
)
} else if (rawResponse.message.statusCode === 404) {
throw new Error(`An Artifact with the name ${artifactName} was not found`)
} else {
// eslint-disable-next-line no-console
console.log(body)
throw new Error(`Unable to finish uploading artifact ${artifactName}`)
}
}
}
interface UploadFileParameters {
file: string
resourceUrl: string
maxChunkSize: number
continueOnError: boolean
}
interface UploadFileResult {
isSuccess: boolean
successfullUploadSize: number
totalSize: number
}
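For orientation, a condensed sketch of how UploadHttpClient is driven end to end. The orchestration, and the `fileContainerResourceUrl` field read from the create response, are assumptions for illustration; the actual entry point is not part of this diff:

import {UploadHttpClient} from './upload-http-client'
import {getUploadSpecification} from './upload-specification'

async function uploadArtifactSketch(
  name: string,
  rootDirectory: string,
  files: string[]
): Promise<void> {
  const uploadHttpClient = new UploadHttpClient()
  // 1. create a file container for the artifact on the service
  const response = await uploadHttpClient.createArtifactInFileContainer(name)
  // 2. compute upload paths and concurrently upload every file,
  //    GZip compressed whenever compression saves space
  const specifications = getUploadSpecification(name, rootDirectory, files)
  const uploadResult = await uploadHttpClient.uploadArtifactToFileContainer(
    response.fileContainerResourceUrl, // assumed field name from ./contracts
    specifications
  )
  // 3. patch the artifact size from its initial -1 to mark the upload finished
  await uploadHttpClient.patchArtifactSize(uploadResult.totalSize, name)
}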

View File

@ -1,7 +1,7 @@
import * as fs from 'fs'
import {debug} from '@actions/core'
import {join, normalize, resolve} from 'path'
import {checkArtifactName, checkArtifactFilePath} from './utils'
export interface UploadSpecification {
absoluteFilePath: string
@ -58,7 +58,6 @@ export function getUploadSpecification(
if (!fs.existsSync(file)) {
throw new Error(`File ${file} does not exist`)
}
if (!fs.lstatSync(file).isDirectory()) {
// Normalize and resolve, this allows for either absolute or relative paths to be used
file = normalize(file)
@ -69,6 +68,10 @@ export function getUploadSpecification(
)
}
// Check for forbidden characters in file paths that will be rejected during upload
const uploadPath = file.replace(rootDirectory, '')
checkArtifactFilePath(uploadPath)
/*
uploadFilePath denotes where the file will be uploaded in the file container on the server. During a run, if multiple artifacts are uploaded, they will all
be saved in the same container. The artifact name is used as the root directory in the container to separate and distinguish uploaded artifacts
@ -81,7 +84,7 @@ export function getUploadSpecification(
*/
specifications.push({
absoluteFilePath: file,
uploadFilePath: join(artifactName, uploadPath)
})
} else {
// Directories are rejected by the server during upload

View File

@ -0,0 +1,90 @@
import {info} from '@actions/core'
/**
* Upload Status Reporter that displays information about the progress/status of an artifact that is being uploaded
*
* Every 10 seconds, the overall status of the upload gets displayed. If a large file is being uploaded, extra
* information about its individual progress gets displayed every second
*/
export class UploadStatusReporter {
private totalNumberOfFilesToUpload = 0
private processedCount = 0
private largeUploads = new Map<string, string>()
private totalUploadStatus: NodeJS.Timeout | undefined
private largeFileUploadStatus: NodeJS.Timeout | undefined
constructor() {
this.totalUploadStatus = undefined
this.largeFileUploadStatus = undefined
}
setTotalNumberOfFilesToUpload(fileTotal: number): void {
this.totalNumberOfFilesToUpload = fileTotal
}
start(): void {
// displays information about the total upload status every 10 seconds
this.totalUploadStatus = setInterval(() => {
// display 1 decimal place without any rounding
const percentage = this.formatPercentage(
this.processedCount,
this.totalNumberOfFilesToUpload
)
info(
`Total file(s): ${
this.totalNumberOfFilesToUpload
} ---- Processed file #${this.processedCount} (${percentage.slice(
0,
percentage.indexOf('.') + 2
)}%)`
)
}, 10000)
// every second, displays extra information about any large files that are taking a significant amount of time to upload
this.largeFileUploadStatus = setInterval(() => {
for (const value of Array.from(this.largeUploads.values())) {
info(value)
}
// clear all entries in the map after displaying the information so it will not be displayed again unless explicitly added
this.largeUploads = new Map<string, string>()
}, 1000)
}
updateLargeFileStatus(
fileName: string,
numerator: number,
denominator: number
): void {
// display 1 decimal place without any rounding
const percentage = this.formatPercentage(numerator, denominator)
const displayInformation = `Uploading ${fileName} (${percentage.slice(
0,
percentage.indexOf('.') + 2
)}%)`
// a map is used so that any previously queued display information for the same large file gets overwritten
this.largeUploads.set(fileName, displayInformation)
}
stop(): void {
if (this.totalUploadStatus) {
clearInterval(this.totalUploadStatus)
}
if (this.largeFileUploadStatus) {
clearInterval(this.largeFileUploadStatus)
}
}
incrementProcessedCount(): void {
this.processedCount++
}
private formatPercentage(numerator: number, denominator: number): string {
// toFixed() rounds, so use extra precision; callers slice the result down to 1 decimal place, which therefore stays accurate
return ((numerator / denominator) * 100).toFixed(4)
}
}
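A short usage sketch of the reporter (the file name and byte counts are illustrative):

import {UploadStatusReporter} from './upload-status-reporter'

const reporter = new UploadStatusReporter()
reporter.setTotalNumberOfFilesToUpload(100)
reporter.start()
// after each file finishes uploading:
reporter.incrementProcessedCount()
// before each chunk of a file over 100MB, refresh its status line; the 1-second
// interval will print 'Uploading big-file.bin (50.0%)' on its next tick
reporter.updateLargeFileStatus('big-file.bin', 52428800, 104857600)
reporter.stop()

Note that slicing instead of rounding means that, for example, 2 of 3 files processed displays as 66.6%, never 66.7%.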

View File

@ -7,7 +7,7 @@ import {
getRuntimeToken,
getRuntimeUrl,
getWorkFlowRunId
} from './config-variables'
/**
* Parses an env variable that is a number
@ -59,17 +59,40 @@ export function getContentRange(
return `bytes ${start}-${end}/${total}`
}
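// For example, the second 4-byte chunk of a 10-byte upload:
//   getContentRange(4, 7, 10) === 'bytes 4-7/10'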
/**
* Sets all the necessary headers when making HTTP calls
* @param {string} contentType the type of content being uploaded
* @param {boolean} isKeepAlive indicates if the same connection will be used to make multiple calls
* @param {boolean} isGzip indicates if GZip compressed content is being uploaded
* @param {number} uncompressedLength the original (uncompressed) size of the content when GZip compressed content is being uploaded
* @param {number} contentLength the length of the content that is being uploaded
* @param {string} contentRange the range of the content that is being uploaded
* @returns appropriate request options to make a specific http call
*/
export function getRequestOptions(
contentType?: string,
isKeepAlive?: boolean,
isGzip?: boolean,
uncompressedLength?: number,
contentLength?: number,
contentRange?: string
): IHeaders {
const requestOptions: IHeaders = {
// same Accept type for each http call that gets made
Accept: `application/json;api-version=${getApiVersion()}`
}
if (contentType) {
requestOptions['Content-Type'] = contentType
}
if (isKeepAlive) {
requestOptions['Connection'] = 'Keep-Alive'
// keep alive for at least 10 seconds before closing the connection
requestOptions['Keep-Alive'] = '10'
}
if (isGzip) {
requestOptions['Content-Encoding'] = 'gzip'
requestOptions['x-tfs-filelength'] = uncompressedLength
}
if (contentLength) {
requestOptions['Content-Length'] = contentLength
}
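// The remainder of this function (truncated by this hunk) presumably sets the
// Content-Range header from the contentRange parameter. As a sketch, a GZip chunk
// upload of bytes 0-1023 of a 4096-byte compressed file whose original size was
// 8192 bytes would carry headers along these lines:
//   Accept: 'application/json;api-version=<version>'
//   'Content-Type': 'application/octet-stream'
//   Connection: 'Keep-Alive', 'Keep-Alive': '10'
//   'Content-Encoding': 'gzip', 'x-tfs-filelength': 8192
//   'Content-Length': 1024, 'Content-Range': 'bytes 0-1023/4096'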
@ -96,20 +119,54 @@ export function getArtifactUrl(): string {
* from the server if attempted to be sent over. These characters are not allowed due to limitations with certain
* file systems such as NTFS. To maintain platform-agnostic behavior, any character that is unsupported on some
* individual filesystem/platform is not supported on any filesystem/platform
*
* FilePaths can include characters such as \ and / which are not permitted in the artifact name alone
*/
const invalidArtifactFilePathCharacters = [
'"',
':',
'<',
'>',
'|',
'*',
'?',
' '
]
const invalidArtifactNameCharacters = [
...invalidArtifactFilePathCharacters,
'\\',
'/'
]
/**
* Scans the name of the artifact to make sure there are no illegal characters
*/
export function checkArtifactName(name: string): void {
if (!name) {
throw new Error(`Artifact name: ${name}, is incorrectly provided`)
}
for (const invalidChar of invalidArtifactNameCharacters) {
if (name.includes(invalidChar)) {
throw new Error(
`Artifact name is not valid: ${name}. Contains character: "${invalidChar}". Invalid artifact name characters include: ${invalidArtifactNameCharacters.toString()}.`
)
}
}
}
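// For example:
//   checkArtifactName('my-artifact')   // ok
//   checkArtifactName('my/artifact')   // throws, '/' is only permitted in file paths
//   checkArtifactName('my artifact')   // throws, spaces are rejected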
/**
* Scans a file path used during upload to make sure there are no illegal characters
*/
export function checkArtifactFilePath(path: string): void {
if (!path) {
throw new Error(`Artifact path: ${path}, is incorrectly provided`)
}
for (const invalidChar of invalidArtifactFilePathCharacters) {
if (path.includes(invalidChar)) {
throw new Error(
`Artifact path is not valid: ${path}. Contains character: "${invalidChar}". Invalid characters include: ${invalidArtifactFilePathCharacters.toString()}.`
)
}
}