mirror of https://github.com/actions/toolkit
This should work E2E...
parent 7e8f13b4e6
commit 12fa0be194
@@ -1,7 +1,34 @@
import * as core from '@actions/core'
import {checkArtifactName} from './path-and-artifact-name-validation'
import {UploadOptions} from './upload-options'
import {UploadResponse} from './upload-response'
import {UploadSpecification, getUploadSpecification} from './upload-specification'
import {ArtifactHttpClient} from '../artifact-http-client'
import {ArtifactServiceClientJSON} from 'src/generated'

import {BlobClient, BlockBlobUploadStreamOptions} from '@azure/storage-blob'
import {TransferProgressEvent} from '@azure/core-http'
import * as a from 'archiver'
import * as fs from 'fs'
import * as stream from 'stream'

import {getBackendIds, BackendIds} from '../util'

const bufferSize = 1024 * 1024 * 8 // 8 MB

// Custom stream transformer so we can set the highWaterMark property
// See https://github.com/nodejs/node/issues/8855
export class ZipUploadStream extends stream.Transform {
  constructor(bufferSize: number) {
    super({
      highWaterMark: bufferSize
    })
  }

  _transform(chunk: any, enc: any, cb: any) {
    cb(null, chunk)
  }
}
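
// Illustrative aside, not part of the original change: the transformer above
// exists only to raise the stream's highWaterMark beyond Node's 16 KB default.
// A minimal sketch of the difference (demoHighWaterMark is a hypothetical
// helper and is never called by the upload path):
function demoHighWaterMark(): void {
  const plain = new stream.PassThrough()
  console.log(plain.readableHighWaterMark) // 16384, Node's default
  const tuned = new ZipUploadStream(bufferSize)
  console.log(tuned.readableHighWaterMark) // 8388608, the 8 MB configured above
}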

export async function uploadArtifact(
  name: string,

@@ -10,19 +37,148 @@ export async function uploadArtifact(
  options?: UploadOptions | undefined
): Promise<UploadResponse> {
  let uploadByteCount = 0

  // Need to keep checking the artifact name
  checkArtifactName(name)

  // TODO Twirp call to create new artifact
  // Get specification for the files being uploaded
  const uploadSpecification: UploadSpecification[] = getUploadSpecification(
    name,
    rootDirectory,
    files
  )

  // TODO Upload to blob storage using SAS URL
  // testing.ts is being used to prototype this functionality
  if (uploadSpecification.length === 0) {
    core.warning(`No files found that can be uploaded`)
  } else {
    const artifactClient = new ArtifactHttpClient('@actions/artifact-upload')
    const jsonClient = new ArtifactServiceClientJSON(artifactClient)

    // TODO Twirp call to finalize the new artifact upload
    const backendIDs: BackendIds = getBackendIds()

    try {
      const createResp = await jsonClient.CreateArtifact({
        workflowRunBackendId: backendIDs.workflowRunBackendId,
        workflowJobRunBackendId: backendIDs.workflowJobRunBackendId,
        name: name,
        version: 4
      })

      if (!createResp.ok) {
        core.error('CreateArtifact failed')
      }

      console.log(createResp.signedUploadUrl)
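
      // Added commentary, not part of the original change: the generated Twirp
      // client types are not shown in this diff. From the fields used here,
      // the response is assumed to carry at least:
      //   ok: boolean
      //   signedUploadUrl: string  // SAS URL for the artifact's blob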

      // Blob upload start
      const blobClient = new BlobClient(createResp.signedUploadUrl)
      const zip = a.create('zip', {
        zlib: {level: 9} // Sets the compression level.
        // Available options are 0-9
        // 0 => no compression
        // 1 => fastest with low compression
        // 9 => highest compression ratio but the slowest
      })

console.log("file specification")
|
||||
for (const file of uploadSpecification) {
|
||||
console.log("uploadPath:" + file.uploadFilePath + " absolute:" + file.absoluteFilePath)
|
||||
zip.append(fs.createReadStream(file.absoluteFilePath), {name: file.uploadFilePath})
|
||||
}
|
||||
|
||||
      const zipUploadStream = new ZipUploadStream(bufferSize)
      zip.pipe(zipUploadStream)
      zip.finalize()

      console.log('Write high watermark value ' + zipUploadStream.writableHighWaterMark)
      console.log('Read high watermark value ' + zipUploadStream.readableHighWaterMark)

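      // Added commentary, not part of the original change: finalize() is not
      // awaited on purpose. The archive only drains as fast as its consumer
      // reads, so awaiting completion here, before uploadStream below starts
      // pulling from zipUploadStream, could stall once the 8 MB buffer fills.
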
      // good practice to catch warnings (ie stat failures and other non-blocking errors)
      zip.on('warning', function (err) {
        if (err.code === 'ENOENT') {
          console.log('zip error ENOENT')
        } else {
          console.log('some other warning')
          console.log(err)
        }
      })

      // good practice to catch this error explicitly
      zip.on('error', function (err) {
        console.log('some error with zip')
        console.log(err)
      })

zip.on("progress", function(progress: a.ProgressData) {
|
||||
console.log(progress)
|
||||
|
||||
/* This outputs data like this, we could potentially do something with this for even more logging to show the status of the zip creation
|
||||
{
|
||||
entries: { total: 7, processed: 1 },
|
||||
fs: { totalBytes: 0, processedBytes: 0 }
|
||||
}
|
||||
{
|
||||
entries: { total: 7, processed: 2 },
|
||||
fs: { totalBytes: 0, processedBytes: 0 }
|
||||
}
|
||||
*/
|
||||
})
|
||||
|
||||
|
||||
      // We can add these to debug logging
      zip.on('end', function () {
        console.log('zip ending')
      })
      zip.on('finish', function () {
        console.log('zip finished')
      })

      // Upload options
      const maxBuffers = 5
      const blockBlobClient = blobClient.getBlockBlobClient()

      const myCallback = function (progress: TransferProgressEvent) {
        console.log('Byte upload count ' + progress.loadedBytes)
        uploadByteCount = progress.loadedBytes
      }

      const options: BlockBlobUploadStreamOptions = {
        blobHTTPHeaders: {blobContentType: 'zip'},
        onProgress: myCallback
      }

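      // Added commentary, not part of the original change: uploadStream reads
      // the source in bufferSize-sized chunks and keeps at most maxBuffers of
      // them allocated, so 8 MB * 5 means roughly 40 MB of the zip can sit in
      // memory at once. Also, 'zip' is not a registered MIME type; the
      // conventional blobContentType would be 'application/zip'.
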
      // Upload!
      try {
        await blockBlobClient.uploadStream(
          zipUploadStream,
          bufferSize,
          maxBuffers,
          options
        )
      } catch (error) {
        console.log(error)
      }
      console.log('final upload size in bytes is ' + uploadByteCount)

console.log("we are done with the blob upload!")
|
||||
// Blob upload end
|
||||
|
||||
const finalizeResp = await jsonClient.FinalizeArtifact({workflowRunBackendId: backendIDs.workflowRunBackendId, workflowJobRunBackendId: backendIDs.workflowJobRunBackendId, name: name, size: BigInt(5)})
|
||||
|
||||
if (!finalizeResp.ok) {
|
||||
core.error("FinalizeArtifact failed")
|
||||
}
|
||||
} catch (e) {
|
||||
console.log(e)
|
||||
}
|
||||
|
||||
console.log("FinalizeArtifact succeeded")
|
||||
  }

  const uploadResponse: UploadResponse = {
    artifactName: name,
    size: uploadByteCount
  }

  return uploadResponse
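
A minimal usage sketch of the reworked entry point (illustrative only: the diff elides the parameters between name and options, so the (name, files, rootDirectory) order and the helper main are assumptions):

  import {uploadArtifact} from './upload-artifact'

  async function main(): Promise<void> {
    const response = await uploadArtifact(
      'my-artifact',
      ['/home/user/files/plz-upload/file1.txt'],
      '/home/user/files/plz-upload'
    )
    console.log(`uploaded ${response.size} bytes for ${response.artifactName}`)
  }

  main()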

@@ -0,0 +1,94 @@
import * as fs from 'fs'
import {debug} from '@actions/core'
import {join, normalize, resolve} from 'path'
import {checkArtifactFilePath} from './path-and-artifact-name-validation'

export interface UploadSpecification {
  absoluteFilePath: string
  uploadFilePath: string
}

/**
 * Creates a specification that describes how each file that is part of the artifact will be uploaded
 * @param artifactName the name of the artifact being uploaded. Used during upload to denote where the artifact is stored on the server
 * @param rootDirectory an absolute file path that denotes the path that should be removed from the beginning of each artifact file
 * @param artifactFiles a list of absolute file paths that denote what should be uploaded as part of the artifact
 */
export function getUploadSpecification(
  artifactName: string,
  rootDirectory: string,
  artifactFiles: string[]
): UploadSpecification[] {
  // artifact name was checked earlier on, no need to check again
  const specifications: UploadSpecification[] = []

  if (!fs.existsSync(rootDirectory)) {
    throw new Error(`Provided rootDirectory ${rootDirectory} does not exist`)
  }
  if (!fs.statSync(rootDirectory).isDirectory()) {
    throw new Error(
      `Provided rootDirectory ${rootDirectory} is not a valid directory`
    )
  }
  // Normalize and resolve, this allows for either absolute or relative paths to be used
  rootDirectory = normalize(rootDirectory)
  rootDirectory = resolve(rootDirectory)

  /*
    Example to demonstrate behavior

    Input:
      artifactName: my-artifact
      rootDirectory: '/home/user/files/plz-upload'
      artifactFiles: [
        '/home/user/files/plz-upload/file1.txt',
        '/home/user/files/plz-upload/file2.txt',
        '/home/user/files/plz-upload/dir/file3.txt'
      ]

    Output:
      specifications: [
        ['/home/user/files/plz-upload/file1.txt', 'my-artifact/file1.txt'],
        ['/home/user/files/plz-upload/file2.txt', 'my-artifact/file2.txt'],
        ['/home/user/files/plz-upload/dir/file3.txt', 'my-artifact/dir/file3.txt']
      ]
  */
  for (let file of artifactFiles) {
    if (!fs.existsSync(file)) {
      throw new Error(`File ${file} does not exist`)
    }
    if (!fs.statSync(file).isDirectory()) {
      // Normalize and resolve, this allows for either absolute or relative paths to be used
      file = normalize(file)
      file = resolve(file)
      if (!file.startsWith(rootDirectory)) {
        throw new Error(
          `The rootDirectory: ${rootDirectory} is not a parent directory of the file: ${file}`
        )
      }

      // Check for forbidden characters in file paths that will be rejected during upload
      const uploadPath = file.replace(rootDirectory, '')
      checkArtifactFilePath(uploadPath)

      /*
        uploadFilePath denotes where the file will be uploaded in the file container on the server. During a run, if multiple artifacts are uploaded, they will all
        be saved in the same container. The artifact name is used as the root directory in the container to separate and distinguish uploaded artifacts

        path.join handles all the following cases and would return 'artifact-name/file-to-upload.txt':
          join('artifact-name/', 'file-to-upload.txt')
          join('artifact-name/', '/file-to-upload.txt')
          join('artifact-name', 'file-to-upload.txt')
          join('artifact-name', '/file-to-upload.txt')
      */
      specifications.push({
        absoluteFilePath: file,
        uploadFilePath: join(artifactName, uploadPath)
      })
    } else {
      // Directories are rejected by the server during upload
      debug(`Removing ${file} from rawSearchResults because it is a directory`)
    }
  }
  return specifications
}
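
A quick usage sketch of the new helper (illustrative only; the inputs mirror the example in the comment above):

  import {getUploadSpecification} from './upload-specification'

  const specs = getUploadSpecification(
    'my-artifact',
    '/home/user/files/plz-upload',
    ['/home/user/files/plz-upload/file1.txt', '/home/user/files/plz-upload/dir/file3.txt']
  )
  for (const spec of specs) {
    console.log(`${spec.absoluteFilePath} -> ${spec.uploadFilePath}`)
  }
  // /home/user/files/plz-upload/file1.txt -> my-artifact/file1.txt
  // /home/user/files/plz-upload/dir/file3.txt -> my-artifact/dir/file3.txt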