
Merge pull request #9 from WarpBuilds/hotfix-concurrency-limits-uploads

hotfix: concurrency limits in file uploads
pull/1935/head
Prajjwal 2024-07-18 11:06:47 +05:30 committed by GitHub
commit e85e3f8677
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in database)
3 changed files with 50 additions and 32 deletions
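
Before this hotfix, uploadFileToS3 fired one uploadChunk call per pre-signed URL and awaited them all with a single Promise.all, so every chunk of the archive was uploaded at once. The patch below caps that at 4 concurrent chunk uploads (8 on runners with more than 4 CPU cores) by walking the pre-signed URLs in batches. A minimal TypeScript sketch of the pattern in isolation; `UploadChunk` and `uploadInBatches` are hypothetical names standing in for the real code inside uploadFileToS3:

import * as os from 'os'

// Hypothetical stand-in for the repo's uploadChunk, which returns
// InternalS3CompletedPart values in the real code.
type UploadChunk = (url: string, partNumber: number) => Promise<string>

export async function uploadInBatches(
  preSignedURLs: string[],
  uploadChunk: UploadChunk
): Promise<string[]> {
  // 4 concurrent uploads by default, 8 on machines with more than 4 CPU cores
  const concurrency = os.cpus().length > 4 ? 8 : 4
  const results: string[] = []

  for (let i = 0; i < preSignedURLs.length; i += concurrency) {
    // Start at most `concurrency` uploads, then wait for the whole batch
    // to finish before starting the next one.
    const batch = preSignedURLs
      .slice(i, i + concurrency)
      .map((url, index) => uploadChunk(url, i + index + 1))
    results.push(...(await Promise.all(batch)))
  }

  return results
}

Each batch is awaited in full before the next one starts, so at most `concurrency` range reads of the archive are in flight at any time.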

View File

@@ -1,6 +1,6 @@
 {
   "name": "github-actions.warp-cache",
-  "version": "1.2.2",
+  "version": "1.2.5",
   "preview": true,
   "description": "Github action to use WarpBuild's in-house cache offering",
   "keywords": [

View File

@@ -124,6 +124,11 @@ export function getCacheVersion(
     components.push('windows-only')
   }

+  // Check for mac platforms if enableCrossOsArchive is false
+  if (process.platform === 'darwin' && !enableCrossOsArchive) {
+    components.push('mac-only')
+  }
+
   // Add architecture to cache version
   if (!enableCrossArchArchive) {
     components.push(process.arch)
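
This mirrors the existing windows-only guard: when cross-OS restores are disabled, macOS runners now contribute a 'mac-only' component so their cache versions never collide with caches written on other operating systems. A hedged sketch of how such components typically become the final version string; the signature and SHA-256 hashing below are borrowed from the upstream @actions/cache toolkit and are assumptions, not part of this diff:

import * as crypto from 'crypto'

// Assumed signature and hashing; only the platform/arch guards are taken from the diff.
export function getCacheVersionSketch(
  paths: string[],
  compressionMethod?: string,
  enableCrossOsArchive = false,
  enableCrossArchArchive = false
): string {
  const components = [...paths]
  if (compressionMethod) {
    components.push(compressionMethod)
  }
  // Pin the version to one OS unless cross-OS restore is explicitly enabled
  if (process.platform === 'win32' && !enableCrossOsArchive) {
    components.push('windows-only')
  }
  if (process.platform === 'darwin' && !enableCrossOsArchive) {
    components.push('mac-only')
  }
  // Pin the version to one CPU architecture unless cross-arch restore is enabled
  if (!enableCrossArchArchive) {
    components.push(process.arch)
  }
  return crypto.createHash('sha256').update(components.join('|')).digest('hex')
}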

View File

@@ -1,5 +1,6 @@
 import * as core from '@actions/core'
 import * as utils from './cacheUtils'
+import * as os from 'os'
 import fs from 'fs'
@@ -52,9 +53,8 @@ async function uploadChunk(
     }
   } catch (error) {
     throw new Error(
-      `Cache service responded with ${
-        (error as AxiosError).status
-      } during upload chunk.`
+      `Cache service responded with ${(error as AxiosError).response
+        ?.status} during upload chunk.`
     )
   }
 }
@@ -66,41 +66,54 @@ export async function uploadFileToS3(
   const fileSize = utils.getArchiveFileSizeInBytes(archivePath)
   const numberOfChunks = preSignedURLs.length

+  let concurrency = 4
+  // Adjust concurrency based on the number of cpu cores
+  if (os.cpus().length > 4) {
+    concurrency = 8
+  }
+
   const fd = fs.openSync(archivePath, 'r')
-  core.debug('Awaiting all uploads')
+  core.debug(`Awaiting all uploads with concurrency limit of ${concurrency}`)
   let offset = 0
+  const completedParts: InternalS3CompletedPart[] = []

   try {
-    const completedParts = await Promise.all(
-      preSignedURLs.map(async (presignedURL, index) => {
-        const chunkSize = Math.ceil(fileSize / numberOfChunks)
-        const start = offset
-        const end = offset + chunkSize - 1
-        offset += chunkSize
-
-        return await uploadChunk(
-          presignedURL,
-          () =>
-            fs
-              .createReadStream(archivePath, {
-                fd,
-                start,
-                end,
-                autoClose: false
-              })
-              .on('error', error => {
-                throw new Error(
-                  `Cache upload failed because file read failed with ${error.message}`
-                )
-              }),
-          index + 1,
-          start,
-          end
-        )
-      })
-    )
+    for (let i = 0; i < numberOfChunks; i += concurrency) {
+      const batch = preSignedURLs
+        .slice(i, i + concurrency)
+        .map((presignedURL, index) => {
+          const chunkIndex = i + index
+          const chunkSize = Math.ceil(fileSize / numberOfChunks)
+          const start = offset
+          const end = offset + chunkSize - 1
+          offset += chunkSize
+
+          return uploadChunk(
+            presignedURL,
+            () =>
+              fs
+                .createReadStream(archivePath, {
+                  fd,
+                  start,
+                  end,
+                  autoClose: false
+                })
+                .on('error', error => {
+                  throw new Error(
+                    `Cache upload failed because file read failed with ${error.message}`
+                  )
+                }),
+            chunkIndex + 1,
+            start,
+            end
+          )
+        })
+
+      const batchResults = await Promise.all(batch)
+      completedParts.push(...batchResults)
+    }

     return completedParts
   } finally {
     fs.closeSync(fd)
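
Two details keep the batched version equivalent to the old Promise.all over all chunks: the running offset is advanced synchronously inside the .map callback, so every part still covers the same byte range, and chunkIndex + 1 preserves the global S3 part number across batches. A worked example of the range math, with assumed sizes:

// Assumed example: a 100 MiB archive split across 8 pre-signed URLs.
const fileSize = 100 * 1024 * 1024 // 104857600 bytes
const numberOfChunks = 8
const chunkSize = Math.ceil(fileSize / numberOfChunks) // 13107200 bytes

for (let k = 0; k < numberOfChunks; k++) {
  const start = k * chunkSize
  const end = start + chunkSize - 1 // part 8 ends at byte 104857599, the last byte
  console.log(`part ${k + 1}: bytes ${start}-${end}`)
}

When fileSize is not evenly divisible by the number of chunks, the last computed `end` can point past the final byte; fs.createReadStream simply stops at end of file, so the last part is just shorter.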