
Merge pull request #9 from WarpBuilds/hotfix-concurrency-limits-uploads

hotfix: concurrency limits in file uploads
pull/1935/head
Prajjwal 2024-07-18 11:06:47 +05:30 committed by GitHub
commit e85e3f8677
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 50 additions and 32 deletions
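The fix caps upload parallelism: instead of firing every presigned-URL chunk upload at once, the action now walks the URLs in fixed-size batches (4, or 8 on runners with more than 4 CPU cores) and awaits each batch before starting the next. A minimal standalone sketch of that pattern, assuming a hypothetical putChunk helper, one Buffer per chunk, and Node 18+'s global fetch — none of which are the action's real API:

import * as os from 'os'

// Hypothetical uploader: PUTs one chunk to its presigned URL and
// resolves with the part number once the service accepts it.
async function putChunk(url: string, chunk: Buffer, partNumber: number): Promise<number> {
  const res = await fetch(url, {method: 'PUT', body: chunk})
  if (!res.ok) {
    throw new Error(`Chunk ${partNumber} failed with status ${res.status}`)
  }
  return partNumber
}

// Upload all chunks, but keep only `concurrency` of them in flight at a time.
async function uploadWithLimit(urls: string[], chunks: Buffer[]): Promise<number[]> {
  // Same policy as the hotfix: 4 parallel uploads, 8 on machines with more than 4 cores.
  const concurrency = os.cpus().length > 4 ? 8 : 4
  const completed: number[] = []
  for (let i = 0; i < urls.length; i += concurrency) {
    // Start one batch and wait for all of it before starting the next batch.
    const batch = urls
      .slice(i, i + concurrency)
      .map((url, j) => putChunk(url, chunks[i + j], i + j + 1))
    completed.push(...(await Promise.all(batch)))
  }
  return completed
}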

View File

@@ -1,6 +1,6 @@
 {
   "name": "github-actions.warp-cache",
-  "version": "1.2.2",
+  "version": "1.2.5",
   "preview": true,
   "description": "Github action to use WarpBuild's in-house cache offering",
   "keywords": [

View File

@@ -124,6 +124,11 @@ export function getCacheVersion(
     components.push('windows-only')
   }
+  // Check for mac platforms if enableCrossOsArchive is false
+  if (process.platform === 'darwin' && !enableCrossOsArchive) {
+    components.push('mac-only')
+  }
   // Add architecture to cache version
   if (!enableCrossArchArchive) {
     components.push(process.arch)
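For context, the 'mac-only' marker added above joins the other platform and architecture components that make up the cache version; in actions/cache-derived code those components are typically joined and hashed into the version string. A rough sketch of that final step, which is an assumption here and not part of this diff:

import * as crypto from 'crypto'

// Assumed shape of the version derivation: join the collected components
// and hash them into the hex string that keys the cache entry.
function toCacheVersion(components: string[]): string {
  return crypto.createHash('sha256').update(components.join('|')).digest('hex')
}

// e.g. an Apple Silicon runner with cross-OS and cross-arch archives disabled:
// toCacheVersion(['mac-only', 'arm64'])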

View File

@@ -1,5 +1,6 @@
 import * as core from '@actions/core'
 import * as utils from './cacheUtils'
+import * as os from 'os'
 import fs from 'fs'
@@ -52,9 +53,8 @@ async function uploadChunk(
     }
   } catch (error) {
     throw new Error(
-      `Cache service responded with ${
-        (error as AxiosError).status
-      } during upload chunk.`
+      `Cache service responded with ${(error as AxiosError).response
+        ?.status} during upload chunk.`
     )
   }
 }
@@ -66,41 +66,54 @@ export async function uploadFileToS3(
   const fileSize = utils.getArchiveFileSizeInBytes(archivePath)
   const numberOfChunks = preSignedURLs.length
+  let concurrency = 4
+  // Adjust concurrency based on the number of cpu cores
+  if (os.cpus().length > 4) {
+    concurrency = 8
+  }
   const fd = fs.openSync(archivePath, 'r')
-  core.debug('Awaiting all uploads')
+  core.debug(`Awaiting all uploads with concurrency limit of ${concurrency}`)
   let offset = 0
+  const completedParts: InternalS3CompletedPart[] = []
   try {
-    const completedParts = await Promise.all(
-      preSignedURLs.map(async (presignedURL, index) => {
-        const chunkSize = Math.ceil(fileSize / numberOfChunks)
-        const start = offset
-        const end = offset + chunkSize - 1
-        offset += chunkSize
+    for (let i = 0; i < numberOfChunks; i += concurrency) {
+      const batch = preSignedURLs
+        .slice(i, i + concurrency)
+        .map((presignedURL, index) => {
+          const chunkIndex = i + index
+          const chunkSize = Math.ceil(fileSize / numberOfChunks)
+          const start = offset
+          const end = offset + chunkSize - 1
+          offset += chunkSize
 
-        return await uploadChunk(
-          presignedURL,
-          () =>
-            fs
-              .createReadStream(archivePath, {
-                fd,
-                start,
-                end,
-                autoClose: false
-              })
-              .on('error', error => {
-                throw new Error(
-                  `Cache upload failed because file read failed with ${error.message}`
-                )
-              }),
-          index + 1,
-          start,
-          end
-        )
-      })
-    )
+          return uploadChunk(
+            presignedURL,
+            () =>
+              fs
+                .createReadStream(archivePath, {
+                  fd,
+                  start,
+                  end,
+                  autoClose: false
+                })
+                .on('error', error => {
+                  throw new Error(
+                    `Cache upload failed because file read failed with ${error.message}`
+                  )
+                }),
+            chunkIndex + 1,
+            start,
+            end
+          )
+        })
+      const batchResults = await Promise.all(batch)
+      completedParts.push(...batchResults)
+    }
     return completedParts
   } finally {
     fs.closeSync(fd)