From 2426e1ecbf2ba19c2e5b5e1a4fd7e12b9237b871 Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Tue, 30 Jul 2024 09:50:04 -0700 Subject: [PATCH 01/35] chore: apply prettier formatting --- _worker.js | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/_worker.js b/_worker.js index 1a42507..78cd16c 100644 --- a/_worker.js +++ b/_worker.js @@ -6,8 +6,8 @@ const EXPIRY = 3600; const MIME = "application/vnd.git-lfs+json"; const METHOD_FOR = { - "upload": "PUT", - "download": "GET", + upload: "PUT", + download: "GET", }; async function sign(s3, bucket, path, method) { @@ -30,7 +30,7 @@ function parseAuthorization(req) { throw new Response(null, { status: 400 }); } - const buffer = Uint8Array.from(atob(encoded), c => c.charCodeAt(0)); + const buffer = Uint8Array.from(atob(encoded), (c) => c.charCodeAt(0)); const decoded = new TextDecoder().decode(buffer).normalize(); const index = decoded.indexOf(":"); if (index === -1 || /[\0-\x1F\x7F]/.test(decoded)) { @@ -47,7 +47,7 @@ async function fetch(req, env) { if (req.method === "GET") { return Response.redirect(HOMEPAGE, 302); } else { - return new Response(null, { status: 405, headers: { "Allow": "GET" } }); + return new Response(null, { status: 405, headers: { Allow: "GET" } }); } } @@ -56,7 +56,7 @@ async function fetch(req, env) { } if (req.method !== "POST") { - return new Response(null, { status: 405, headers: { "Allow": "POST" } }); + return new Response(null, { status: 405, headers: { Allow: "POST" } }); } // in practice, we'd rather not break out-of-spec clients not setting these @@ -92,13 +92,19 @@ async function fetch(req, env) { const method = METHOD_FOR[operation]; const response = JSON.stringify({ transfer: "basic", - objects: await Promise.all(objects.map(async ({ oid, size }) => ({ - oid, size, - authenticated: true, - actions: { - [operation]: { href: await sign(s3, bucket, oid, method), expires_in }, - }, - }))), + objects: await Promise.all( + objects.map(async ({ oid, size }) => ({ + oid, + size, + authenticated: true, + actions: { + [operation]: { + href: await sign(s3, bucket, oid, method), + expires_in, + }, + }, + })) + ), }); return new Response(response, { From 9d1554ff86682293fb57fb9a463c42a6f01fddc8 Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Tue, 30 Jul 2024 09:51:39 -0700 Subject: [PATCH 02/35] fix: use const for content type, update repo name --- _worker.js | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/_worker.js b/_worker.js index 78cd16c..beee8a4 100644 --- a/_worker.js +++ b/_worker.js @@ -1,9 +1,9 @@ import { AwsClient } from "aws4fetch"; -const HOMEPAGE = "https://github.com/milkey-mouse/git-lfs-s3-proxy"; +const HOMEPAGE = "https://github.com/aibtcdev/git-lfs-s3-proxy"; const EXPIRY = 3600; - const MIME = "application/vnd.git-lfs+json"; +const PART_SIZE = 5 * 1024 * 1024; // 5MB minimum part size for S3 const METHOD_FOR = { upload: "PUT", @@ -111,7 +111,7 @@ async function fetch(req, env) { status: 200, headers: { "Cache-Control": "no-store", - "Content-Type": "application/vnd.git-lfs+json", + "Content-Type": MIME, }, }); } From 7d21914638f6b66a14fe369f09f1fb5d94e99fc6 Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Tue, 30 Jul 2024 09:55:43 -0700 Subject: [PATCH 03/35] fix: add multipart functions courtesy of claude --- _worker.js | 91 +++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 80 insertions(+), 11 deletions(-) diff --git a/_worker.js b/_worker.js index beee8a4..5e8b3fd 100644 --- a/_worker.js +++ 
b/_worker.js @@ -40,6 +40,47 @@ function parseAuthorization(req) { return { user: decoded.slice(0, index), pass: decoded.slice(index + 1) }; } +async function initiateMultipartUpload(s3, bucket, key) { + const url = await sign(s3, bucket, key, "POST", "uploads="); + const response = await fetch(url); + const xml = await response.text(); + const uploadId = xml.match(/(.*?)<\/UploadId>/)[1]; + return uploadId; +} + +async function getSignedUrlForPart(s3, bucket, key, uploadId, partNumber) { + return sign( + s3, + bucket, + key, + "PUT", + `partNumber=${partNumber}&uploadId=${uploadId}&` + ); +} + +async function completeMultipartUpload(s3, bucket, key, uploadId, parts) { + const url = await sign(s3, bucket, key, "POST", `uploadId=${uploadId}&`); + const xml = ` + + ${parts + .map( + ({ PartNumber, ETag }) => ` + + ${PartNumber} + ${ETag} + + ` + ) + .join("")} + + `; + const response = await fetch(url, { + method: "POST", + body: xml, + }); + return response.ok; +} + async function fetch(req, env) { const url = new URL(req.url); @@ -93,17 +134,45 @@ async function fetch(req, env) { const response = JSON.stringify({ transfer: "basic", objects: await Promise.all( - objects.map(async ({ oid, size }) => ({ - oid, - size, - authenticated: true, - actions: { - [operation]: { - href: await sign(s3, bucket, oid, method), - expires_in, - }, - }, - })) + objects.map(async ({ oid, size }) => { + if (operation === "upload" && size > PART_SIZE) { + // initiate multipart upload + const uploadId = await initiateMultipartUpload(s3, bucket, oid); + const partCount = Math.ceil(size / PART_SIZE); + const parts = await Promise.all( + Array.from({ length: partCount }, (_, i) => + getSignedUrlForPart(s3, bucket, oid, uploadId, i + 1) + ) + ); + + return { + oid, + size, + authenticated: true, + actions: { + upload: { + href: parts[0], // URL for the first part + header: { + "Upload-ID": uploadId, + "Part-Count": partCount.toString(), + }, + }, + }, + }; + } else { + return { + oid, + size, + authenticated: true, + actions: { + [operation]: { + href: await sign(s3, bucket, oid, method), + expires_in: EXPIRY, + }, + }, + }; + } + }) ), }); From e56688450212caf154f3e34aa4bfa3f5eaf05b18 Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Tue, 30 Jul 2024 09:56:30 -0700 Subject: [PATCH 04/35] chore: use unused const --- _worker.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/_worker.js b/_worker.js index 5e8b3fd..7fc1aec 100644 --- a/_worker.js +++ b/_worker.js @@ -167,7 +167,7 @@ async function fetch(req, env) { actions: { [operation]: { href: await sign(s3, bucket, oid, method), - expires_in: EXPIRY, + expires_in: expires_in, }, }, }; From 44f0c6c6b05cd9b7ce004c8e6ac23f1d85a5fdab Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Tue, 30 Jul 2024 10:09:12 -0700 Subject: [PATCH 05/35] fix: use expires_in instead of EXPIRY const --- _worker.js | 84 +++++++++++++++++++++++++++++------------------------- 1 file changed, 45 insertions(+), 39 deletions(-) diff --git a/_worker.js b/_worker.js index 7fc1aec..845f4ce 100644 --- a/_worker.js +++ b/_worker.js @@ -10,10 +10,13 @@ const METHOD_FOR = { download: "GET", }; -async function sign(s3, bucket, path, method) { +async function sign(s3, bucket, path, method, query = "") { const info = { method }; const signed = await s3.sign( - new Request(`https://${bucket}/${path}?X-Amz-Expires=${EXPIRY}`, info), + new Request( + `https://${bucket}/${path}?${query}X-Amz-Expires=${EXPIRY}`, + info + ), { aws: { signQuery: true } } ); return signed.url; @@ -58,29 
+61,9 @@ async function getSignedUrlForPart(s3, bucket, key, uploadId, partNumber) { ); } -async function completeMultipartUpload(s3, bucket, key, uploadId, parts) { - const url = await sign(s3, bucket, key, "POST", `uploadId=${uploadId}&`); - const xml = ` - - ${parts - .map( - ({ PartNumber, ETag }) => ` - - ${PartNumber} - ${ETag} - - ` - ) - .join("")} - - `; - const response = await fetch(url, { - method: "POST", - body: xml, - }); - return response.ok; +async function getSignedUrlForCompletion(s3, bucket, key, uploadId) { + return sign(s3, bucket, key, "POST", `uploadId=${uploadId}&`); } - async function fetch(req, env) { const url = new URL(req.url); @@ -100,12 +83,6 @@ async function fetch(req, env) { return new Response(null, { status: 405, headers: { Allow: "POST" } }); } - // in practice, we'd rather not break out-of-spec clients not setting these - /*if (!req.headers.get("Accept").startsWith(MIME) - || !req.headers.get("Content-Type").startsWith(MIME)) { - return new Response(null, { status: 406 }); - }*/ - const { user, pass } = parseAuthorization(req); let s3Options = { accessKeyId: user, secretAccessKey: pass }; @@ -120,7 +97,6 @@ async function fetch(req, env) { const key = decodeURIComponent(segment.slice(0, sliceIdx)); const val = decodeURIComponent(segment.slice(sliceIdx + 1)); s3Options[key] = val; - bucketIdx++; } } @@ -130,7 +106,6 @@ async function fetch(req, env) { const expires_in = params.expiry || env.EXPIRY || EXPIRY; const { objects, operation } = await req.json(); - const method = METHOD_FOR[operation]; const response = JSON.stringify({ transfer: "basic", objects: await Promise.all( @@ -139,35 +114,66 @@ async function fetch(req, env) { // initiate multipart upload const uploadId = await initiateMultipartUpload(s3, bucket, oid); const partCount = Math.ceil(size / PART_SIZE); - const parts = await Promise.all( + + // generate signed URLs for all parts + const partUrls = await Promise.all( Array.from({ length: partCount }, (_, i) => getSignedUrlForPart(s3, bucket, oid, uploadId, i + 1) ) ); + // generate signed URL for completing the multipart upload + const completeUrl = await getSignedUrlForCompletion( + s3, + bucket, + oid, + uploadId + ); + return { oid, size, authenticated: true, actions: { upload: { - href: parts[0], // URL for the first part + href: partUrls[0], + header: { + "Content-Type": "application/octet-stream", + }, + expires_in: expires_in, + }, + verify: { + href: completeUrl, header: { - "Upload-ID": uploadId, - "Part-Count": partCount.toString(), + "Content-Type": "application/xml", }, + expires_in: expires_in, }, }, + error: { + code: 202, + message: "Large file detected, using multipart upload", + }, }; } else { + const href = await sign( + s3, + bucket, + oid, + operation === "upload" ? "PUT" : "GET" + ); return { oid, size, authenticated: true, actions: { [operation]: { - href: await sign(s3, bucket, oid, method), - expires_in: expires_in, + href, + header: { + "Content-Type": + operation === "upload" ? 
"application/octet-stream" : "", + }, + expires_in, }, }, }; @@ -180,7 +186,7 @@ async function fetch(req, env) { status: 200, headers: { "Cache-Control": "no-store", - "Content-Type": MIME, + "Content-Type": "application/vnd.git-lfs+json", }, }); } From 85e1bff71eddbf826a47c24a6d23ed292a06ccb9 Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Tue, 30 Jul 2024 10:32:10 -0700 Subject: [PATCH 06/35] fix: add try/catch with specific errors --- _worker.js | 250 +++++++++++++++++++++++++++++------------------------ 1 file changed, 136 insertions(+), 114 deletions(-) diff --git a/_worker.js b/_worker.js index 845f4ce..e1e6f8c 100644 --- a/_worker.js +++ b/_worker.js @@ -65,130 +65,152 @@ async function getSignedUrlForCompletion(s3, bucket, key, uploadId) { return sign(s3, bucket, key, "POST", `uploadId=${uploadId}&`); } async function fetch(req, env) { - const url = new URL(req.url); - - if (url.pathname == "/") { - if (req.method === "GET") { - return Response.redirect(HOMEPAGE, 302); - } else { - return new Response(null, { status: 405, headers: { Allow: "GET" } }); + try { + const url = new URL(req.url); + + if (url.pathname == "/") { + if (req.method === "GET") { + return Response.redirect(HOMEPAGE, 302); + } else { + return new Response(null, { status: 405, headers: { Allow: "GET" } }); + } } - } - if (!url.pathname.endsWith("/objects/batch")) { - return new Response(null, { status: 404 }); - } + if (!url.pathname.endsWith("/objects/batch")) { + return new Response(null, { status: 404 }); + } - if (req.method !== "POST") { - return new Response(null, { status: 405, headers: { Allow: "POST" } }); - } + if (req.method !== "POST") { + return new Response(null, { status: 405, headers: { Allow: "POST" } }); + } - const { user, pass } = parseAuthorization(req); - let s3Options = { accessKeyId: user, secretAccessKey: pass }; - - const segments = url.pathname.split("/").slice(1, -2); - let params = {}; - let bucketIdx = 0; - for (const segment of segments) { - const sliceIdx = segment.indexOf("="); - if (sliceIdx === -1) { - break; - } else { - const key = decodeURIComponent(segment.slice(0, sliceIdx)); - const val = decodeURIComponent(segment.slice(sliceIdx + 1)); - s3Options[key] = val; - bucketIdx++; + const { user, pass } = parseAuthorization(req); + let s3Options = { accessKeyId: user, secretAccessKey: pass }; + + const segments = url.pathname.split("/").slice(1, -2); + let params = {}; + let bucketIdx = 0; + for (const segment of segments) { + const sliceIdx = segment.indexOf("="); + if (sliceIdx === -1) { + break; + } else { + const key = decodeURIComponent(segment.slice(0, sliceIdx)); + const val = decodeURIComponent(segment.slice(sliceIdx + 1)); + s3Options[key] = val; + bucketIdx++; + } } - } - const s3 = new AwsClient(s3Options); - const bucket = segments.slice(bucketIdx).join("/"); - const expires_in = params.expiry || env.EXPIRY || EXPIRY; - - const { objects, operation } = await req.json(); - const response = JSON.stringify({ - transfer: "basic", - objects: await Promise.all( - objects.map(async ({ oid, size }) => { - if (operation === "upload" && size > PART_SIZE) { - // initiate multipart upload - const uploadId = await initiateMultipartUpload(s3, bucket, oid); - const partCount = Math.ceil(size / PART_SIZE); - - // generate signed URLs for all parts - const partUrls = await Promise.all( - Array.from({ length: partCount }, (_, i) => - getSignedUrlForPart(s3, bucket, oid, uploadId, i + 1) - ) - ); - - // generate signed URL for completing the multipart upload - const completeUrl = 
await getSignedUrlForCompletion( - s3, - bucket, - oid, - uploadId - ); - - return { - oid, - size, - authenticated: true, - actions: { - upload: { - href: partUrls[0], - header: { - "Content-Type": "application/octet-stream", + const s3 = new AwsClient(s3Options); + const bucket = segments.slice(bucketIdx).join("/"); + const expires_in = params.expiry || env.EXPIRY || EXPIRY; + + const { objects, operation } = await req.json(); + + const response = JSON.stringify({ + transfer: "basic", + objects: await Promise.all( + objects.map(async ({ oid, size }) => { + try { + if (operation === "upload" && size > PART_SIZE) { + // initiate multipart upload + const uploadId = await initiateMultipartUpload(s3, bucket, oid); + const partCount = Math.ceil(size / PART_SIZE); + + // generate signed URLs for all parts + const partUrls = await Promise.all( + Array.from({ length: partCount }, (_, i) => + getSignedUrlForPart(s3, bucket, oid, uploadId, i + 1) + ) + ); + + // generate signed URL for completing the multipart upload + const completeUrl = await getSignedUrlForCompletion( + s3, + bucket, + oid, + uploadId + ); + + return { + oid, + size, + authenticated: true, + actions: { + upload: { + href: partUrls[0], + header: { + "Content-Type": "application/octet-stream", + }, + expires_in: expires_in, + }, + verify: { + href: completeUrl, + header: { + "Content-Type": "application/xml", + }, + expires_in: expires_in, + }, }, - expires_in: expires_in, - }, - verify: { - href: completeUrl, - header: { - "Content-Type": "application/xml", + error: { + code: 202, + message: "Large file detected, using multipart upload", }, - expires_in: expires_in, - }, - }, - error: { - code: 202, - message: "Large file detected, using multipart upload", - }, - }; - } else { - const href = await sign( - s3, - bucket, - oid, - operation === "upload" ? "PUT" : "GET" - ); - return { - oid, - size, - authenticated: true, - actions: { - [operation]: { - href, - header: { - "Content-Type": - operation === "upload" ? "application/octet-stream" : "", + }; + } else { + const href = await sign( + s3, + bucket, + oid, + operation === "upload" ? "PUT" : "GET" + ); + return { + oid, + size, + authenticated: true, + actions: { + [operation]: { + href, + header: { + "Content-Type": + operation === "upload" + ? 
"application/octet-stream" + : "", + }, + expires_in, + }, }, - expires_in, + }; + } + } catch (error) { + console.error(`Error processing object ${oid}:`, error); + return { + oid, + size, + error: { + message: "Internal server error processing object", }, - }, - }; - } - }) - ), - }); - - return new Response(response, { - status: 200, - headers: { - "Cache-Control": "no-store", - "Content-Type": "application/vnd.git-lfs+json", - }, - }); + }; + } + }) + ), + }); + + return new Response(response, { + status: 200, + headers: { + "Cache-Control": "no-store", + "Content-Type": "application/vnd.git-lfs+json", + }, + }); + } catch (error) { + console.error("Unexpected error:", error); + return new Response(JSON.stringify({ message: "Internal server error" }), { + status: 500, + headers: { "Content-Type": "application/json" }, + }); + } } export default { fetch }; From 10cf4fb4dde79fa55c630ed42709fb348a449934 Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Tue, 30 Jul 2024 10:33:31 -0700 Subject: [PATCH 07/35] chore: remove unused code --- _worker.js | 6 ------ 1 file changed, 6 deletions(-) diff --git a/_worker.js b/_worker.js index e1e6f8c..690ba16 100644 --- a/_worker.js +++ b/_worker.js @@ -2,14 +2,8 @@ import { AwsClient } from "aws4fetch"; const HOMEPAGE = "https://github.com/aibtcdev/git-lfs-s3-proxy"; const EXPIRY = 3600; -const MIME = "application/vnd.git-lfs+json"; const PART_SIZE = 5 * 1024 * 1024; // 5MB minimum part size for S3 -const METHOD_FOR = { - upload: "PUT", - download: "GET", -}; - async function sign(s3, bucket, path, method, query = "") { const info = { method }; const signed = await s3.sign( From b0f33a4504041bb916338200f7df8900655896f8 Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Tue, 30 Jul 2024 10:37:32 -0700 Subject: [PATCH 08/35] fix: move object processing outside of response --- _worker.js | 154 ++++++++++++++++++++++++++--------------------------- 1 file changed, 77 insertions(+), 77 deletions(-) diff --git a/_worker.js b/_worker.js index 690ba16..c8b144b 100644 --- a/_worker.js +++ b/_worker.js @@ -102,93 +102,93 @@ async function fetch(req, env) { const { objects, operation } = await req.json(); - const response = JSON.stringify({ - transfer: "basic", - objects: await Promise.all( - objects.map(async ({ oid, size }) => { - try { - if (operation === "upload" && size > PART_SIZE) { - // initiate multipart upload - const uploadId = await initiateMultipartUpload(s3, bucket, oid); - const partCount = Math.ceil(size / PART_SIZE); - - // generate signed URLs for all parts - const partUrls = await Promise.all( - Array.from({ length: partCount }, (_, i) => - getSignedUrlForPart(s3, bucket, oid, uploadId, i + 1) - ) - ); - - // generate signed URL for completing the multipart upload - const completeUrl = await getSignedUrlForCompletion( - s3, - bucket, - oid, - uploadId - ); - - return { - oid, - size, - authenticated: true, - actions: { - upload: { - href: partUrls[0], - header: { - "Content-Type": "application/octet-stream", - }, - expires_in: expires_in, - }, - verify: { - href: completeUrl, - header: { - "Content-Type": "application/xml", - }, - expires_in: expires_in, + const processedObjects = await Promise.all( + objects.map(async ({ oid, size }) => { + try { + if (operation === "upload" && size > PART_SIZE) { + // initiate multipart upload + const uploadId = await initiateMultipartUpload(s3, bucket, oid); + const partCount = Math.ceil(size / PART_SIZE); + + // generate signed URLs for all parts + const partUrls = await Promise.all( + Array.from({ 
length: partCount }, (_, i) => + getSignedUrlForPart(s3, bucket, oid, uploadId, i + 1) + ) + ); + + // generate signed URL for completing the multipart upload + const completeUrl = await getSignedUrlForCompletion( + s3, + bucket, + oid, + uploadId + ); + + return { + oid, + size, + authenticated: true, + actions: { + upload: { + href: partUrls[0], + header: { + "Content-Type": "application/octet-stream", }, + expires_in: expires_in, }, - error: { - code: 202, - message: "Large file detected, using multipart upload", - }, - }; - } else { - const href = await sign( - s3, - bucket, - oid, - operation === "upload" ? "PUT" : "GET" - ); - return { - oid, - size, - authenticated: true, - actions: { - [operation]: { - href, - header: { - "Content-Type": - operation === "upload" - ? "application/octet-stream" - : "", - }, - expires_in, + verify: { + href: completeUrl, + header: { + "Content-Type": "application/xml", }, + expires_in: expires_in, }, - }; - } - } catch (error) { - console.error(`Error processing object ${oid}:`, error); + }, + error: { + code: 202, + message: "Large file detected, using multipart upload", + }, + }; + } else { + const href = await sign( + s3, + bucket, + oid, + operation === "upload" ? "PUT" : "GET" + ); return { oid, size, - error: { - message: "Internal server error processing object", + authenticated: true, + actions: { + [operation]: { + href, + header: { + "Content-Type": + operation === "upload" ? "application/octet-stream" : "", + }, + expires_in, + }, }, }; } - }) - ), + } catch (error) { + console.error(`Error processing object ${oid}:`, error); + return { + oid, + size, + error: { + message: "Internal server error processing object", + }, + }; + } + }) + ); + + const response = JSON.stringify({ + transfer: "basic", + objects: processedObjects, }); return new Response(response, { From 643116e3545fe52de6b5f06ef3ee17923dfd594c Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Tue, 30 Jul 2024 10:46:16 -0700 Subject: [PATCH 09/35] fix: add more robust error messages --- _worker.js | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/_worker.js b/_worker.js index c8b144b..553681c 100644 --- a/_worker.js +++ b/_worker.js @@ -200,10 +200,19 @@ async function fetch(req, env) { }); } catch (error) { console.error("Unexpected error:", error); - return new Response(JSON.stringify({ message: "Internal server error" }), { - status: 500, - headers: { "Content-Type": "application/json" }, - }); + if (error instanceof Response) { + // handle errors thrown as Response objects (e.g., 401, 400) + return error; + } else if (error.name === "AbortError") { + return new Response("Request timed out", { status: 504 }); + } else if (error.name === "TypeError") { + return new Response("Bad request format", { status: 400 }); + } else if (error.message.includes("NetworkError")) { + return new Response("Network error occurred", { status: 503 }); + } else { + // generic server error for unhandled cases + return new Response("Internal server error", { status: 500 }); + } } } From 6fa2134e7da2418875da6c8dbbc4ef1e16d0897e Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Tue, 30 Jul 2024 10:53:39 -0700 Subject: [PATCH 10/35] fix: rework multipart upload logic --- _worker.js | 59 ++++++++++++++++++++++++++++++++---------------------- 1 file changed, 35 insertions(+), 24 deletions(-) diff --git a/_worker.js b/_worker.js index 553681c..b3c91e1 100644 --- a/_worker.js +++ b/_worker.js @@ -45,6 +45,31 @@ async function initiateMultipartUpload(s3, bucket, key) { 
return uploadId; } +async function handleMultipartUpload(s3, bucket, oid, size) { + const uploadId = await initiateMultipartUpload(s3, bucket, oid); + const partCount = Math.ceil(size / PART_SIZE); + + const partUrls = await Promise.all( + Array.from({ length: partCount }, (_, i) => + getSignedUrlForPart(s3, bucket, oid, uploadId, i + 1) + ) + ); + + const completeUrl = await getSignedUrlForCompletion( + s3, + bucket, + oid, + uploadId + ); + + return { + uploadId, + partUrls, + completeUrl, + partCount, + }; +} + async function getSignedUrlForPart(s3, bucket, key, uploadId, partNumber) { return sign( s3, @@ -58,6 +83,7 @@ async function getSignedUrlForPart(s3, bucket, key, uploadId, partNumber) { async function getSignedUrlForCompletion(s3, bucket, key, uploadId) { return sign(s3, bucket, key, "POST", `uploadId=${uploadId}&`); } + async function fetch(req, env) { try { const url = new URL(req.url); @@ -106,37 +132,22 @@ async function fetch(req, env) { objects.map(async ({ oid, size }) => { try { if (operation === "upload" && size > PART_SIZE) { - // initiate multipart upload - const uploadId = await initiateMultipartUpload(s3, bucket, oid); - const partCount = Math.ceil(size / PART_SIZE); - - // generate signed URLs for all parts - const partUrls = await Promise.all( - Array.from({ length: partCount }, (_, i) => - getSignedUrlForPart(s3, bucket, oid, uploadId, i + 1) - ) - ); - - // generate signed URL for completing the multipart upload - const completeUrl = await getSignedUrlForCompletion( - s3, - bucket, - oid, - uploadId - ); + const { uploadId, partUrls, completeUrl, partCount } = + await handleMultipartUpload(s3, bucket, oid, size); return { oid, size, authenticated: true, actions: { - upload: { - href: partUrls[0], + upload: partUrls.map((url, index) => ({ + href: url, header: { "Content-Type": "application/octet-stream", }, expires_in: expires_in, - }, + partNumber: index + 1, + })), verify: { href: completeUrl, header: { @@ -145,9 +156,9 @@ async function fetch(req, env) { expires_in: expires_in, }, }, - error: { - code: 202, - message: "Large file detected, using multipart upload", + multipart: { + partSize: PART_SIZE, + partCount: partCount, }, }; } else { From f5029c643e3fff053073555f90dc65ed74f3cb13 Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Tue, 30 Jul 2024 10:56:03 -0700 Subject: [PATCH 11/35] fix: add more logging to mutlipart upload --- _worker.js | 55 ++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 35 insertions(+), 20 deletions(-) diff --git a/_worker.js b/_worker.js index b3c91e1..a4020b4 100644 --- a/_worker.js +++ b/_worker.js @@ -46,28 +46,41 @@ async function initiateMultipartUpload(s3, bucket, key) { } async function handleMultipartUpload(s3, bucket, oid, size) { - const uploadId = await initiateMultipartUpload(s3, bucket, oid); - const partCount = Math.ceil(size / PART_SIZE); + try { + const uploadId = await initiateMultipartUpload(s3, bucket, oid); + const partCount = Math.ceil(size / PART_SIZE); - const partUrls = await Promise.all( - Array.from({ length: partCount }, (_, i) => - getSignedUrlForPart(s3, bucket, oid, uploadId, i + 1) - ) - ); + const partUrls = await Promise.all( + Array.from({ length: partCount }, (_, i) => + getSignedUrlForPart(s3, bucket, oid, uploadId, i + 1) + ) + ); - const completeUrl = await getSignedUrlForCompletion( - s3, - bucket, - oid, - uploadId - ); + const completeUrl = await getSignedUrlForCompletion( + s3, + bucket, + oid, + uploadId + ); - return { - uploadId, - partUrls, - completeUrl, - 
partCount, - }; + return { + uploadId, + partUrls, + completeUrl, + partCount, + }; + } catch (error) { + console.error(`Error in handleMultipartUpload for ${oid}:`, error); + if (error.name === "AbortError") { + throw new Error("Multipart upload initialization timed out"); + } else if (error.message.includes("InvalidAccessKeyId")) { + throw new Error("Invalid S3 credentials"); + } else if (error.message.includes("NoSuchBucket")) { + throw new Error("S3 bucket not found"); + } else { + throw new Error("Failed to initialize multipart upload"); + } + } } async function getSignedUrlForPart(s3, bucket, key, uploadId, partNumber) { @@ -159,6 +172,7 @@ async function fetch(req, env) { multipart: { partSize: PART_SIZE, partCount: partCount, + uploadId: uploadId, }, }; } else { @@ -190,7 +204,8 @@ async function fetch(req, env) { oid, size, error: { - message: "Internal server error processing object", + message: + error.message || "Internal server error processing object", }, }; } From 8237cfdba65fbf89d91600ecb03912658579ff5b Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Tue, 30 Jul 2024 11:18:48 -0700 Subject: [PATCH 12/35] fix: add more specific errors for multipart --- _worker.js | 44 +++++++++++++++++++++++++++++++++++--------- 1 file changed, 35 insertions(+), 9 deletions(-) diff --git a/_worker.js b/_worker.js index a4020b4..0dd3796 100644 --- a/_worker.js +++ b/_worker.js @@ -38,11 +38,31 @@ function parseAuthorization(req) { } async function initiateMultipartUpload(s3, bucket, key) { - const url = await sign(s3, bucket, key, "POST", "uploads="); - const response = await fetch(url); - const xml = await response.text(); - const uploadId = xml.match(/(.*?)<\/UploadId>/)[1]; - return uploadId; + try { + const url = await sign(s3, bucket, key, "POST", "uploads="); + const response = await fetch(url); + + if (!response.ok) { + const errorText = await response.text(); + console.error(`S3 error response: ${errorText}`); + throw new Error( + `S3 responded with status ${response.status}: ${errorText}` + ); + } + + const xml = await response.text(); + const uploadId = xml.match(/(.*?)<\/UploadId>/)[1]; + if (!uploadId) { + throw new Error("Failed to extract UploadId from S3 response"); + } + return uploadId; + } catch (error) { + console.error( + `Error in initiateMultipartUpload for bucket ${bucket}, key ${key}:`, + error + ); + throw error; + } } async function handleMultipartUpload(s3, bucket, oid, size) { @@ -73,12 +93,18 @@ async function handleMultipartUpload(s3, bucket, oid, size) { console.error(`Error in handleMultipartUpload for ${oid}:`, error); if (error.name === "AbortError") { throw new Error("Multipart upload initialization timed out"); - } else if (error.message.includes("InvalidAccessKeyId")) { - throw new Error("Invalid S3 credentials"); + } else if (error.message.includes("AccessDenied")) { + throw new Error("Access denied. Check S3 credentials and permissions."); } else if (error.message.includes("NoSuchBucket")) { - throw new Error("S3 bucket not found"); + throw new Error("S3 bucket not found. Check bucket name and region."); + } else if (error.message.includes("NetworkError")) { + throw new Error( + "Network error. Check your internet connection and try again." 
+ ); } else { - throw new Error("Failed to initialize multipart upload"); + throw new Error( + `Failed to initialize multipart upload: ${error.message}` + ); } } } From a442f849fa9909d1cd38b33adc495c4cd126abc9 Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Tue, 30 Jul 2024 11:25:35 -0700 Subject: [PATCH 13/35] fix: update url format --- _worker.js | 40 ++++++++++++++++++++++++++++++---------- 1 file changed, 30 insertions(+), 10 deletions(-) diff --git a/_worker.js b/_worker.js index 0dd3796..7141cb3 100644 --- a/_worker.js +++ b/_worker.js @@ -5,14 +5,24 @@ const EXPIRY = 3600; const PART_SIZE = 5 * 1024 * 1024; // 5MB minimum part size for S3 async function sign(s3, bucket, path, method, query = "") { - const info = { method }; - const signed = await s3.sign( - new Request( - `https://${bucket}/${path}?${query}X-Amz-Expires=${EXPIRY}`, - info - ), - { aws: { signQuery: true } } - ); + const encodedPath = encodeURIComponent(path).replace(/%2F/g, "/"); + const url = `https://${bucket}/${encodedPath}`; + const fullUrl = query ? `${url}?${query}` : url; + + const info = { + method, + headers: { + "x-amz-content-sha256": "UNSIGNED-PAYLOAD", + }, + }; + + console.log(`Signing request: ${method} ${fullUrl}`); + + const signed = await s3.sign(new Request(fullUrl, info), { + aws: { signQuery: true }, + }); + + console.log(`Signed URL: ${signed.url}`); return signed.url; } @@ -39,18 +49,28 @@ function parseAuthorization(req) { async function initiateMultipartUpload(s3, bucket, key) { try { - const url = await sign(s3, bucket, key, "POST", "uploads="); - const response = await fetch(url); + const url = await sign(s3, bucket, key, "POST", "uploads"); + console.log(`Initiating multipart upload: POST ${url}`); + + const response = await fetch(url, { + method: "POST", + headers: { + "x-amz-content-sha256": "UNSIGNED-PAYLOAD", + }, + }); if (!response.ok) { const errorText = await response.text(); console.error(`S3 error response: ${errorText}`); + console.error(`Response headers:`, response.headers); throw new Error( `S3 responded with status ${response.status}: ${errorText}` ); } const xml = await response.text(); + console.log(`S3 response:`, xml); + const uploadId = xml.match(/(.*?)<\/UploadId>/)[1]; if (!uploadId) { throw new Error("Failed to extract UploadId from S3 response"); From 2bcfbd769805b25218284fff6736de38ffaef54a Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Tue, 30 Jul 2024 12:56:03 -0700 Subject: [PATCH 14/35] fix: encode query params with url --- _worker.js | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/_worker.js b/_worker.js index 7141cb3..06df369 100644 --- a/_worker.js +++ b/_worker.js @@ -6,8 +6,8 @@ const PART_SIZE = 5 * 1024 * 1024; // 5MB minimum part size for S3 async function sign(s3, bucket, path, method, query = "") { const encodedPath = encodeURIComponent(path).replace(/%2F/g, "/"); - const url = `https://${bucket}/${encodedPath}`; - const fullUrl = query ? `${url}?${query}` : url; + const encodedQuery = query ? 
`?${encodeURIComponent(query)}` : ""; + const url = `https://${bucket}/${encodedPath}${encodedQuery}`; const info = { method, @@ -16,9 +16,9 @@ async function sign(s3, bucket, path, method, query = "") { }, }; - console.log(`Signing request: ${method} ${fullUrl}`); + console.log(`Signing request: ${method} ${url}`); - const signed = await s3.sign(new Request(fullUrl, info), { + const signed = await s3.sign(new Request(url, info), { aws: { signQuery: true }, }); From 9bc252007ad1e0d7851221d36347994c3217b50f Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Tue, 30 Jul 2024 13:03:34 -0700 Subject: [PATCH 15/35] fix: use oid to create key --- _worker.js | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/_worker.js b/_worker.js index 06df369..8df8e96 100644 --- a/_worker.js +++ b/_worker.js @@ -85,21 +85,21 @@ async function initiateMultipartUpload(s3, bucket, key) { } } -async function handleMultipartUpload(s3, bucket, oid, size) { +async function handleMultipartUpload(s3, bucket, key, size) { try { - const uploadId = await initiateMultipartUpload(s3, bucket, oid); + const uploadId = await initiateMultipartUpload(s3, bucket, key); const partCount = Math.ceil(size / PART_SIZE); const partUrls = await Promise.all( Array.from({ length: partCount }, (_, i) => - getSignedUrlForPart(s3, bucket, oid, uploadId, i + 1) + getSignedUrlForPart(s3, bucket, key, uploadId, i + 1) ) ); const completeUrl = await getSignedUrlForCompletion( s3, bucket, - oid, + key, uploadId ); @@ -182,7 +182,9 @@ async function fetch(req, env) { } const s3 = new AwsClient(s3Options); - const bucket = segments.slice(bucketIdx).join("/"); + // const bucket = segments.slice(bucketIdx).join("/"); + const bucket = segments[bucketIdx]; // 'bucket.aibtc.dev' + const prefix = segments.slice(bucketIdx + 1).join("/"); // 'aibtcdev-communications' const expires_in = params.expiry || env.EXPIRY || EXPIRY; const { objects, operation } = await req.json(); @@ -191,8 +193,9 @@ async function fetch(req, env) { objects.map(async ({ oid, size }) => { try { if (operation === "upload" && size > PART_SIZE) { + const key = `${prefix}/${oid}`; const { uploadId, partUrls, completeUrl, partCount } = - await handleMultipartUpload(s3, bucket, oid, size); + await handleMultipartUpload(s3, bucket, key, size); return { oid, @@ -225,7 +228,7 @@ async function fetch(req, env) { const href = await sign( s3, bucket, - oid, + key, operation === "upload" ? "PUT" : "GET" ); return { From a66b8b5f89b06a02278b43f3ebcbd668c5fa71d0 Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Tue, 30 Jul 2024 13:09:33 -0700 Subject: [PATCH 16/35] fix: update one last oid/key ref --- _worker.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/_worker.js b/_worker.js index 8df8e96..b9b5e3e 100644 --- a/_worker.js +++ b/_worker.js @@ -110,7 +110,7 @@ async function handleMultipartUpload(s3, bucket, key, size) { partCount, }; } catch (error) { - console.error(`Error in handleMultipartUpload for ${oid}:`, error); + console.error(`Error in handleMultipartUpload for ${key}:`, error); if (error.name === "AbortError") { throw new Error("Multipart upload initialization timed out"); } else if (error.message.includes("AccessDenied")) { From 4e2c117de9fddd9ebd862e29ee2ca6d49f754f39 Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Tue, 30 Jul 2024 13:14:55 -0700 Subject: [PATCH 17/35] fix: add logging to test url format Is the issue with the URL or the library? 
--- _worker.js | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/_worker.js b/_worker.js index b9b5e3e..3909276 100644 --- a/_worker.js +++ b/_worker.js @@ -52,6 +52,16 @@ async function initiateMultipartUpload(s3, bucket, key) { const url = await sign(s3, bucket, key, "POST", "uploads"); console.log(`Initiating multipart upload: POST ${url}`); + let newUrl = ""; + try { + newUrl = new URL(url); + } catch (error) { + console.error( + `Error in initiateMultipartUpload for bucket ${bucket}, key ${key} for URL ${url}:`, + error + ); + throw error; + } const response = await fetch(url, { method: "POST", headers: { From e0a7745c27bbdb85ba866499d34cad9c54be7d41 Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Mon, 26 Aug 2024 13:11:37 -0700 Subject: [PATCH 18/35] chore: add more logging Getting error 400 bad request when calling endpoint from git-lfs config. --- _worker.js | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/_worker.js b/_worker.js index 3909276..a573d5a 100644 --- a/_worker.js +++ b/_worker.js @@ -113,6 +113,16 @@ async function handleMultipartUpload(s3, bucket, key, size) { uploadId ); + console.log("Multipart upload initialized:", { + uploadId, + partCount, + partUrls, + completeUrl, + s3, + bucket, + size, + }); + return { uploadId, partUrls, @@ -197,6 +207,10 @@ async function fetch(req, env) { const prefix = segments.slice(bucketIdx + 1).join("/"); // 'aibtcdev-communications' const expires_in = params.expiry || env.EXPIRY || EXPIRY; + console.log(`Bucket: ${bucket}`); + console.log(`Prefix: ${prefix}`); + console.log(`Expires in: ${expires_in}`); + const { objects, operation } = await req.json(); const processedObjects = await Promise.all( @@ -276,6 +290,15 @@ async function fetch(req, env) { objects: processedObjects, }); + console.log(`Bucket: ${bucket}, Key: ${key}`); + console.log(`Signed URL: ${url}`); + console.log(`Request headers:`, { + method: "POST", + headers: { + "x-amz-content-sha256": "UNSIGNED-PAYLOAD", + }, + }); + return new Response(response, { status: 200, headers: { From 4d883c5803e407b367387c079659cb8a95acd25f Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Mon, 26 Aug 2024 13:25:34 -0700 Subject: [PATCH 19/35] fix: add moar logging --- _worker.js | 48 +++++++++++++++++++++++++++++------------------- 1 file changed, 29 insertions(+), 19 deletions(-) diff --git a/_worker.js b/_worker.js index a573d5a..84d33a9 100644 --- a/_worker.js +++ b/_worker.js @@ -16,52 +16,56 @@ async function sign(s3, bucket, path, method, query = "") { }, }; - console.log(`Signing request: ${method} ${url}`); - const signed = await s3.sign(new Request(url, info), { aws: { signQuery: true }, }); - console.log(`Signed URL: ${signed.url}`); + // logging all in one place + console.log("=== sign function ==="); + console.log(`s3: ${s3}`); + console.log(`bucket: ${bucket}`); + console.log(`path: ${path}`); + console.log(`method: ${method}`); + console.log(`query: ${query}`); + console.log(`url: ${url}`); + console.log(`info: ${info}`); + console.log(`signed URL: ${signed.url}`); + return signed.url; } function parseAuthorization(req) { const auth = req.headers.get("Authorization"); if (!auth) { - throw new Response(null, { status: 401 }); + throw new Response("Authorization header not found", { status: 401 }); } const [scheme, encoded] = auth.split(" "); if (scheme !== "Basic" || !encoded) { - throw new Response(null, { status: 400 }); + throw new Response("Invalid authorization scheme or credentials", { + status: 400, + }); } const buffer = 
Uint8Array.from(atob(encoded), (c) => c.charCodeAt(0)); const decoded = new TextDecoder().decode(buffer).normalize(); const index = decoded.indexOf(":"); if (index === -1 || /[\0-\x1F\x7F]/.test(decoded)) { - throw new Response(null, { status: 400 }); + throw new Response("Unable to decode authorization", { status: 400 }); } + console.log(`=== parseAuthorization ===`); + console.log(`auth: ${auth}`); + console.log(`decoded: ${decoded}`); + return { user: decoded.slice(0, index), pass: decoded.slice(index + 1) }; } async function initiateMultipartUpload(s3, bucket, key) { + console.log("=== initiate multipart upload ==="); try { const url = await sign(s3, bucket, key, "POST", "uploads"); - console.log(`Initiating multipart upload: POST ${url}`); - - let newUrl = ""; - try { - newUrl = new URL(url); - } catch (error) { - console.error( - `Error in initiateMultipartUpload for bucket ${bucket}, key ${key} for URL ${url}:`, - error - ); - throw error; - } + const response = await fetch(url, { method: "POST", headers: { @@ -74,7 +78,7 @@ async function initiateMultipartUpload(s3, bucket, key) { console.error(`S3 error response: ${errorText}`); console.error(`Response headers:`, response.headers); throw new Error( - `S3 responded with status ${response.status}: ${errorText}` + `S3 responded with status ${response.status}: ${errorText}, headers: ${response.headers}` ); } @@ -85,6 +89,11 @@ async function initiateMultipartUpload(s3, bucket, key) { if (!uploadId) { throw new Error("Failed to extract UploadId from S3 response"); } + + console.log(`bucket: ${bucket}`); + console.log(`key: ${key}`); + console.log(`uploadId: ${uploadId}`); + return uploadId; } catch (error) { console.error( @@ -96,6 +105,7 @@ async function initiateMultipartUpload(s3, bucket, key) { } async function handleMultipartUpload(s3, bucket, key, size) { + console.log("=== handleMultipartUpload ==="); try { const uploadId = await initiateMultipartUpload(s3, bucket, key); const partCount = Math.ceil(size / PART_SIZE); From 2cc60864234feb1e818c497e9c5fdecc30fd524c Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Mon, 26 Aug 2024 13:34:19 -0700 Subject: [PATCH 20/35] fix: log from single call with multiline Easier to read in streaming output of CF console --- _worker.js | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/_worker.js b/_worker.js index 84d33a9..23a624e 100644 --- a/_worker.js +++ b/_worker.js @@ -21,15 +21,12 @@ async function sign(s3, bucket, path, method, query = "") { }); // logging all in one place - console.log("=== sign function ==="); - console.log(`s3: ${s3}`); - console.log(`bucket: ${bucket}`); - console.log(`path: ${path}`); - console.log(`method: ${method}`); - console.log(`query: ${query}`); - console.log(`url: ${url}`); - console.log(`info: ${info}`); - console.log(`signed URL: ${signed.url}`); + console.log(`=== sign function ===\n + s3: ${JSON.stringify(s3)}\n + bucket: ${bucket}, path: ${path}, method: ${method}, query: ${query}\n + url: ${url}\n + info: ${JSON.stringify(info)}\n + signed URL: ${signed.url}`); return signed.url; } @@ -78,7 +75,9 @@ async function initiateMultipartUpload(s3, bucket, key) { console.error(`S3 error response: ${errorText}`); console.error(`Response headers:`, response.headers); throw new Error( - `S3 responded with status ${response.status}: ${errorText}, headers: ${response.headers}` + `S3 responded with status ${ + response.status + }: ${errorText}, headers: ${JSON.stringify(response.headers)}` ); } @@ -217,9 +216,11 @@ 
async function fetch(req, env) { const prefix = segments.slice(bucketIdx + 1).join("/"); // 'aibtcdev-communications' const expires_in = params.expiry || env.EXPIRY || EXPIRY; - console.log(`Bucket: ${bucket}`); - console.log(`Prefix: ${prefix}`); - console.log(`Expires in: ${expires_in}`); + console.log(`=== fetch info ===\n + s3Options: ${JSON.stringify(s3Options)}\n + bucket: ${bucket}\n + prefix: ${prefix}\n + Expires in: ${expires_in}`); const { objects, operation } = await req.json(); From 159802914a49843ed9d723fc07ea7b07b980196a Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Mon, 26 Aug 2024 13:47:36 -0700 Subject: [PATCH 21/35] fix: more logging and testing --- _worker.js | 92 ++++++++++++++++++++++++++++-------------------------- 1 file changed, 47 insertions(+), 45 deletions(-) diff --git a/_worker.js b/_worker.js index 23a624e..9c33116 100644 --- a/_worker.js +++ b/_worker.js @@ -21,12 +21,16 @@ async function sign(s3, bucket, path, method, query = "") { }); // logging all in one place - console.log(`=== sign function ===\n - s3: ${JSON.stringify(s3)}\n - bucket: ${bucket}, path: ${path}, method: ${method}, query: ${query}\n - url: ${url}\n - info: ${JSON.stringify(info)}\n - signed URL: ${signed.url}`); + console.log("=== sign function ===", { + s3: JSON.stringify(s3), + bucket, + path, + method, + query, + url, + info: JSON.stringify(info), + signedUrl: signed.url, + }); return signed.url; } @@ -51,17 +55,16 @@ function parseAuthorization(req) { throw new Response("Unable to decode authorization", { status: 400 }); } - console.log(`=== parseAuthorization ===`); - console.log(`auth: ${auth}`); - console.log(`decoded: ${decoded}`); - return { user: decoded.slice(0, index), pass: decoded.slice(index + 1) }; } -async function initiateMultipartUpload(s3, bucket, key) { - console.log("=== initiate multipart upload ==="); +async function initiateMultipartUpload(s3, bucket, prefix, oid) { + const key = `${prefix}/${oid}`; + console.log("=== initiateMultipartUpload ===", { bucket, key }); + try { const url = await sign(s3, bucket, key, "POST", "uploads"); + console.log(`Initiating multipart upload: POST ${url}`); const response = await fetch(url, { method: "POST", @@ -70,29 +73,21 @@ async function initiateMultipartUpload(s3, bucket, key) { }, }); + console.log(`Response status: ${response.status}`); + console.log(`Response headers:`, Object.fromEntries(response.headers)); + const responseBody = await response.text(); + console.log(`Response body: ${responseBody}`); + if (!response.ok) { - const errorText = await response.text(); - console.error(`S3 error response: ${errorText}`); - console.error(`Response headers:`, response.headers); throw new Error( - `S3 responded with status ${ - response.status - }: ${errorText}, headers: ${JSON.stringify(response.headers)}` + `S3 responded with status ${response.status}: ${responseBody}` ); } - const xml = await response.text(); - console.log(`S3 response:`, xml); - - const uploadId = xml.match(/(.*?)<\/UploadId>/)[1]; + const uploadId = responseBody.match(/(.*?)<\/UploadId>/)[1]; if (!uploadId) { throw new Error("Failed to extract UploadId from S3 response"); } - - console.log(`bucket: ${bucket}`); - console.log(`key: ${key}`); - console.log(`uploadId: ${uploadId}`); - return uploadId; } catch (error) { console.error( @@ -103,22 +98,27 @@ async function initiateMultipartUpload(s3, bucket, key) { } } -async function handleMultipartUpload(s3, bucket, key, size) { - console.log("=== handleMultipartUpload ==="); +async function 
handleMultipartUpload(s3, bucket, prefix, oid, size) { + console.log("=== handleMultipartUpload ===", { + bucket, + prefix, + oid, + size, + }); try { - const uploadId = await initiateMultipartUpload(s3, bucket, key); + const uploadId = await initiateMultipartUpload(s3, bucket, prefix, oid); const partCount = Math.ceil(size / PART_SIZE); const partUrls = await Promise.all( Array.from({ length: partCount }, (_, i) => - getSignedUrlForPart(s3, bucket, key, uploadId, i + 1) + getSignedUrlForPart(s3, bucket, `${prefix}/${oid}`, uploadId, i + 1) ) ); const completeUrl = await getSignedUrlForCompletion( s3, bucket, - key, + `${prefix}/${oid}`, uploadId ); @@ -127,9 +127,6 @@ async function handleMultipartUpload(s3, bucket, key, size) { partCount, partUrls, completeUrl, - s3, - bucket, - size, }); return { @@ -139,7 +136,10 @@ async function handleMultipartUpload(s3, bucket, key, size) { partCount, }; } catch (error) { - console.error(`Error in handleMultipartUpload for ${key}:`, error); + console.error( + `Error in handleMultipartUpload for ${prefix}/${oid}:`, + error + ); if (error.name === "AbortError") { throw new Error("Multipart upload initialization timed out"); } else if (error.message.includes("AccessDenied")) { @@ -152,7 +152,7 @@ async function handleMultipartUpload(s3, bucket, key, size) { ); } else { throw new Error( - `Failed to initialize multipart upload: ${error.message}` + `Failed to initialize multipart upload, unknown error: ${error.message}` ); } } @@ -216,11 +216,14 @@ async function fetch(req, env) { const prefix = segments.slice(bucketIdx + 1).join("/"); // 'aibtcdev-communications' const expires_in = params.expiry || env.EXPIRY || EXPIRY; - console.log(`=== fetch info ===\n - s3Options: ${JSON.stringify(s3Options)}\n - bucket: ${bucket}\n - prefix: ${prefix}\n - Expires in: ${expires_in}`); + console.log("=== fetch ===", { + user, + pass, + s3Options: JSON.stringify(s3Options), + bucket, + prefix, + expires_in, + }); const { objects, operation } = await req.json(); @@ -228,9 +231,8 @@ async function fetch(req, env) { objects.map(async ({ oid, size }) => { try { if (operation === "upload" && size > PART_SIZE) { - const key = `${prefix}/${oid}`; const { uploadId, partUrls, completeUrl, partCount } = - await handleMultipartUpload(s3, bucket, key, size); + await handleMultipartUpload(s3, bucket, prefix, oid, size); return { oid, @@ -263,7 +265,7 @@ async function fetch(req, env) { const href = await sign( s3, bucket, - key, + `${prefix}/${oid}`, operation === "upload" ? 
"PUT" : "GET" ); return { From a0485382febb143c2d776cb2410a24dc1dcc207c Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Mon, 26 Aug 2024 14:04:58 -0700 Subject: [PATCH 22/35] fix: narrowing down the issue --- _worker.js | 26 ++++++++++---------------- 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/_worker.js b/_worker.js index 9c33116..8428ae7 100644 --- a/_worker.js +++ b/_worker.js @@ -63,20 +63,23 @@ async function initiateMultipartUpload(s3, bucket, prefix, oid) { console.log("=== initiateMultipartUpload ===", { bucket, key }); try { - const url = await sign(s3, bucket, key, "POST", "uploads"); - console.log(`Initiating multipart upload: POST ${url}`); + const signedUrl = await sign(s3, bucket, key, "POST", "uploads"); + console.log(`Initiating multipart upload: POST ${signedUrl}`); - const response = await fetch(url, { + const response = await fetch(signedUrl, { method: "POST", headers: { "x-amz-content-sha256": "UNSIGNED-PAYLOAD", + "Content-Type": "application/octet-stream", }, }); - - console.log(`Response status: ${response.status}`); - console.log(`Response headers:`, Object.fromEntries(response.headers)); const responseBody = await response.text(); - console.log(`Response body: ${responseBody}`); + + console.log("=== Response Info ===", { + status: response.status, + headers: Object.fromEntries(response.headers), + body: responseBody, + }); if (!response.ok) { throw new Error( @@ -303,15 +306,6 @@ async function fetch(req, env) { objects: processedObjects, }); - console.log(`Bucket: ${bucket}, Key: ${key}`); - console.log(`Signed URL: ${url}`); - console.log(`Request headers:`, { - method: "POST", - headers: { - "x-amz-content-sha256": "UNSIGNED-PAYLOAD", - }, - }); - return new Response(response, { status: 200, headers: { From ee3fda0bec4a3012531568bc49275013fa709e68 Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Mon, 26 Aug 2024 14:05:54 -0700 Subject: [PATCH 23/35] docs: add mermaid diagram of worker ops --- README.md | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/README.md b/README.md index dffc882..5132a9e 100644 --- a/README.md +++ b/README.md @@ -167,3 +167,33 @@ You're now ready to [start using Git LFS](https://github.com/git-lfs/git-lfs#exa git fetch --all git lfs migrate import --everything --above=25MiB git push --all --force-with-lease + +## Operation Diagram + +```mermaidjs +sequenceDiagram + participant Client as Git LFS Client + participant Fetch as fetch function + participant Parse as parseAuthorization + participant Handle as handleMultipartUpload + participant Initiate as initiateMultipartUpload + participant Sign as sign function + participant S3 as S3/R2 API + + Client->>Fetch: POST /objects/batch + Fetch->>Parse: Parse Authorization Header + Parse-->>Fetch: Return credentials + Fetch->>Handle: For each large object + Handle->>Initiate: Start multipart upload + Initiate->>Sign: Generate signed URL + Sign-->>Initiate: Return signed URL + Initiate->>S3: POST initiate multipart upload + S3-->>Initiate: Return UploadId + Initiate-->>Handle: Return UploadId + Handle->>Sign: Generate part URLs + Sign-->>Handle: Return part URLs + Handle->>Sign: Generate complete URL + Sign-->>Handle: Return complete URL + Handle-->>Fetch: Return upload details + Fetch-->>Client: Return batch response +``` From 85b0d4054d1cb224493b096ec4d55d837418b9f5 Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Mon, 26 Aug 2024 14:13:15 -0700 Subject: [PATCH 24/35] chore: cleanup naming, use R2 over S3 --- _worker.js | 10 
+++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/_worker.js b/_worker.js
index 8428ae7..25cf010 100644
--- a/_worker.js
+++ b/_worker.js
@@ -2,7 +2,7 @@ import { AwsClient } from "aws4fetch";
 
 const HOMEPAGE = "https://github.com/aibtcdev/git-lfs-s3-proxy";
 const EXPIRY = 3600;
-const PART_SIZE = 5 * 1024 * 1024; // 5MB minimum part size for S3
+const PART_SIZE = 5 * 1024 * 1024; // 5MB minimum part size for R2
 
 async function sign(s3, bucket, path, method, query = "") {
   const encodedPath = encodeURIComponent(path).replace(/%2F/g, "/");
@@ -83,13 +83,13 @@ async function initiateMultipartUpload(s3, bucket, prefix, oid) {
     if (!response.ok) {
       throw new Error(
-        `S3 responded with status ${response.status}: ${responseBody}`
+        `R2 responded with status ${response.status}: ${responseBody}`
       );
     }
 
     const uploadId = responseBody.match(/<UploadId>(.*?)<\/UploadId>/)[1];
     if (!uploadId) {
-      throw new Error("Failed to extract UploadId from S3 response");
+      throw new Error("Failed to extract UploadId from R2 response");
     }
     return uploadId;
   } catch (error) {
@@ -146,9 +146,9 @@ async function handleMultipartUpload(s3, bucket, prefix, oid, size) {
     if (error.name === "AbortError") {
       throw new Error("Multipart upload initialization timed out");
     } else if (error.message.includes("AccessDenied")) {
-      throw new Error("Access denied. Check S3 credentials and permissions.");
+      throw new Error("Access denied. Check R2 credentials and permissions.");
     } else if (error.message.includes("NoSuchBucket")) {
-      throw new Error("S3 bucket not found. Check bucket name and region.");
+      throw new Error("R2 bucket not found. Check bucket name and region.");
     } else if (error.message.includes("NetworkError")) {
       throw new Error(
         "Network error. Check your internet connection and try again."

From cbfda2873e3a6523973fac4e2c6104f2d3eacc68 Mon Sep 17 00:00:00 2001
From: Jason Schrader
Date: Mon, 26 Aug 2024 14:17:13 -0700
Subject: [PATCH 25/35] fix: adjust some URL formation

---
 _worker.js | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/_worker.js b/_worker.js
index 25cf010..60ccc23 100644
--- a/_worker.js
+++ b/_worker.js
@@ -6,7 +6,7 @@ const PART_SIZE = 5 * 1024 * 1024; // 5MB minimum part size for R2
 
 async function sign(s3, bucket, path, method, query = "") {
   const encodedPath = encodeURIComponent(path).replace(/%2F/g, "/");
-  const encodedQuery = query ? `?${encodeURIComponent(query)}` : "";
+  const encodedQuery = query ? `?${query}` : "";
   const url = `https://${bucket}/${encodedPath}${encodedQuery}`;
 
   const info = {
@@ -63,8 +63,15 @@ async function initiateMultipartUpload(s3, bucket, prefix, oid) {
   console.log("=== initiateMultipartUpload ===", { bucket, key });
 
   try {
-    const signedUrl = await sign(s3, bucket, key, "POST", "uploads");
-    console.log(`Initiating multipart upload: POST ${signedUrl}`);
+    const signedUrl = await sign(s3, bucket, key, "POST", "uploads=");
+    console.log("Initiating multipart upload request:", {
+      method: "POST",
+      url: signedUrl,
+      headers: {
+        "x-amz-content-sha256": "UNSIGNED-PAYLOAD",
+        "Content-Type": "application/octet-stream",
+      },
+    });
 
     const response = await fetch(signedUrl, {
       method: "POST",
       headers: {
         "x-amz-content-sha256": "UNSIGNED-PAYLOAD",
         "Content-Type": "application/octet-stream",
       },
     });
@@ -97,6 +104,9 @@ async function initiateMultipartUpload(s3, bucket, prefix, oid) {
       `Error in initiateMultipartUpload for bucket ${bucket}, key ${key}:`,
       error
     );
+    if (error.response) {
+      console.error("Response data:", await error.response.text());
+    }
     throw error;
   }
 }

From faca6a8caf28772ae01684a32c8215a60355167d Mon Sep 17 00:00:00 2001
From: Jason Schrader
Date: Mon, 26 Aug 2024 14:22:25 -0700
Subject: [PATCH 26/35] fix: encode URL before fetch

---
 _worker.js | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/_worker.js b/_worker.js
index 60ccc23..da7cc8c 100644
--- a/_worker.js
+++ b/_worker.js
@@ -73,7 +73,9 @@ async function initiateMultipartUpload(s3, bucket, prefix, oid) {
       },
     });
 
-    const response = await fetch(signedUrl, {
+    const encodedUrl = encodeURI(signedUrl);
+
+    const response = await fetch(encodedUrl, {
       method: "POST",
       headers: {
         "x-amz-content-sha256": "UNSIGNED-PAYLOAD",

From 9c79d9fbd5ebc8aaa3e5825d8b48495a09217fe9 Mon Sep 17 00:00:00 2001
From: Jason Schrader
Date: Mon, 26 Aug 2024 14:25:33 -0700
Subject: [PATCH 27/35] fix: dont encode url twice

---
 _worker.js | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/_worker.js b/_worker.js
index da7cc8c..9c298d6 100644
--- a/_worker.js
+++ b/_worker.js
@@ -5,7 +5,7 @@ const EXPIRY = 3600;
 const PART_SIZE = 5 * 1024 * 1024; // 5MB minimum part size for R2
 
 async function sign(s3, bucket, path, method, query = "") {
-  const encodedPath = encodeURIComponent(path).replace(/%2F/g, "/");
+  const encodedPath = path.split("/").map(encodeURIComponent).join("/");
   const encodedQuery = query ? `?${query}` : "";
   const url = `https://${bucket}/${encodedPath}${encodedQuery}`;
 
@@ -88,6 +88,7 @@ async function initiateMultipartUpload(s3, bucket, prefix, oid) {
       status: response.status,
       headers: Object.fromEntries(response.headers),
       body: responseBody,
+      encodedUrl,
     });
 
     if (!response.ok) {

From 13a2d562d91b102c44ac228fe2d46c50b1971aad Mon Sep 17 00:00:00 2001
From: Jason Schrader
Date: Mon, 26 Aug 2024 14:33:05 -0700
Subject: [PATCH 28/35] fix: chasing down something small

---
 _worker.js | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/_worker.js b/_worker.js
index 9c298d6..67bb1f5 100644
--- a/_worker.js
+++ b/_worker.js
@@ -64,23 +64,26 @@ async function initiateMultipartUpload(s3, bucket, prefix, oid) {
   try {
     const signedUrl = await sign(s3, bucket, key, "POST", "uploads=");
+    const encodedUrl = encodeURI(signedUrl);
+
     console.log("Initiating multipart upload request:", {
       method: "POST",
-      url: signedUrl,
+      signedUrl,
+      encodedUrl,
       headers: {
         "x-amz-content-sha256": "UNSIGNED-PAYLOAD",
         "Content-Type": "application/octet-stream",
       },
     });
-    const encodedUrl = encodeURI(signedUrl);
-    const response = await fetch(encodedUrl, {
+    const response = await fetch(encodedUrl, {
       method: "POST",
       headers: {
         "x-amz-content-sha256": "UNSIGNED-PAYLOAD",
         "Content-Type": "application/octet-stream",
       },
+    }).catch((error) => {
+      console.error("Error in fetch for initiateMultipartUpload:", error);
     });

From 68aacb61c134ff81fe4c07a97aba125f1c2a6310 Mon Sep 17 00:00:00 2001
From: Jason Schrader
Date: Mon, 26 Aug 2024 14:38:03 -0700
Subject: [PATCH 29/35] fix: was still double encoding

---
 _worker.js | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/_worker.js b/_worker.js
index 67bb1f5..cb77841 100644
--- a/_worker.js
+++ b/_worker.js
@@ -64,19 +64,19 @@ async function initiateMultipartUpload(s3, bucket, prefix, oid) {
   try {
     const signedUrl = await sign(s3, bucket, key, "POST", "uploads=");
-    const encodedUrl = encodeURI(signedUrl);
+    // const encodedUrl = encodeURI(signedUrl);
 
     console.log("Initiating multipart upload request:", {
       method: "POST",
       signedUrl,
-      encodedUrl,
+      // encodedUrl,
       headers: {
         "x-amz-content-sha256": "UNSIGNED-PAYLOAD",
         "Content-Type": "application/octet-stream",
       },
     });
 
-    const response = await fetch(encodedUrl, {
+    const response = await fetch(signedUrl, {
       method: "POST",
       headers: {
         "x-amz-content-sha256": "UNSIGNED-PAYLOAD",

From 4ef61e1e55234c69fb4269e777c80ab9b516d8a1 Mon Sep 17 00:00:00 2001
From: Jason Schrader
Date: Mon, 26 Aug 2024 14:44:43 -0700
Subject: [PATCH 30/35] fix: something with the url

---
 _worker.js | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/_worker.js b/_worker.js
index cb77841..cf70ff9 100644
--- a/_worker.js
+++ b/_worker.js
@@ -64,19 +64,17 @@ async function initiateMultipartUpload(s3, bucket, prefix, oid) {
   try {
     const signedUrl = await sign(s3, bucket, key, "POST", "uploads=");
+    const urlObj = new URL(signedUrl);
     // const encodedUrl = encodeURI(signedUrl);
 
     console.log("Initiating multipart upload request:", {
       method: "POST",
       signedUrl,
+      url: urlObj.toString(),
       // encodedUrl,
-      headers: {
-        "x-amz-content-sha256": "UNSIGNED-PAYLOAD",
-        "Content-Type": "application/octet-stream",
-      },
     });
 
-    const response = await fetch(signedUrl, {
+    const response = await fetch(url.toString(), {
       method: "POST",
       headers: {
         "x-amz-content-sha256": "UNSIGNED-PAYLOAD",
         "Content-Type": "application/octet-stream",
       },
     }).catch((error) => {
       console.error("Error in fetch for initiateMultipartUpload:", error);
     });
@@ -91,7 +89,7 @@ async function initiateMultipartUpload(s3, bucket, prefix, oid) {
       status: response.status,
       headers: Object.fromEntries(response.headers),
       body: responseBody,
-      encodedUrl,
+      // encodedUrl,
     });
 
     if (!response.ok) {

From ca97518f9b0484f10a968b7806c11288bb80508c Mon Sep 17 00:00:00 2001
From: Jason Schrader
Date: Mon, 26 Aug 2024 14:47:02 -0700
Subject: [PATCH 31/35] chore: cleanup typos

---
 _worker.js | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/_worker.js b/_worker.js
index cf70ff9..eaad406 100644
--- a/_worker.js
+++ b/_worker.js
@@ -64,13 +64,13 @@ async function initiateMultipartUpload(s3, bucket, prefix, oid) {
   try {
     const signedUrl = await sign(s3, bucket, key, "POST", "uploads=");
-    const urlObj = new URL(signedUrl);
+    const url = new URL(signedUrl);
     // const encodedUrl = encodeURI(signedUrl);
 
     console.log("Initiating multipart upload request:", {
       method: "POST",
       signedUrl,
-      url: urlObj.toString(),
+      url,
       // encodedUrl,
     });

From 1ee39fc77df576a98a1679db951af24f06f169aa Mon Sep 17 00:00:00 2001
From: Jason Schrader
Date: Mon, 26 Aug 2024 14:53:36 -0700
Subject: [PATCH 32/35] fix: expand bucket options

---
 _worker.js | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/_worker.js b/_worker.js
index eaad406..be63efb 100644
--- a/_worker.js
+++ b/_worker.js
@@ -210,7 +210,13 @@ async function fetch(req, env) {
     }
 
     const { user, pass } = parseAuthorization(req);
-    let s3Options = { accessKeyId: user, secretAccessKey: pass };
+    let s3Options = {
+      accessKeyId: user,
+      secretAccessKey: pass,
+      region: "auto",
+      service: "s3",
+      endpoint: "https://bucket.aibtc.dev",
+    };
 
     const segments = url.pathname.split("/").slice(1, -2);
     let params = {};

From 76fc7f11e211380b206d9160f2c3098bc13566b0 Mon Sep 17 00:00:00 2001
From: Jason Schrader
Date: Mon, 26 Aug 2024 16:04:17 -0700
Subject: [PATCH 33/35] fix: just a bit more logging

---
 _worker.js | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/_worker.js b/_worker.js
index be63efb..cd489f1 100644
--- a/_worker.js
+++ b/_worker.js
@@ -190,6 +190,11 @@ async function getSignedUrlForCompletion(s3, bucket, key, uploadId) {
 }
 
 async function fetch(req, env) {
+  console.log("=== request info ===", {
+    url: req.url,
+    method: req.method,
+    headers: Object.fromEntries(req.headers),
+  });
   try {
     const url = new URL(req.url);
 
@@ -215,7 +220,7 @@ async function fetch(req, env) {
       secretAccessKey: pass,
       region: "auto",
       service: "s3",
-      endpoint: "https://bucket.aibtc.dev",
+      endpoint: "https://bucket.aibtc.dev/aibtcdev-communications",
     };
 
     const segments = url.pathname.split("/").slice(1, -2);
@@ -254,6 +259,10 @@ async function fetch(req, env) {
         objects.map(async ({ oid, size }) => {
           try {
             if (operation === "upload" && size > PART_SIZE) {
+              console.log("Processing multipart upload for object:", {
+                oid,
+                size,
+              });
               const { uploadId, partUrls, completeUrl, partCount } =
                 await handleMultipartUpload(s3, bucket, prefix, oid, size);
 
@@ -285,6 +294,10 @@ async function fetch(req, env) {
                 },
               };
             } else {
+              console.log("Processing single part upload for object:", {
+                oid,
+                size,
+              });
               const href = await sign(
                 s3,
                 bucket,
@@ -341,6 +354,7 @@ async function fetch(req, env) {
     } else if (error.name === "AbortError") {
       return new Response("Request timed out", { status: 504 });
     } else if (error.name === "TypeError") {
+      console.log("TypeError: This is where it's exiting");
       return new Response("Bad request format", { status: 400 });
     } else if (error.message.includes("NetworkError")) {
       return new Response("Network error occurred", { status: 503 });

From 17b3da00dff74c28ed7c79006c1938630d9e6a10 Mon Sep 17 00:00:00 2001
From: Jason Schrader
Date: Mon, 26 Aug 2024 16:23:55 -0700
Subject: [PATCH 34/35] fix: small tweaks

Still not getting something right, but closer to an answer.
---
 _worker.js | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/_worker.js b/_worker.js
index cd489f1..dd90654 100644
--- a/_worker.js
+++ b/_worker.js
@@ -70,7 +70,7 @@ async function initiateMultipartUpload(s3, bucket, prefix, oid) {
     console.log("Initiating multipart upload request:", {
       method: "POST",
       signedUrl,
-      url,
+      url: url.toString(),
       // encodedUrl,
     });
 
@@ -83,6 +83,13 @@ async function initiateMultipartUpload(s3, bucket, prefix, oid) {
     }).catch((error) => {
       console.error("Error in fetch for initiateMultipartUpload:", error);
     });
+
+    if (!response.ok) {
+      throw new Error(
+        `R2 responded with status ${response.status}: ${responseBody}`
+      );
+    }
+
     const responseBody = await response.text();
 
     console.log("=== Response Info ===", {
@@ -92,12 +99,6 @@ async function initiateMultipartUpload(s3, bucket, prefix, oid) {
       // encodedUrl,
     });
 
-    if (!response.ok) {
-      throw new Error(
-        `R2 responded with status ${response.status}: ${responseBody}`
-      );
-    }
-
     const uploadId = responseBody.match(/<UploadId>(.*?)<\/UploadId>/)[1];
     if (!uploadId) {
       throw new Error("Failed to extract UploadId from R2 response");

From 89bfea4a2fbf339836c369cc0e534639cd44a55b Mon Sep 17 00:00:00 2001
From: Jason Schrader
Date: Mon, 26 Aug 2024 16:26:41 -0700
Subject: [PATCH 35/35] docs: add old worker for side-by-side review

Suspecting there's a better way to handle the overall flow here based on
the diagram in the readme. Since something is failing along the way with
URL generation, we need to isolate and figure out which steps are
succeeding and which aren't.
---
 _worker.old.js | 119 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 119 insertions(+)
 create mode 100644 _worker.old.js

diff --git a/_worker.old.js b/_worker.old.js
new file mode 100644
index 0000000..78cd16c
--- /dev/null
+++ b/_worker.old.js
@@ -0,0 +1,119 @@
+import { AwsClient } from "aws4fetch";
+
+const HOMEPAGE = "https://github.com/milkey-mouse/git-lfs-s3-proxy";
+const EXPIRY = 3600;
+
+const MIME = "application/vnd.git-lfs+json";
+
+const METHOD_FOR = {
+  upload: "PUT",
+  download: "GET",
+};
+
+async function sign(s3, bucket, path, method) {
+  const info = { method };
+  const signed = await s3.sign(
+    new Request(`https://${bucket}/${path}?X-Amz-Expires=${EXPIRY}`, info),
+    { aws: { signQuery: true } }
+  );
+  return signed.url;
+}
+
+function parseAuthorization(req) {
+  const auth = req.headers.get("Authorization");
+  if (!auth) {
+    throw new Response(null, { status: 401 });
+  }
+
+  const [scheme, encoded] = auth.split(" ");
+  if (scheme !== "Basic" || !encoded) {
+    throw new Response(null, { status: 400 });
+  }
+
+  const buffer = Uint8Array.from(atob(encoded), (c) => c.charCodeAt(0));
+  const decoded = new TextDecoder().decode(buffer).normalize();
+  const index = decoded.indexOf(":");
+  if (index === -1 || /[\0-\x1F\x7F]/.test(decoded)) {
+    throw new Response(null, { status: 400 });
+  }
+
+  return { user: decoded.slice(0, index), pass: decoded.slice(index + 1) };
+}
+
+async function fetch(req, env) {
+  const url = new URL(req.url);
+
+  if (url.pathname == "/") {
+    if (req.method === "GET") {
+      return Response.redirect(HOMEPAGE, 302);
+    } else {
+      return new Response(null, { status: 405, headers: { Allow: "GET" } });
+    }
+  }
+
+  if (!url.pathname.endsWith("/objects/batch")) {
+    return new Response(null, { status: 404 });
+  }
+
+  if (req.method !== "POST") {
+    return new Response(null, { status: 405, headers: { Allow: "POST" } });
+  }
+
+  // in practice, we'd rather not break out-of-spec clients not setting these
+  /*if (!req.headers.get("Accept").startsWith(MIME)
+    || !req.headers.get("Content-Type").startsWith(MIME)) {
+    return new Response(null, { status: 406 });
+  }*/
+
+  const { user, pass } = parseAuthorization(req);
+  let s3Options = { accessKeyId: user, secretAccessKey: pass };
+
+  const segments = url.pathname.split("/").slice(1, -2);
+  let params = {};
+  let bucketIdx = 0;
+  for (const segment of segments) {
+    const sliceIdx = segment.indexOf("=");
+    if (sliceIdx === -1) {
+      break;
+    } else {
+      const key = decodeURIComponent(segment.slice(0, sliceIdx));
+      const val = decodeURIComponent(segment.slice(sliceIdx + 1));
+      s3Options[key] = val;
+
+      bucketIdx++;
+    }
+  }
+
+  const s3 = new AwsClient(s3Options);
+  const bucket = segments.slice(bucketIdx).join("/");
+  const expires_in = params.expiry || env.EXPIRY || EXPIRY;
+
+  const { objects, operation } = await req.json();
+  const method = METHOD_FOR[operation];
+  const response = JSON.stringify({
+    transfer: "basic",
+    objects: await Promise.all(
+      objects.map(async ({ oid, size }) => ({
+        oid,
+        size,
+        authenticated: true,
+        actions: {
+          [operation]: {
+            href: await sign(s3, bucket, oid, method),
+            expires_in,
+          },
+        },
+      }))
+    ),
+  });
+
+  return new Response(response, {
+    status: 200,
+    headers: {
+      "Cache-Control": "no-store",
+      "Content-Type": "application/vnd.git-lfs+json",
+    },
+  });
+}
+
+export default { fetch };
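Patches 25 through 34 keep reworking how sign() builds and then re-encodes the CreateMultipartUpload URL. For comparison, here is a minimal standalone sketch (JavaScript, with hypothetical credentials and bucket host, not taken from the patches) that signs the uploads subresource with aws4fetch's query signing and then POSTs the signed URL untouched. Note that it keeps uploads= and X-Amz-Expires as separate parameters joined by "&", which is one detail worth checking against the worker's current `?${query}X-Amz-Expires=...` concatenation.

import { AwsClient } from "aws4fetch";

// Hypothetical values for illustration; none of these come from the patches.
const ACCESS_KEY_ID = "<access-key-id>";
const SECRET_ACCESS_KEY = "<secret-access-key>";
const BUCKET_HOST = "<account-id>.r2.cloudflarestorage.com/<bucket>";

// Sign CreateMultipartUpload (POST <key>?uploads) with query-string auth and
// POST the signed URL exactly as returned; re-encoding it afterwards
// (encodeURI, new URL(...).toString()) risks altering the signed query.
async function createMultipartUpload(key) {
  const s3 = new AwsClient({
    accessKeyId: ACCESS_KEY_ID,
    secretAccessKey: SECRET_ACCESS_KEY,
    region: "auto",
    service: "s3",
  });
  // Encode each path segment once, as sign() does after patch 27.
  const path = key.split("/").map(encodeURIComponent).join("/");
  // Keep the uploads subresource and X-Amz-Expires as separate parameters.
  const unsigned = `https://${BUCKET_HOST}/${path}?uploads=&X-Amz-Expires=3600`;
  const signed = await s3.sign(new Request(unsigned, { method: "POST" }), {
    aws: { signQuery: true },
  });
  const res = await fetch(signed.url, { method: "POST" });
  const body = await res.text();
  console.log(res.status, body);
  return body.match(/<UploadId>(.*?)<\/UploadId>/)?.[1] ?? null;
}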
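Patch 35's commit message calls for isolating which steps succeed. A sketch of a small probe along those lines, again with a hypothetical endpoint and credentials: it first calls the batch endpoint the same way git-lfs would, then exercises the first href returned, so a failure can be attributed to either the batch handler or the signed-URL generation.

// Hypothetical deployment URL and credentials. In _worker.old.js the R2 access
// key and secret arrive as HTTP Basic auth, and everything before
// /objects/batch is parsed as key=value options followed by the bucket path.
const PROXY_URL =
  "https://git-lfs-s3-proxy.example.workers.dev/my-bucket/objects/batch";
const AUTH = "Basic " + btoa("<access-key-id>:<secret-access-key>");

// Step 1: request upload actions exactly as a git-lfs client would.
async function requestBatch(oid, size) {
  const res = await fetch(PROXY_URL, {
    method: "POST",
    headers: {
      Authorization: AUTH,
      Accept: "application/vnd.git-lfs+json",
      "Content-Type": "application/vnd.git-lfs+json",
    },
    body: JSON.stringify({ operation: "upload", objects: [{ oid, size }] }),
  });
  console.log("batch endpoint status:", res.status);
  return res.json();
}

// Step 2: exercise the first href that came back. With a small size the proxy
// takes the single-part path, so a failure here points at URL signing rather
// than at the multipart bookkeeping.
async function probeFirstHref(batch) {
  const action = batch.objects?.[0]?.actions?.upload;
  if (!action) {
    console.log("no upload action returned:", JSON.stringify(batch));
    return;
  }
  const res = await fetch(action.href, {
    method: "PUT",
    body: new Uint8Array(16),
  });
  console.log("signed URL status:", res.status, await res.text());
}

requestBatch("0".repeat(64), 16).then(probeFirstHref);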