Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

chore: remove azure bug comment #369

Merged
merged 2 commits into from
Jan 9, 2024
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
26 changes: 22 additions & 4 deletions backend/azure/azure.go
Original file line number Diff line number Diff line change
Expand Up @@ -395,8 +395,6 @@ func (az *Azure) DeleteObjectTagging(ctx context.Context, bucket, object string)
return err
}

//TODO: SDK has a bug here: it recommends to use the method to remove tags by passing an empty map,
// but the method panics because of incorrect implementation
_, err = client.SetTags(ctx, map[string]string{}, nil)
if err != nil {
return azureErrToS3Err(err)
Expand All @@ -405,14 +403,34 @@ func (az *Azure) DeleteObjectTagging(ctx context.Context, bucket, object string)
return nil
}

// CreateMultipartUpload returns a fabricated create-multipart response.
// Multipart upload starts with UploadPart action so there is no
// correlating function for creating multipart uploads.
// TODO: since azure only allows for a single multipart upload
// for an object name at a time, we need to send an error back to
// the client if there is already an outstanding upload in progress
// for this object.
// Alternatively, is there something we can do with upload ids to
// keep concurrent uploads unique still? I haven't found an efficient
// way to rename final objects.
func (az *Azure) CreateMultipartUpload(ctx context.Context, input *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) {
	// No Azure call is made here: uncommitted blocks are tracked per
	// blob, so the object key doubles as the upload id.
	return &s3.CreateMultipartUploadOutput{
		Bucket:   input.Bucket,
		Key:      input.Key,
		UploadId: input.Key,
	}, nil
}

// Each part is translated into an uncommitted block in a newly created blob in staging area
func (az *Azure) UploadPart(ctx context.Context, input *s3.UploadPartInput) (etag string, err error) {
client, err := az.getBlockBlobClient(*input.Bucket, *input.Key)
if err != nil {
return "", err
}

// TODO: request streamable version of StageBlock()
// (*blockblob.Client).StageBlock does not have a streamable
// version of this function at this time, so we need to cache
// the body in memory to create an io.ReadSeekCloser
rdr, err := getReadSeekCloser(input.Body)
if err != nil {
return "", err
Expand Down Expand Up @@ -560,6 +578,7 @@ func (az *Azure) ListMultipartUploads(ctx context.Context, input *s3.ListMultipa

// Deletes the block blob with committed/uncommitted blocks
func (az *Azure) AbortMultipartUpload(ctx context.Context, input *s3.AbortMultipartUploadInput) error {
// TODO: need to verify this blob has uncommitted blocks?
_, err := az.client.DeleteBlob(ctx, *input.Bucket, *input.Key, nil)
if err != nil {
return azureErrToS3Err(err)
Expand Down Expand Up @@ -714,7 +733,6 @@ func azureErrToS3Err(apiErr error) error {
Description: azErr.RawResponse.Status,
HTTPStatusCode: azErr.StatusCode,
}
fmt.Println(resp)
return resp
}

Expand Down
Loading