Merge pull request #8848 from tangledbytes/utkarsh/backport/5.14/DFBUGS-860

[Backport 5.14] Backport 8350 & 8347
liranmauda authored Mar 4, 2025
2 parents 87b5c29 + e4a58dd commit 833f232
Showing 4 changed files with 70 additions and 27 deletions.
src/sdk/namespace_blob.js (11 additions, 5 deletions)

@@ -161,11 +161,17 @@ class NamespaceBlob {
         } catch (err) {
             this._translate_error_code(err);
             dbg.warn('NamespaceBlob.read_object_md:', inspect(err));
-            object_sdk.rpc_client.pool.update_issues_report({
-                namespace_resource_id: this.namespace_resource_id,
-                error_code: err.code || (err.details && err.details.errorCode) || 'InternalError',
-                time: Date.now(),
-            });
+
+            // It's totally expected to issue `HeadObject` against an object that doesn't exist,
+            // so this shouldn't be counted as an issue for the namespace store.
+            if (err.rpc_code !== 'NO_SUCH_OBJECT') {
+                object_sdk.rpc_client.pool.update_issues_report({
+                    namespace_resource_id: this.namespace_resource_id,
+                    error_code: err.code || (err.details && err.details.errorCode) || 'InternalError',
+                    time: Date.now(),
+                });
+            }
+
             throw err;
         }
     }
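The guard is small enough to read as a general pattern: report an issue only when the error is not an expected outcome of the operation. A minimal sketch of that pattern, where `report_issue` is an illustrative stand-in for `object_sdk.rpc_client.pool.update_issues_report` rather than the commit's actual wiring:

```js
// Sketch only: `report_issue` stands in for rpc_client.pool.update_issues_report.
function report_unexpected_issue(report_issue, namespace_resource_id, err) {
    // A HeadObject on a missing key is a normal outcome, not a store failure,
    // so it must not count against the namespace store's health.
    if (err.rpc_code === 'NO_SUCH_OBJECT') return;
    report_issue({
        namespace_resource_id,
        error_code: err.code || (err.details && err.details.errorCode) || 'InternalError',
        time: Date.now(),
    });
}

// Usage sketch: the error still propagates to the caller either way.
try {
    throw Object.assign(new Error('not found'), { rpc_code: 'NO_SUCH_OBJECT' });
} catch (err) {
    report_unexpected_issue(console.log, 'ns-resource-1', err); // logs nothing
    // throw err; // the real code rethrows here
}
```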
src/sdk/namespace_s3.js (15 additions, 5 deletions)

@@ -207,11 +207,21 @@ class NamespaceS3 {
         } catch (err) {
             this._translate_error_code(params, err);
             dbg.warn('NamespaceS3.read_object_md:', inspect(err));
-            object_sdk.rpc_client.pool.update_issues_report({
-                namespace_resource_id: this.namespace_resource_id,
-                error_code: String(err.code),
-                time: Date.now(),
-            });
+
+            // It's totally expected to issue `HeadObject` against an object that doesn't exist,
+            // so this shouldn't be counted as an issue for the namespace store.
+            //
+            // @TODO: Another error to tolerate is 'InvalidObjectState'. It too shouldn't
+            // result in IO_ERROR for the namespace; however, it means we cannot do `getObject`
+            // even when `can_use_get_inline` is true.
+            if (err.rpc_code !== 'NO_SUCH_OBJECT') {
+                object_sdk.rpc_client.pool.update_issues_report({
+                    namespace_resource_id: this.namespace_resource_id,
+                    error_code: String(err.code),
+                    time: Date.now(),
+                });
+            }
+
             throw err;
         }
     }
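The `@TODO` suggests the single-code check may eventually grow. One hypothetical way to extend the same guard, assuming `INVALID_OBJECT_STATE` would be the translated rpc code for S3's `InvalidObjectState` (neither the set nor that code is part of this commit):

```js
// Hypothetical extension of the guard above; not part of this commit.
const TOLERATED_RPC_CODES = new Set([
    'NO_SUCH_OBJECT',          // HeadObject against a missing key is expected
    // 'INVALID_OBJECT_STATE', // see the @TODO: e.g. archived objects
]);

function should_report_issue(err) {
    return !TOLERATED_RPC_CODES.has(err.rpc_code);
}

console.log(should_report_issue({ rpc_code: 'NO_SUCH_OBJECT' })); // false
console.log(should_report_issue({ rpc_code: 'INTERNAL_ERROR' })); // true
```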
src/server/bg_services/replication_server.js (1 addition, 1 deletion)

@@ -74,7 +74,7 @@ async function copy_objects_mixed_types(req) {
     await P.map_with_concurrency(100, keys, async key => {
         const params = {
             Bucket: dst_bucket_name.unwrap(),
-            CopySource: `/${src_bucket_name.unwrap()}/${key}`, // encodeURI for special chars is needed
+            CopySource: encodeURI(`/${src_bucket_name.unwrap()}/${key}`),
             Key: key
         };
         try {
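The one-line fix replaces a comment that merely noted the encoding was needed. S3 expects `CopySource` to be URL-encoded, so a bare `%` in a key, as in the new test data, can break the copy. A quick sketch with one of the test keys, using Node's built-in `encodeURI`; the bucket names are placeholders:

```js
// One of the special-character keys added to the test below.
const key = 'key1278-1__4267%2524__BED-END-1-Carton-13.jpeg';

const raw = `/src-bucket/${key}`;
const encoded = encodeURI(raw);

console.log(raw);     // /src-bucket/key1278-1__4267%2524__BED-END-1-Carton-13.jpeg
console.log(encoded); // /src-bucket/key1278-1__4267%252524__BED-END-1-Carton-13.jpeg
// encodeURI escapes the bare '%' as '%25' while leaving '/' intact.

// The copy itself (aws-sdk v2 style, as used in this file) would then be:
// s3.copyObject({ Bucket: 'dst-bucket', CopySource: encoded, Key: key }).promise();
```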
src/test/unit_tests/test_bucket_replication.js (43 additions, 16 deletions)

@@ -244,6 +244,8 @@ mocha.describe('replication configuration bg worker tests', function() {
     const bucket_for_replications = 'bucket5-br-bg';
     const bucket_to_delete = 'bucket-to-delete';
     const buckets = [bucket1, bucket2, bucket_to_delete, bucket3, bucket4, bucket_for_replications];
+    let uploaded_objects_count = 0;
+    let uploaded_prefix_objects_count = 0;
     //const namespace_buckets = [];
     let s3_owner;
     let scanner;
@@ -255,6 +257,13 @@
         region: 'us-east-1',
         httpOptions: { agent: new http.Agent({ keepAlive: false }) },
     };
+
+    // Special-character items to ensure URI encoding works in the replication scanner
+    const special_character_items = [
+        'key1273-2__#$!@%!#__BED-END-1-Carton-13.jpeg',
+        'key1278-1__4267%2524__BED-END-1-Carton-13.jpeg'
+    ];
+
     mocha.before('init scanner & populate buckets', async function() {
         // create buckets
         await P.all(_.map(buckets, async bucket_name => {
@@ -271,9 +280,23 @@
         // populate buckets
         for (let i = 0; i < 10; i++) {
             let key = `key${i}`;
-            if (i % 2 === 0) key = 'pref' + key;
+            if (i % 2 === 0) {
+                key = 'pref' + key;
+                uploaded_prefix_objects_count += 1;
+            }
             await put_object(s3_owner, bucket1, key);
+            uploaded_objects_count += 1;
         }
+
+        // Add special-character items with the prefix to the bucket
+        await Promise.all(special_character_items.map(item => put_object(s3_owner, bucket1, 'pref' + item)));
+        uploaded_objects_count += special_character_items.length;
+
+        // Add special-character items without the prefix to the bucket
+        await Promise.all(special_character_items.map(item => put_object(s3_owner, bucket1, item)));
+        uploaded_objects_count += special_character_items.length;
+        uploaded_prefix_objects_count += special_character_items.length;
+
         cloud_utils.set_noobaa_s3_connection = () => {
             console.log('setting connection to coretest endpoint and access key');
             return s3_owner;
@@ -293,11 +316,13 @@
             if (i % 2 === 0) key = 'pref' + key;
             await delete_object(s3_owner, bucket_name, key);
         }
+        await Promise.all(special_character_items.map(item => delete_object(s3_owner, bucket_name, 'pref' + item)));
+        await Promise.all(special_character_items.map(item => delete_object(s3_owner, bucket_name, item)));
         await rpc_client.bucket.delete_bucket({ name: bucket_name });
     }));
 });

-    mocha.it('run replication scanner and wait - no replication - nothing to upload', async function() {
+    mocha.it('run replication scanner and wait - no replication rule - nothing to upload', async function() {
         const res1 = await scanner.run_batch();
         console.log('waiting for replication objects no objects to upload', res1);
         await list_objects_and_wait(s3_owner, bucket_for_replications, 0);
@@ -318,16 +343,16 @@
             [{ rule_id: 'rule-1', destination_bucket: bucket_for_replications, filter: { prefix: 'pref' } }], false);
         let res1 = await scanner.run_batch();
         console.log('waiting for replication objects - one rule one prefix', res1);
-        let contents = await list_objects_and_wait(s3_owner, bucket_for_replications, 5);
+        let contents = await list_objects_and_wait(s3_owner, bucket_for_replications, uploaded_prefix_objects_count); // check that the desired objects were replicated
         console.log('contents', contents);

         // delete object from dst
         await s3_owner.deleteObject({ Bucket: bucket_for_replications, Key: contents[0].Key }).promise();
-        await list_objects_and_wait(s3_owner, bucket_for_replications, 4);
+        await list_objects_and_wait(s3_owner, bucket_for_replications, uploaded_prefix_objects_count - 1); // verify that one object was deleted
         // sync again
         res1 = await scanner.run_batch();
         console.log('waiting for replication objects - one rule one prefix', res1);
-        contents = await list_objects_and_wait(s3_owner, bucket_for_replications, 5);
+        contents = await list_objects_and_wait(s3_owner, bucket_for_replications, uploaded_prefix_objects_count); // check that the deleted object was replicated again
         const key1 = contents[0].Key;
         // override object in dst
         const dst_obj1 = await s3_owner.getObject({ Bucket: bucket_for_replications, Key: key1 }).promise();
@@ -395,7 +420,7 @@ mocha.describe('replication configuration bg worker tests', function() {
     });

     mocha.it('run replication scanner and wait - no prefix - all objects should be uploaded', async function() {
-        const contents = await list_objects_and_wait(s3_owner, bucket_for_replications, 5);
+        const contents = await list_objects_and_wait(s3_owner, bucket_for_replications, uploaded_prefix_objects_count);
         for (const content of contents) {
             const key = content.Key;
             await s3_owner.deleteObject({ Bucket: bucket_for_replications, Key: key }).promise();
@@ -404,7 +429,7 @@
             [{ rule_id: 'rule-1', destination_bucket: bucket_for_replications }], false);
         const res1 = await scanner.run_batch();
         console.log('waiting for replication objects - one rule no prefix', res1);
-        await list_objects_and_wait(s3_owner, bucket_for_replications, 10);
+        await list_objects_and_wait(s3_owner, bucket_for_replications, uploaded_objects_count);
     });

mocha.it('run replication scanner and wait - 2 prefixes - all objects should be uploaded', async function() {
@@ -413,14 +438,14 @@
             { rule_id: 'rule-2', destination_bucket: bucket2, filter: { prefix: 'pref' } }
         ], false);

-        const res = await list_objects_and_wait(s3_owner, bucket1, 10);
+        const res = await list_objects_and_wait(s3_owner, bucket1, uploaded_objects_count);
         console.log('waiting for replication objects original bucket ', res);
         let res1 = await scanner.run_batch();
         console.log('waiting for replication objects - 2 rules 1 prefix1 ', res1);
-        await list_objects_and_wait(s3_owner, bucket2, 5);
+        await list_objects_and_wait(s3_owner, bucket2, 5 + special_character_items.length);
         res1 = await scanner.run_batch();
         console.log('waiting for replication objects - 2 rules 1 prefix2 ', res1);
-        await list_objects_and_wait(s3_owner, bucket2, 10);
+        await list_objects_and_wait(s3_owner, bucket2, uploaded_objects_count);
     });

mocha.it('run replication scanner and wait - 2 buckets - all objects should be uploaded', async function() {
@@ -430,18 +455,20 @@
         ], false);

         await put_replication(bucket2,
-            [{ rule_id: 'rule-1', destination_bucket: bucket4, filter: { prefix: 'key' } },
-            { rule_id: 'rule-2', destination_bucket: bucket3, filter: { prefix: 'pref' } }
+            [
+                { rule_id: 'rule-1', destination_bucket: bucket4, sync_versions: false, filter: { prefix: 'key' } },
+                { rule_id: 'rule-2', destination_bucket: bucket3, sync_versions: false, filter: { prefix: 'pref' } }
             ], false);
         let res1 = await scanner.run_batch();
         console.log('waiting for replication objects - 2 rules 1 prefix1 ', res1);
-        await list_objects_and_wait(s3_owner, bucket3, 5);
-        await list_objects_and_wait(s3_owner, bucket4, 5);
+        await list_objects_and_wait(s3_owner, bucket3, 5 + special_character_items.length);
+        await list_objects_and_wait(s3_owner, bucket4, uploaded_prefix_objects_count);

         res1 = await scanner.run_batch();
         console.log('waiting for replication objects - 2 rules 1 prefix2 ', res1);
+        // everything is uploaded by the combination of the 2 rules above
-        await list_objects_and_wait(s3_owner, bucket3, 10);
-        await list_objects_and_wait(s3_owner, bucket4, 10);
+        await list_objects_and_wait(s3_owner, bucket3, uploaded_objects_count);
+        await list_objects_and_wait(s3_owner, bucket4, uploaded_objects_count);
     });

 });
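With the counters in place, the expected listing sizes follow directly from the seeded data. Spelled out as a sanity check (illustrative only, not part of the commit):

```js
// 10 looped keys (key0..key9), even indexes prefixed with 'pref',
// plus 2 special-character items uploaded twice: with and without the prefix.
const special_character_items_length = 2;

const looped_keys = 10;
const looped_prefixed = 5; // i = 0, 2, 4, 6, 8

const uploaded_objects_count =
    looped_keys + 2 * special_character_items_length;        // 14
const uploaded_prefix_objects_count =
    looped_prefixed + special_character_items_length;        // 7

console.log(uploaded_objects_count, uploaded_prefix_objects_count); // 14 7
```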
