Skip to content

Commit 833f232

Browse files
authored
Merge pull request #8848 from tangledbytes/utkarsh/backport/5.14/DFBUGS-860
[Backport 5.14] Backport 8350 & 8347
2 parents 87b5c29 + e4a58dd commit 833f232

File tree

4 files changed

+70
-27
lines changed

4 files changed

+70
-27
lines changed

src/sdk/namespace_blob.js

Lines changed: 11 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -161,11 +161,17 @@ class NamespaceBlob {
161161
} catch (err) {
162162
this._translate_error_code(err);
163163
dbg.warn('NamespaceBlob.read_object_md:', inspect(err));
164-
object_sdk.rpc_client.pool.update_issues_report({
165-
namespace_resource_id: this.namespace_resource_id,
166-
error_code: err.code || (err.details && err.details.errorCode) || 'InternalError',
167-
time: Date.now(),
168-
});
164+
165+
// It's totally expected to issue `HeadObject` against an object that doesn't exist
166+
// this shouldn't be counted as an issue for the namespace store
167+
if (err.rpc_code !== 'NO_SUCH_OBJECT') {
168+
object_sdk.rpc_client.pool.update_issues_report({
169+
namespace_resource_id: this.namespace_resource_id,
170+
error_code: err.code || (err.details && err.details.errorCode) || 'InternalError',
171+
time: Date.now(),
172+
});
173+
}
174+
169175
throw err;
170176
}
171177
}

src/sdk/namespace_s3.js

Lines changed: 15 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -207,11 +207,21 @@ class NamespaceS3 {
207207
} catch (err) {
208208
this._translate_error_code(params, err);
209209
dbg.warn('NamespaceS3.read_object_md:', inspect(err));
210-
object_sdk.rpc_client.pool.update_issues_report({
211-
namespace_resource_id: this.namespace_resource_id,
212-
error_code: String(err.code),
213-
time: Date.now(),
214-
});
210+
211+
// It's totally expected to issue `HeadObject` against an object that doesn't exist
212+
// this shouldn't be counted as an issue for the namespace store
213+
//
214+
// @TODO: Another error to tolerate is 'InvalidObjectState'. This also shouldn't
215+
// result in an IO_ERROR for the namespace; however, it means we cannot do `getObject`
216+
// even when `can_use_get_inline` is true.
217+
if (err.rpc_code !== 'NO_SUCH_OBJECT') {
218+
object_sdk.rpc_client.pool.update_issues_report({
219+
namespace_resource_id: this.namespace_resource_id,
220+
error_code: String(err.code),
221+
time: Date.now(),
222+
});
223+
}
224+
215225
throw err;
216226
}
217227
}

src/server/bg_services/replication_server.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -74,7 +74,7 @@ async function copy_objects_mixed_types(req) {
7474
await P.map_with_concurrency(100, keys, async key => {
7575
const params = {
7676
Bucket: dst_bucket_name.unwrap(),
77-
CopySource: `/${src_bucket_name.unwrap()}/${key}`, // encodeURI for special chars is needed
77+
CopySource: encodeURI(`/${src_bucket_name.unwrap()}/${key}`),
7878
Key: key
7979
};
8080
try {

src/test/unit_tests/test_bucket_replication.js

Lines changed: 43 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -244,6 +244,8 @@ mocha.describe('replication configuration bg worker tests', function() {
244244
const bucket_for_replications = 'bucket5-br-bg';
245245
const bucket_to_delete = 'bucket-to-delete';
246246
const buckets = [bucket1, bucket2, bucket_to_delete, bucket3, bucket4, bucket_for_replications];
247+
let uploaded_objects_count = 0;
248+
let uploaded_prefix_objects_count = 0;
247249
//const namespace_buckets = [];
248250
let s3_owner;
249251
let scanner;
@@ -255,6 +257,13 @@ mocha.describe('replication configuration bg worker tests', function() {
255257
region: 'us-east-1',
256258
httpOptions: { agent: new http.Agent({ keepAlive: false }) },
257259
};
260+
261+
// Special character items to ensure encoding of URI works OK in the replication scanner
262+
const special_character_items = [
263+
'key1273-2__#$!@%!#__BED-END-1-Carton-13.jpeg',
264+
'key1278-1__4267%2524__BED-END-1-Carton-13.jpeg'
265+
];
266+
258267
mocha.before('init scanner & populate buckets', async function() {
259268
// create buckets
260269
await P.all(_.map(buckets, async bucket_name => {
@@ -271,9 +280,23 @@ mocha.describe('replication configuration bg worker tests', function() {
271280
// populate buckets
272281
for (let i = 0; i < 10; i++) {
273282
let key = `key${i}`;
274-
if (i % 2 === 0) key = 'pref' + key;
283+
if (i % 2 === 0) {
284+
key = 'pref' + key;
285+
uploaded_prefix_objects_count += 1;
286+
}
275287
await put_object(s3_owner, bucket1, key);
288+
uploaded_objects_count += 1;
276289
}
290+
291+
// Add special characters items with prefix to the bucket
292+
await Promise.all(special_character_items.map(item => put_object(s3_owner, bucket1, 'pref' + item)));
293+
uploaded_objects_count += special_character_items.length;
294+
295+
// Add special characters items without prefix to the bucket
296+
await Promise.all(special_character_items.map(item => put_object(s3_owner, bucket1, item)));
297+
uploaded_objects_count += special_character_items.length;
298+
uploaded_prefix_objects_count += special_character_items.length;
299+
277300
cloud_utils.set_noobaa_s3_connection = () => {
278301
console.log('setting connection to coretest endpoint and access key');
279302
return s3_owner;
@@ -293,11 +316,13 @@ mocha.describe('replication configuration bg worker tests', function() {
293316
if (i % 2 === 0) key = 'pref' + key;
294317
await delete_object(s3_owner, bucket_name, key);
295318
}
319+
await Promise.all(special_character_items.map(item => delete_object(s3_owner, bucket_name, 'pref' + item)));
320+
await Promise.all(special_character_items.map(item => delete_object(s3_owner, bucket_name, item)));
296321
await rpc_client.bucket.delete_bucket({ name: bucket_name });
297322
}));
298323
});
299324

300-
mocha.it('run replication scanner and wait - no replication - nothing to upload', async function() {
325+
mocha.it('run replication scanner and wait - no replication rule - nothing to upload', async function() {
301326
const res1 = await scanner.run_batch();
302327
console.log('waiting for replication objects no objects to upload', res1);
303328
await list_objects_and_wait(s3_owner, bucket_for_replications, 0);
@@ -318,16 +343,16 @@ mocha.describe('replication configuration bg worker tests', function() {
318343
[{ rule_id: 'rule-1', destination_bucket: bucket_for_replications, filter: { prefix: 'pref' } }], false);
319344
let res1 = await scanner.run_batch();
320345
console.log('waiting for replication objects - one rule one prefix', res1);
321-
let contents = await list_objects_and_wait(s3_owner, bucket_for_replications, 5);
346+
let contents = await list_objects_and_wait(s3_owner, bucket_for_replications, uploaded_prefix_objects_count); //Check that the desired objects were replicated
322347
console.log('contents', contents);
323348

324349
// delete object from dst
325350
await s3_owner.deleteObject({ Bucket: bucket_for_replications, Key: contents[0].Key }).promise();
326-
await list_objects_and_wait(s3_owner, bucket_for_replications, 4);
351+
await list_objects_and_wait(s3_owner, bucket_for_replications, uploaded_prefix_objects_count - 1); //Verify that one object was deleted
327352
// sync again
328353
res1 = await scanner.run_batch();
329354
console.log('waiting for replication objects - one rule one prefix', res1);
330-
contents = await list_objects_and_wait(s3_owner, bucket_for_replications, 5);
355+
contents = await list_objects_and_wait(s3_owner, bucket_for_replications, uploaded_prefix_objects_count); //Check that the deleted object was replicated again
331356
const key1 = contents[0].Key;
332357
// override object in dst
333358
const dst_obj1 = await s3_owner.getObject({ Bucket: bucket_for_replications, Key: key1 }).promise();
@@ -395,7 +420,7 @@ mocha.describe('replication configuration bg worker tests', function() {
395420
});
396421

397422
mocha.it('run replication scanner and wait - no prefix - all objects should be uploaded', async function() {
398-
const contents = await list_objects_and_wait(s3_owner, bucket_for_replications, 5);
423+
const contents = await list_objects_and_wait(s3_owner, bucket_for_replications, uploaded_prefix_objects_count);
399424
for (const content of contents) {
400425
const key = content.Key;
401426
await s3_owner.deleteObject({ Bucket: bucket_for_replications, Key: key }).promise();
@@ -404,7 +429,7 @@ mocha.describe('replication configuration bg worker tests', function() {
404429
[{ rule_id: 'rule-1', destination_bucket: bucket_for_replications }], false);
405430
const res1 = await scanner.run_batch();
406431
console.log('waiting for replication objects - one rule no prefix', res1);
407-
await list_objects_and_wait(s3_owner, bucket_for_replications, 10);
432+
await list_objects_and_wait(s3_owner, bucket_for_replications, uploaded_objects_count);
408433
});
409434

410435
mocha.it('run replication scanner and wait - 2 prefixes - all objects should be uploaded', async function() {
@@ -413,14 +438,14 @@ mocha.describe('replication configuration bg worker tests', function() {
413438
{ rule_id: 'rule-2', destination_bucket: bucket2, filter: { prefix: 'pref' } }
414439
], false);
415440

416-
const res = await list_objects_and_wait(s3_owner, bucket1, 10);
441+
const res = await list_objects_and_wait(s3_owner, bucket1, uploaded_objects_count);
417442
console.log('waiting for replication objects original bucket ', res);
418443
let res1 = await scanner.run_batch();
419444
console.log('waiting for replication objects - 2 rules 1 prefix1 ', res1);
420-
await list_objects_and_wait(s3_owner, bucket2, 5);
445+
await list_objects_and_wait(s3_owner, bucket2, 5 + special_character_items.length);
421446
res1 = await scanner.run_batch();
422447
console.log('waiting for replication objects - 2 rules 1 prefix2 ', res1);
423-
await list_objects_and_wait(s3_owner, bucket2, 10);
448+
await list_objects_and_wait(s3_owner, bucket2, uploaded_objects_count);
424449
});
425450

426451
mocha.it('run replication scanner and wait - 2 buckets - all objects should be uploaded', async function() {
@@ -430,18 +455,20 @@ mocha.describe('replication configuration bg worker tests', function() {
430455
], false);
431456

432457
await put_replication(bucket2,
433-
[{ rule_id: 'rule-1', destination_bucket: bucket4, filter: { prefix: 'key' } },
434-
{ rule_id: 'rule-2', destination_bucket: bucket3, filter: { prefix: 'pref' } }
458+
[
459+
{ rule_id: 'rule-1', destination_bucket: bucket4, sync_versions: false, filter: { prefix: 'key' } },
460+
{ rule_id: 'rule-2', destination_bucket: bucket3, sync_versions: false, filter: { prefix: 'pref' } }
435461
], false);
436462
let res1 = await scanner.run_batch();
437463
console.log('waiting for replication objects - 2 rules 1 prefix1 ', res1);
438-
await list_objects_and_wait(s3_owner, bucket3, 5);
439-
await list_objects_and_wait(s3_owner, bucket4, 5);
464+
await list_objects_and_wait(s3_owner, bucket3, 5 + special_character_items.length);
465+
await list_objects_and_wait(s3_owner, bucket4, uploaded_prefix_objects_count);
440466

441467
res1 = await scanner.run_batch();
442468
console.log('waiting for replication objects - 2 rules 1 prefix2 ', res1);
443-
await list_objects_and_wait(s3_owner, bucket3, 10);
444-
await list_objects_and_wait(s3_owner, bucket4, 10);
469+
// everything is uploaded by combination of above 2
470+
await list_objects_and_wait(s3_owner, bucket3, uploaded_objects_count);
471+
await list_objects_and_wait(s3_owner, bucket4, uploaded_objects_count);
445472
});
446473

447474
});

0 commit comments

Comments
 (0)