Skip to content

Commit 2fc5eb5

Browse files
committed
test and refactor.
- end-to-end test for inlined object
- simplify some fs functions
1 parent 3956540 commit 2fc5eb5

File tree

4 files changed

+160
-131
lines changed

4 files changed

+160
-131
lines changed

src/cas/fs.rs

Lines changed: 72 additions & 73 deletions
Original file line numberDiff line numberDiff line change
@@ -135,10 +135,10 @@ impl CasFS {
135135
bucket_name: &str,
136136
key: &str,
137137
size: u64,
138-
e_tag: BlockID,
138+
hash: BlockID,
139139
object_data: ObjectData,
140140
) -> Result<Object, MetaError> {
141-
let obj_meta = Object::new(size, e_tag, object_data);
141+
let obj_meta = Object::new(size, hash, object_data);
142142
let bucket = self.meta_store.get_bucket_tree(bucket_name)?;
143143
bucket.insert_meta(key, obj_meta.to_vec())?;
144144
Ok(obj_meta)
@@ -242,24 +242,6 @@ impl CasFS {
242242
self.meta_store.list_buckets()
243243
}
244244

245-
pub fn store_inlined_object(
246-
&self,
247-
bucket_name: &str,
248-
key: &str,
249-
data: Vec<u8>,
250-
) -> Result<Object, MetaError> {
251-
let content_hash = Md5::digest(&data).into();
252-
let size = data.len() as u64;
253-
let obj = self.create_object_meta(
254-
bucket_name,
255-
key,
256-
size,
257-
content_hash,
258-
ObjectData::Inline { data },
259-
)?;
260-
Ok(obj)
261-
}
262-
263245
/// Delete an object from a bucket.
264246
pub async fn delete_object(&self, bucket: &str, key: &str) -> Result<(), MetaError> {
265247
let path_map = self.path_tree()?;
@@ -290,25 +272,23 @@ impl CasFS {
290272
}
291273

292274
// Convenience function to store an object to disk and then store its metadata
293-
pub async fn store_object_and_meta(
275+
pub async fn store_single_object_and_meta(
294276
&self,
295277
bucket_name: &str,
296278
key: &str,
297279
data: ByteStream,
298-
) -> io::Result<(Object, Vec<BlockID>, BlockID, u64)> {
280+
) -> io::Result<Object> {
299281
let (blocks, content_hash, size) = self.store_object(bucket_name, key, data).await?;
300282
let obj = self
301283
.create_object_meta(
302284
bucket_name,
303285
key,
304286
size,
305287
content_hash,
306-
ObjectData::SinglePart {
307-
blocks: blocks.clone(),
308-
},
288+
ObjectData::SinglePart { blocks },
309289
)
310290
.unwrap();
311-
Ok((obj, blocks, content_hash, size))
291+
Ok(obj)
312292
}
313293

314294
/// Save the stream of bytes to disk.
@@ -439,6 +419,25 @@ impl CasFS {
439419
size,
440420
))
441421
}
422+
423+
// Store an object inlined in the metadata.
424+
pub fn store_inlined_object(
425+
&self,
426+
bucket_name: &str,
427+
key: &str,
428+
data: Vec<u8>,
429+
) -> Result<Object, MetaError> {
430+
let content_hash = Md5::digest(&data).into();
431+
let size = data.len() as u64;
432+
let obj = self.create_object_meta(
433+
bucket_name,
434+
key,
435+
size,
436+
content_hash,
437+
ObjectData::Inline { data },
438+
)?;
439+
Ok(obj)
440+
}
442441
}
443442

444443
#[cfg(test)]
@@ -485,18 +484,18 @@ mod tests {
485484
));
486485

487486
// Store object
488-
let (_, block_ids, _, size) = fs
489-
.store_object_and_meta(bucket_name, key1, stream)
487+
let obj = fs
488+
.store_single_object_and_meta(bucket_name, key1, stream)
490489
.await
491490
.unwrap();
492491

493492
// Verify results
494-
assert_eq!(size, test_data_len as u64);
495-
assert_eq!(block_ids.len(), 1);
493+
assert_eq!(obj.size(), test_data_len as u64);
494+
assert_eq!(obj.blocks().len(), 1);
496495

497496
// Verify block & path was stored
498497
let block_tree = fs.meta_store.get_block_tree().unwrap();
499-
let stored_block = block_tree.get_block(&block_ids[0]).unwrap().unwrap();
498+
let stored_block = block_tree.get_block(&obj.blocks()[0]).unwrap().unwrap();
500499
assert_eq!(stored_block.size(), test_data_len);
501500
assert_eq!(stored_block.rc(), 1);
502501
assert_eq!(
@@ -515,14 +514,14 @@ mod tests {
515514
async move { Ok(Bytes::from(test_data_2.clone())) },
516515
));
517516

518-
let (_, new_blocks, _, _) = fs
519-
.store_object_and_meta(bucket_name, key2, stream)
517+
let new_obj = fs
518+
.store_single_object_and_meta(bucket_name, key2, stream)
520519
.await
521520
.unwrap();
522521

523-
assert_eq!(new_blocks, block_ids);
522+
assert_eq!(new_obj.blocks(), obj.blocks());
524523

525-
let stored_block = block_tree.get_block(&new_blocks[0]).unwrap().unwrap();
524+
let stored_block = block_tree.get_block(&new_obj.blocks()[0]).unwrap().unwrap();
526525
assert_eq!(stored_block.rc(), 2);
527526
}
528527

@@ -545,14 +544,14 @@ mod tests {
545544
));
546545

547546
// Store object
548-
let (_, block_ids, _, _) = fs
549-
.store_object_and_meta(bucket_name, key1, stream)
547+
let obj = fs
548+
.store_single_object_and_meta(bucket_name, key1, stream)
550549
.await
551550
.unwrap();
552551

553552
// Initial refcount must be 1
554553
let block_tree = fs.meta_store.get_block_tree().unwrap();
555-
let stored_block = block_tree.get_block(&block_ids[0]).unwrap().unwrap();
554+
let stored_block = block_tree.get_block(&obj.blocks()[0]).unwrap().unwrap();
556555
assert_eq!(stored_block.rc(), 1);
557556

558557
{
@@ -564,14 +563,14 @@ mod tests {
564563
async move { Ok(Bytes::from(test_data_2.clone())) },
565564
));
566565

567-
let (_, new_blocks, _, _) = fs
568-
.store_object_and_meta(bucket_name, key1, stream)
566+
let new_obj = fs
567+
.store_single_object_and_meta(bucket_name, key1, stream)
569568
.await
570569
.unwrap();
571570

572-
assert_eq!(new_blocks, block_ids);
571+
assert_eq!(new_obj.blocks(), obj.blocks());
573572

574-
let stored_block = block_tree.get_block(&new_blocks[0]).unwrap().unwrap();
573+
let stored_block = block_tree.get_block(&new_obj.blocks()[0]).unwrap().unwrap();
575574
assert_eq!(stored_block.rc(), 1);
576575
}
577576
{
@@ -582,14 +581,14 @@ mod tests {
582581
async move { Ok(Bytes::from(test_data_3.clone())) },
583582
));
584583

585-
let (_, new_blocks, _, _) = fs
586-
.store_object_and_meta(bucket_name, key2, stream)
584+
let new_obj = fs
585+
.store_single_object_and_meta(bucket_name, key2, stream)
587586
.await
588587
.unwrap();
589588

590-
assert_eq!(new_blocks, block_ids);
589+
assert_eq!(new_obj.blocks(), obj.blocks());
591590

592-
let stored_block = block_tree.get_block(&new_blocks[0]).unwrap().unwrap();
591+
let stored_block = block_tree.get_block(&new_obj.blocks()[0]).unwrap().unwrap();
593592
assert_eq!(stored_block.rc(), 2);
594593
}
595594
}
@@ -613,8 +612,8 @@ mod tests {
613612
));
614613

615614
// Store object
616-
let (_, block_ids, _, _) = fs
617-
.store_object_and_meta(bucket_name, key, stream)
615+
let obj = fs
616+
.store_single_object_and_meta(bucket_name, key, stream)
618617
.await
619618
.unwrap();
620619

@@ -625,8 +624,8 @@ mod tests {
625624
// verify blocks and path exist
626625
let block_tree = fs.meta_store.get_block_tree().unwrap();
627626
let mut stored_paths = Vec::new();
628-
for id in block_ids.clone() {
629-
let block = block_tree.get_block(&id).unwrap().unwrap();
627+
for id in obj.blocks() {
628+
let block = block_tree.get_block(id).unwrap().unwrap();
630629
assert_eq!(
631630
fs.path_tree().unwrap().contains_key(block.path()).unwrap(),
632631
true
@@ -643,8 +642,8 @@ mod tests {
643642

644643
// Verify blocks were cleaned up
645644
let block_tree = fs.meta_store.get_block_tree().unwrap();
646-
for id in block_ids {
647-
assert!(block_tree.get_block(&id).unwrap().is_none());
645+
for id in obj.blocks() {
646+
assert!(block_tree.get_block(id).unwrap().is_none());
648647
}
649648
// Verify paths were cleaned up
650649
for path in stored_paths {
@@ -679,13 +678,13 @@ mod tests {
679678
));
680679

681680
// Store first object
682-
let (_, block_ids1, content_hash1, _) = fs
683-
.store_object_and_meta(bucket, key1, stream1)
681+
let obj1 = fs
682+
.store_single_object_and_meta(bucket, key1, stream1)
684683
.await
685684
.unwrap();
686685
// Verify blocks exist with rc=1
687686
let block_tree = fs.meta_store.get_block_tree().unwrap();
688-
for id in &block_ids1 {
687+
for id in obj1.blocks() {
689688
let block = block_tree.get_block(id).unwrap().unwrap();
690689
assert_eq!(block.rc(), 1);
691690
}
@@ -696,17 +695,17 @@ mod tests {
696695
async move { Ok(Bytes::from(test_data2.clone())) },
697696
));
698697

699-
let (_, block_ids2, content_hash2, _) = fs
700-
.store_object_and_meta(bucket, key2, stream2)
698+
let obj2 = fs
699+
.store_single_object_and_meta(bucket, key2, stream2)
701700
.await
702701
.unwrap();
703702

704703
// Verify both objects share same blocks
705-
assert_eq!(block_ids1, block_ids2);
706-
assert_eq!(content_hash1, content_hash2);
704+
assert_eq!(obj1.blocks(), obj2.blocks());
705+
assert_eq!(obj1.hash(), obj2.hash());
707706
// Verify blocks exist with rc=2
708707
let block_tree = fs.meta_store.get_block_tree().unwrap();
709-
for id in &block_ids2 {
708+
for id in obj2.blocks() {
710709
let block = block_tree.get_block(id).unwrap().unwrap();
711710
assert_eq!(block.rc(), 2);
712711
}
@@ -716,7 +715,7 @@ mod tests {
716715

717716
// Verify blocks still exist
718717
let block_tree = fs.meta_store.get_block_tree().unwrap();
719-
for id in &block_ids1 {
718+
for id in obj1.blocks() {
720719
let block = block_tree.get_block(id).unwrap().unwrap();
721720
assert_eq!(block.rc(), 1);
722721
}
@@ -725,8 +724,8 @@ mod tests {
725724
fs.delete_object(bucket, key2).await.unwrap();
726725

727726
// Verify blocks are gone
728-
for id in block_ids1 {
729-
assert!(block_tree.get_block(&id).unwrap().is_none());
727+
for id in obj1.blocks() {
728+
assert!(block_tree.get_block(id).unwrap().is_none());
730729
}
731730
}
732731

@@ -754,13 +753,13 @@ mod tests {
754753
));
755754

756755
// Store first object
757-
let (_, block_ids1, content_hash1, _) = fs
758-
.store_object_and_meta(bucket, key1, stream1)
756+
let obj1 = fs
757+
.store_single_object_and_meta(bucket, key1, stream1)
759758
.await
760759
.unwrap();
761760
// Verify blocks exist with rc=1
762761
let block_tree = fs.meta_store.get_block_tree().unwrap();
763-
for id in &block_ids1 {
762+
for id in obj1.blocks() {
764763
let block = block_tree.get_block(id).unwrap().unwrap();
765764
assert_eq!(block.rc(), 1);
766765
}
@@ -771,17 +770,17 @@ mod tests {
771770
async move { Ok(Bytes::from(test_data2.clone())) },
772771
));
773772

774-
let (_, block_ids2, content_hash2, _) = fs
775-
.store_object_and_meta(bucket, key1, stream2)
773+
let obj2 = fs
774+
.store_single_object_and_meta(bucket, key1, stream2)
776775
.await
777776
.unwrap();
778777

779778
// Verify both objects share same blocks
780-
assert_eq!(block_ids1, block_ids2);
781-
assert_eq!(content_hash1, content_hash2);
779+
assert_eq!(obj1.blocks(), obj2.blocks());
780+
assert_eq!(obj1.hash(), obj2.hash());
782781
// Verify blocks exist with rc=2
783782
let block_tree = fs.meta_store.get_block_tree().unwrap();
784-
for id in &block_ids2 {
783+
for id in obj2.blocks() {
785784
let block = block_tree.get_block(id).unwrap().unwrap();
786785
assert_eq!(block.rc(), 1);
787786
}
@@ -790,8 +789,8 @@ mod tests {
790789
fs.delete_object(bucket, key1).await.unwrap();
791790

792791
// Verify blocks are gone
793-
for id in block_ids1 {
794-
assert!(block_tree.get_block(&id).unwrap().is_none());
792+
for id in obj1.blocks() {
793+
assert!(block_tree.get_block(id).unwrap().is_none());
795794
}
796795
}
797796
}

0 commit comments

Comments (0)