@@ -227,7 +227,7 @@ async fn tx_task(
             let (batch_size, _) = tx_compressed(
                 is_compressed,
                 link.is_streamed(),
-                &bytes,
+                bytes,
                 &mut compression_aux_buff,
             )?;
             bytes = &compression_aux_buff[..batch_size];
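`bytes` is already a byte-slice reference at this point (it is reassigned to `&compression_aux_buff[..batch_size]` right after the call), so `&bytes` passed a `&&[u8]` that only compiled thanks to auto-deref; clippy reports this as `needless_borrow`. A minimal sketch of the same pattern, with hypothetical names not taken from the zenoh sources:

```rust
// Minimal sketch (hypothetical names): a caller that already holds a `&[u8]`
// should pass it as-is instead of re-borrowing it into a `&&[u8]`.
fn consume(payload: &[u8]) -> usize {
    payload.len()
}

fn main() {
    let data = vec![1u8, 2, 3];
    let bytes: &[u8] = &data; // `bytes` is already a slice reference
    // consume(&bytes); // compiles via auto-deref, but clippy flags `needless_borrow`
    let n = consume(bytes); // pass the existing reference directly
    assert_eq!(n, 3);
}
```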
@@ -471,7 +471,7 @@ fn rx_decompress(
     end_pos: &mut usize,
 ) -> ZResult<()> {
     let is_compressed: bool = buffer[COMPRESSION_BYTE_INDEX] == COMPRESSION_ENABLED;
-    Ok(if is_compressed {
+    if is_compressed {
         let mut aux_buff = pool.try_take().unwrap_or_else(|| pool.alloc());
         *end_pos = lz4_flex::block::decompress_into(
             &buffer[BATCH_PAYLOAD_START_INDEX..read_bytes],
@@ -482,7 +482,8 @@ fn rx_decompress(
     } else {
         *start_pos = BATCH_PAYLOAD_START_INDEX;
         *end_pos = read_bytes;
-    })
+    };
+    Ok(())
 }
 
 #[cfg(all(feature = "unstable", feature = "transport_compression"))]
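Both `rx_decompress` hunks replace `Ok(if … { … } else { … })`, where each branch only performs assignments and evaluates to `()`, with a plain `if`/`else` statement followed by an explicit `Ok(())`; wrapping a unit expression in `Ok(...)` is the pattern clippy's `unit_arg` lint targets. A minimal sketch of the same refactor, with hypothetical names:

```rust
// Minimal sketch (hypothetical function): return `Ok(())` explicitly instead of
// wrapping a unit-valued `if`/`else` expression in `Ok(...)`.
fn select_bounds(flag: bool, start: &mut usize, end: &mut usize) -> Result<(), String> {
    // Before: `Ok(if flag { *start = 1; } else { *end = 0; })` passes `()` to `Ok`.
    if flag {
        *start = 1;
    } else {
        *end = 0;
    };
    Ok(())
}

fn main() {
    let (mut start, mut end) = (0usize, 10usize);
    select_bounds(true, &mut start, &mut end).unwrap();
    assert_eq!((start, end), (1, 10));
}
```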
@@ -589,14 +590,11 @@ fn set_uncompressed_batch_header(
     if is_streamed {
         let mut header = [0_u8, 0_u8];
         header[..HEADER_BYTES_SIZE].copy_from_slice(&bytes[..HEADER_BYTES_SIZE]);
-        let mut batch_size = u16::from_le_bytes(header);
-        batch_size += 1;
-        let batch_size: u16 = batch_size.try_into().map_err(|e| {
-            zerror!(
-                "Compression error: unable to convert compression size into u16: {}",
-                e
-            )
-        })?;
+        let batch_size = if let Some(size) = u16::from_le_bytes(header).checked_add(1) {
+            size
+        } else {
+            bail!("Compression error: unable to convert compression size into u16",)
+        };
         buff[0..HEADER_BYTES_SIZE].copy_from_slice(&batch_size.to_le_bytes());
         buff[COMPRESSION_BYTE_INDEX_STREAMED] = COMPRESSION_DISABLED;
         let batch_size: usize = batch_size.into();
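The removed code added 1 to a `u16` and then called `try_into::<u16>()` on a value that was already a `u16`, so the conversion could never fail while the addition itself could overflow unnoticed in release builds. The replacement makes the overflow check explicit with `u16::checked_add`. A minimal sketch of the same guard, using a plain `Result<_, String>` instead of zenoh's `bail!`/`zerror!` macros:

```rust
// Minimal sketch: guard a u16 increment with `checked_add` instead of a no-op
// `try_into`; `checked_add` returns `None` on overflow.
fn increment_batch_size(header: [u8; 2]) -> Result<u16, String> {
    u16::from_le_bytes(header)
        .checked_add(1)
        .ok_or_else(|| "Compression error: unable to convert compression size into u16".to_string())
}

fn main() {
    assert_eq!(increment_batch_size(16u16.to_le_bytes()), Ok(17));
    // u16::MAX + 1 overflows, so the checked addition reports an error.
    assert!(increment_batch_size(u16::MAX.to_le_bytes()).is_err());
}
```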
@@ -612,7 +610,7 @@ fn set_uncompressed_batch_header(
         // May happen when the payload size is itself the MTU and adding the header exceeds it.
         Err(zerror!("Failed to send uncompressed batch, batch size ({}) exceeds the maximum batch size of {}.", final_batch_size, MAX_BATCH_SIZE))?;
     }
-    return Ok(final_batch_size);
+    Ok(final_batch_size)
 }
 
 #[cfg(all(feature = "transport_compression", feature = "unstable"))]
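The trailing hunk only drops the explicit `return`: the last expression of a Rust function body is its return value, and clippy flags the redundant keyword as `needless_return`. A tiny sketch with hypothetical names:

```rust
// Minimal sketch (hypothetical function): the final expression is returned
// implicitly, so no `return` keyword or trailing semicolon is needed.
fn final_batch_size(payload_len: usize, header_len: usize) -> usize {
    let total = payload_len + header_len;
    // return total;  // clippy: needless_return
    total
}

fn main() {
    assert_eq!(final_batch_size(100, 2), 102);
}
```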
@@ -626,47 +624,46 @@ fn tx_compression_test() {
     // Compression done for the sake of comparing the result.
     let payload_compression_size = lz4_flex::block::compress_into(&payload, &mut buff).unwrap();
 
-    fn get_header_value(buff: &Box<[u8]>) -> u16 {
+    fn get_header_value(buff: &[u8]) -> u16 {
         let mut header = [0_u8, 0_u8];
         header[..HEADER_BYTES_SIZE].copy_from_slice(&buff[..HEADER_BYTES_SIZE]);
-        let batch_size = u16::from_le_bytes(header);
-        batch_size
+        u16::from_le_bytes(header)
     }
 
     // Streamed with compression enabled
     let batch = [16, 0, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4];
     let (batch_size, was_compressed) = tx_compressed(true, true, &batch, &mut buff).unwrap();
     let header = get_header_value(&buff);
-    assert_eq!(was_compressed, true);
+    assert!(was_compressed);
     assert_eq!(header as usize, payload_compression_size + COMPRESSION_BYTE);
     assert!(batch_size < batch.len() + COMPRESSION_BYTE);
     assert_eq!(batch_size, payload_compression_size + 3);
 
     // Not streamed with compression enabled
     let batch = payload;
     let (batch_size, was_compressed) = tx_compressed(true, false, &batch, &mut buff).unwrap();
-    assert_eq!(was_compressed, true);
+    assert!(was_compressed);
     assert!(batch_size < batch.len() + COMPRESSION_BYTE);
     assert_eq!(batch_size, payload_compression_size + COMPRESSION_BYTE);
 
     // Streamed with compression disabled
     let batch = [16, 0, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4];
     let (batch_size, was_compressed) = tx_compressed(false, true, &batch, &mut buff).unwrap();
     let header = get_header_value(&buff);
-    assert_eq!(was_compressed, false);
+    assert!(!was_compressed);
     assert_eq!(header as usize, payload.len() + COMPRESSION_BYTE);
     assert_eq!(batch_size, batch.len() + COMPRESSION_BYTE);
 
     // Not streamed and compression disabled
     let batch = payload;
     let (batch_size, was_compressed) = tx_compressed(false, false, &batch, &mut buff).unwrap();
-    assert_eq!(was_compressed, false);
+    assert!(!was_compressed);
     assert_eq!(batch_size, payload.len() + COMPRESSION_BYTE);
 
     // Verify that if the compression result is bigger than the original payload size, then the non compressed payload is returned.
     let batch = [16, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; // a non compressable payload with no repetitions
     let (batch_size, was_compressed) = tx_compressed(true, true, &batch, &mut buff).unwrap();
-    assert_eq!(was_compressed, false);
+    assert!(!was_compressed);
     assert_eq!(batch_size, batch.len() + COMPRESSION_BYTE);
 }
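The test changes follow the same clippy-driven cleanup: `get_header_value` now borrows a plain `&[u8]` instead of `&Box<[u8]>` (the `borrowed_box` lint), returns the parsed `u16` directly instead of binding and re-returning it, and the boolean checks use `assert!(x)` / `assert!(!x)` rather than `assert_eq!(x, true/false)` (the `bool_assert_comparison` lint). A minimal sketch of the signature change, with hypothetical names; deref coercion keeps `&buff` working for callers that hold a `Box<[u8]>`:

```rust
// Minimal sketch (hypothetical names): take `&[u8]` instead of `&Box<[u8]>`.
// Deref coercion turns `&Box<[u8]>` into `&[u8]` at the call site, so callers
// holding a boxed buffer keep working unchanged.
fn first_le_u16(buff: &[u8]) -> u16 {
    let mut header = [0u8; 2];
    header.copy_from_slice(&buff[..2]);
    u16::from_le_bytes(header) // returned directly, no intermediate binding
}

fn main() {
    let buff: Box<[u8]> = vec![16, 0, 1, 2, 3].into_boxed_slice();
    assert_eq!(first_le_u16(&buff), 16); // `&Box<[u8]>` coerces to `&[u8]`
}
```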