@@ -227,7 +227,7 @@ async fn tx_task(
         let (batch_size, _) = tx_compressed(
             is_compressed,
             link.is_streamed(),
-            &bytes,
+            bytes,
             &mut compression_aux_buff,
         )?;
         bytes = &compression_aux_buff[..batch_size];
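Note on this hunk: `bytes` is already a `&[u8]`, so the extra borrow (`&bytes`) only produced a redundant `&&[u8]` that deref coercion had to peel off again; passing the slice directly is the idiomatic form. For context, a minimal standalone sketch of the pattern the tx path relies on (hypothetical helper name, not zenoh's actual `tx_compressed`): compress into an auxiliary buffer with `lz4_flex`, and fall back to copying the original payload when compression does not shrink it.

```rust
use lz4_flex::block::CompressError;

// Hypothetical sketch of the tx-side pattern: compress `bytes` into `aux`,
// reporting the resulting batch size and whether compression was actually used.
fn compress_or_copy(bytes: &[u8], aux: &mut [u8]) -> Result<(usize, bool), CompressError> {
    let compressed = lz4_flex::block::compress_into(bytes, aux)?;
    if compressed < bytes.len() {
        Ok((compressed, true)) // ship the compressed form
    } else {
        // Compression did not pay off: fall back to the original bytes.
        aux[..bytes.len()].copy_from_slice(bytes);
        Ok((bytes.len(), false))
    }
}
```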
@@ -471,7 +471,7 @@ fn rx_decompress(
     end_pos: &mut usize,
 ) -> ZResult<()> {
     let is_compressed: bool = buffer[COMPRESSION_BYTE_INDEX] == COMPRESSION_ENABLED;
-    Ok(if is_compressed {
+    if is_compressed {
         let mut aux_buff = pool.try_take().unwrap_or_else(|| pool.alloc());
         *end_pos = lz4_flex::block::decompress_into(
             &buffer[BATCH_PAYLOAD_START_INDEX..read_bytes],
@@ -482,7 +482,8 @@ fn rx_decompress(
     } else {
         *start_pos = BATCH_PAYLOAD_START_INDEX;
         *end_pos = read_bytes;
-    })
+    };
+    Ok(())
 }

 #[cfg(all(feature = "unstable", feature = "transport_compression"))]
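This change is behaviour-preserving: both branches of the `if` evaluate to `()`, so wrapping the whole expression in `Ok(...)` and returning `Ok(())` afterwards are equivalent, and the explicit form reads more clearly. A rough standalone sketch of what the rx branch does (the constants and signature below are illustrative assumptions, not the crate's API):

```rust
use lz4_flex::block::DecompressError;

// Hypothetical layout constants for the sketch only.
const COMPRESSION_BYTE_INDEX: usize = 0;
const BATCH_PAYLOAD_START_INDEX: usize = 1;
const COMPRESSION_ENABLED: u8 = 1;

// Returns (start, end) of the usable payload: offsets into `aux` when the batch
// was decompressed, offsets into `buffer` (past the compression byte) otherwise.
fn rx_decompress_sketch(
    buffer: &[u8],
    aux: &mut [u8],
    read_bytes: usize,
) -> Result<(usize, usize), DecompressError> {
    if buffer[COMPRESSION_BYTE_INDEX] == COMPRESSION_ENABLED {
        let end =
            lz4_flex::block::decompress_into(&buffer[BATCH_PAYLOAD_START_INDEX..read_bytes], aux)?;
        Ok((0, end))
    } else {
        Ok((BATCH_PAYLOAD_START_INDEX, read_bytes))
    }
}
```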
@@ -589,14 +590,11 @@ fn set_uncompressed_batch_header(
     if is_streamed {
         let mut header = [0_u8, 0_u8];
         header[..HEADER_BYTES_SIZE].copy_from_slice(&bytes[..HEADER_BYTES_SIZE]);
-        let mut batch_size = u16::from_le_bytes(header);
-        batch_size += 1;
-        let batch_size: u16 = batch_size.try_into().map_err(|e| {
-            zerror!(
-                "Compression error: unable to convert compression size into u16: {}",
-                e
-            )
-        })?;
+        let batch_size = if let Some(size) = u16::from_le_bytes(header).checked_add(1) {
+            size
+        } else {
+            bail!("Compression error: unable to convert compression size into u16",)
+        };
         buff[0..HEADER_BYTES_SIZE].copy_from_slice(&batch_size.to_le_bytes());
         buff[COMPRESSION_BYTE_INDEX_STREAMED] = COMPRESSION_DISABLED;
         let batch_size: usize = batch_size.into();
@@ -612,7 +610,7 @@ fn set_uncompressed_batch_header(
         // May happen when the payload size is itself the MTU and adding the header exceeds it.
         Err(zerror!("Failed to send uncompressed batch, batch size ({}) exceeds the maximum batch size of {}.", final_batch_size, MAX_BATCH_SIZE))?;
     }
-    return Ok(final_batch_size);
+    Ok(final_batch_size)
 }

 #[cfg(all(feature = "transport_compression", feature = "unstable"))]
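The rewritten header handling fixes a subtle issue: the old code added 1 to a value that was already a `u16` and then called `try_into::<u16>()` on it, a conversion that can never fail, while the addition itself could overflow (panicking in debug builds, wrapping in release). Using `checked_add(1)` makes the overflow explicit and turns it into an error via `bail!`. A small self-contained sketch of the same idea (names are illustrative):

```rust
// Overflow-safe bump of a little-endian u16 length header.
fn bump_streamed_header(header: [u8; 2]) -> Result<u16, String> {
    u16::from_le_bytes(header)
        .checked_add(1) // None on u16 overflow instead of wrapping or panicking
        .ok_or_else(|| "unable to fit batch size + compression byte into u16".to_string())
}

#[test]
fn bump_streamed_header_handles_overflow() {
    assert_eq!(bump_streamed_header(16u16.to_le_bytes()), Ok(17));
    assert!(bump_streamed_header(u16::MAX.to_le_bytes()).is_err());
}
```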
@@ -626,47 +624,46 @@ fn tx_compression_test() {
     // Compression done for the sake of comparing the result.
     let payload_compression_size = lz4_flex::block::compress_into(&payload, &mut buff).unwrap();

-    fn get_header_value(buff: &Box<[u8]>) -> u16 {
+    fn get_header_value(buff: &[u8]) -> u16 {
         let mut header = [0_u8, 0_u8];
         header[..HEADER_BYTES_SIZE].copy_from_slice(&buff[..HEADER_BYTES_SIZE]);
-        let batch_size = u16::from_le_bytes(header);
-        batch_size
+        u16::from_le_bytes(header)
     }

     // Streamed with compression enabled
     let batch = [16, 0, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4];
     let (batch_size, was_compressed) = tx_compressed(true, true, &batch, &mut buff).unwrap();
     let header = get_header_value(&buff);
-    assert_eq!(was_compressed, true);
+    assert!(was_compressed);
     assert_eq!(header as usize, payload_compression_size + COMPRESSION_BYTE);
     assert!(batch_size < batch.len() + COMPRESSION_BYTE);
     assert_eq!(batch_size, payload_compression_size + 3);

     // Not streamed with compression enabled
     let batch = payload;
     let (batch_size, was_compressed) = tx_compressed(true, false, &batch, &mut buff).unwrap();
-    assert_eq!(was_compressed, true);
+    assert!(was_compressed);
     assert!(batch_size < batch.len() + COMPRESSION_BYTE);
     assert_eq!(batch_size, payload_compression_size + COMPRESSION_BYTE);

     // Streamed with compression disabled
     let batch = [16, 0, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4];
     let (batch_size, was_compressed) = tx_compressed(false, true, &batch, &mut buff).unwrap();
     let header = get_header_value(&buff);
-    assert_eq!(was_compressed, false);
+    assert!(!was_compressed);
     assert_eq!(header as usize, payload.len() + COMPRESSION_BYTE);
     assert_eq!(batch_size, batch.len() + COMPRESSION_BYTE);

     // Not streamed and compression disabled
     let batch = payload;
     let (batch_size, was_compressed) = tx_compressed(false, false, &batch, &mut buff).unwrap();
-    assert_eq!(was_compressed, false);
+    assert!(!was_compressed);
     assert_eq!(batch_size, payload.len() + COMPRESSION_BYTE);

     // Verify that if the compression result is bigger than the original payload size, then the non compressed payload is returned.
     let batch = [16, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; // a non compressable payload with no repetitions
     let (batch_size, was_compressed) = tx_compressed(true, true, &batch, &mut buff).unwrap();
-    assert_eq!(was_compressed, false);
+    assert!(!was_compressed);
     assert_eq!(batch_size, batch.len() + COMPRESSION_BYTE);
 }

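The test changes are clippy-style cleanups: `get_header_value` now takes `&[u8]` instead of `&Box<[u8]>` (the borrowed-box pattern adds nothing, and `&buff` still works at every call site thanks to deref coercion), the trailing `let`/return pair collapses into a tail expression, and `assert!(x)` / `assert!(!x)` replace `assert_eq!(x, true/false)`. A tiny illustration of the deref-coercion point (a standalone example, not the test itself):

```rust
fn get_header_value(buff: &[u8]) -> u16 {
    u16::from_le_bytes([buff[0], buff[1]])
}

fn main() {
    // A Box<[u8]> coerces to &[u8] at the call site, so callers need no change.
    let buff: Box<[u8]> = vec![16, 0, 42].into_boxed_slice();
    assert_eq!(get_header_value(&buff), 16);
}
```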