@@ -152,6 +152,8 @@ pub enum Cache {
         idx: usize,
         /// The store of blocks.
         blocks: Vec<Block>,
+        /// The amount of data stored in one cycle, that is, across all blocks.
+        total_cycle_size: u64,
     },
 }

@@ -320,7 +322,17 @@ impl Cache {
                 construct_block_cache_inner(rng, &pyld, maximum_block_bytes, total_bytes.get())?
             }
         };
-        Ok(Self::Fixed { idx: 0, blocks })
+
+        let total_cycle_size = blocks
+            .iter()
+            .map(|block| u64::from(block.total_bytes.get()))
+            .sum();
+
+        Ok(Self::Fixed {
+            idx: 0,
+            blocks,
+            total_cycle_size,
+        })
     }

     /// Run `Cache` forward on the user-provided mpsc sender.
@@ -336,7 +348,9 @@ impl Cache {
     #[allow(clippy::needless_pass_by_value)]
     pub fn spin(self, snd: Sender<Block>) -> Result<(), SpinError> {
         match self {
-            Self::Fixed { mut idx, blocks } => loop {
+            Self::Fixed {
+                mut idx, blocks, ..
+            } => loop {
                 snd.blocking_send(blocks[idx].clone())?;
                 idx = (idx + 1) % blocks.len();
             },
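The `spin` loop above pushes blocks round-robin into a bounded mpsc channel forever, wrapping the index back to zero once it reaches the end of the block set. Below is a minimal standalone sketch of that pattern, assuming the `Sender` here is `tokio::sync::mpsc::Sender` (which is what `blocking_send` suggests); the stand-in string blocks, channel capacity, and thread setup are illustrative only, not lading's actual types.

```rust
use tokio::sync::mpsc;

fn main() {
    let blocks = vec!["block-0", "block-1", "block-2"];
    let (snd, mut rcv) = mpsc::channel(4);

    // Producer thread: cycle through the fixed block set until the receiver
    // hangs up, mirroring the round-robin index arithmetic in `spin`.
    let producer = std::thread::spawn(move || {
        let mut idx = 0;
        loop {
            if snd.blocking_send(blocks[idx]).is_err() {
                break; // receiver dropped, stop spinning
            }
            idx = (idx + 1) % blocks.len();
        }
    });

    // Consumer: pull a handful of blocks; the sequence wraps around once the
    // producer reaches the end of its block set.
    for _ in 0..7 {
        let block = rcv.blocking_recv().expect("producer is alive");
        println!("{block}");
    }
    drop(rcv); // hang up so the producer's loop exits
    producer.join().unwrap();
}
```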
@@ -351,7 +365,7 @@ impl Cache {
     #[must_use]
     pub fn peek_next(&self) -> &Block {
         match self {
-            Self::Fixed { idx, blocks } => &blocks[*idx],
+            Self::Fixed { idx, blocks, .. } => &blocks[*idx],
         }
     }

@@ -364,13 +378,73 @@ impl Cache {
             Self::Fixed {
                 ref mut idx,
                 blocks,
+                ..
             } => {
                 let block = &blocks[*idx];
                 *idx = (*idx + 1) % blocks.len();
                 block
             }
         }
     }
+
+    /// Read data starting from a given offset and up to the specified size.
+    ///
+    /// # Panics
+    ///
+    /// Function will panic if `offset` or the total cycle size cannot be
+    /// represented in a machine word (`usize`).
+    pub fn read_at(&self, offset: u64, size: usize) -> Bytes {
+        let mut data = BytesMut::with_capacity(size);
+
+        let (blocks, total_cycle_size) = match self {
+            Cache::Fixed {
+                blocks,
+                total_cycle_size,
+                ..
+            } => (
+                blocks,
+                usize::try_from(*total_cycle_size)
+                    .expect("cycle size larger than machine word bytes"),
+            ),
+        };
+
+        let mut remaining = size;
+        let mut current_offset =
+            usize::try_from(offset).expect("offset larger than machine word bytes");
+
+        while remaining > 0 {
+            // Compute offset within the cycle
+            let offset_within_cycle = current_offset % total_cycle_size;
+
+            // Find which block this offset falls into
+            let mut block_start = 0;
+            for block in blocks {
+                let block_size = block.total_bytes.get() as usize;
+                if offset_within_cycle < block_start + block_size {
+                    // Offset is within this block
+                    let block_offset = offset_within_cycle - block_start;
+                    let bytes_in_block = (block_size - block_offset).min(remaining);
+
+                    data.extend_from_slice(
+                        &block.bytes[block_offset..block_offset + bytes_in_block],
+                    );
+
+                    remaining -= bytes_in_block;
+                    current_offset += bytes_in_block;
+                    break;
+                }
+                block_start += block_size;
+            }
+
+            // If we couldn't find a block this suggests something seriously
+            // wacky has happened.
+            if remaining > 0 && block_start >= total_cycle_size {
+                error!("Offset exceeds total cycle size");
+                break;
+            }
+        }
+
+        data.freeze()
+    }
 }

 /// Construct a new block cache of form defined by `serializer`.
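The new `read_at` treats the fixed block set as a single endlessly repeating cycle of bytes: the absolute `offset` is wrapped modulo `total_cycle_size`, a linear scan locates the block containing that position, and copying continues across block (and cycle) boundaries until `size` bytes have been collected. Below is a standalone sketch of that wrap-around logic under simplified assumptions: a plain slice of `Bytes` stands in for `Cache::Fixed`, the cycle size is recomputed on the fly, and the helper and data are illustrative rather than lading's API.

```rust
use bytes::{Bytes, BytesMut};

/// Read `size` bytes starting at `offset`, treating `blocks` as one
/// endlessly repeating cycle of data. Assumes at least one non-empty block.
fn read_at(blocks: &[Bytes], offset: u64, size: usize) -> Bytes {
    let total_cycle_size: usize = blocks.iter().map(Bytes::len).sum();
    let mut data = BytesMut::with_capacity(size);
    let mut remaining = size;
    let mut current_offset = usize::try_from(offset).expect("offset fits in usize");

    while remaining > 0 {
        // Wrap the absolute offset back into the cycle, then walk the blocks
        // until we find the one the wrapped offset lands in.
        let offset_within_cycle = current_offset % total_cycle_size;
        let mut block_start = 0;
        for block in blocks {
            if offset_within_cycle < block_start + block.len() {
                let block_offset = offset_within_cycle - block_start;
                let bytes_in_block = (block.len() - block_offset).min(remaining);
                data.extend_from_slice(&block[block_offset..block_offset + bytes_in_block]);
                remaining -= bytes_in_block;
                current_offset += bytes_in_block;
                break;
            }
            block_start += block.len();
        }
    }
    data.freeze()
}

fn main() {
    let blocks = [Bytes::from_static(b"abcd"), Bytes::from_static(b"efgh")];
    // A read that starts in the second block and wraps back into the first.
    assert_eq!(read_at(&blocks, 6, 4), Bytes::from_static(b"ghab"));
    println!("wrap-around read ok");
}
```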