|
| 1 | +extern crate alloc; |
| 2 | + |
// SHA-256 size constants: a digest is 256 bits = 32 bytes = 8 u32 words, and a
// compression-function input block is 512 bits, i.e. exactly twice the digest
// size (64 bytes / 16 words).
const DIGEST_BYTES: usize = 32;
const DIGEST_WORDS: usize = DIGEST_BYTES / 4;
const BLOCK_BYTES: usize = DIGEST_BYTES * 2;
const BLOCK_WORDS: usize = DIGEST_WORDS * 2;
| 7 | + |
| 8 | +extern "C" { |
| 9 | + fn sys_sha_buffer( |
| 10 | + out_state: *mut [u32; DIGEST_WORDS], |
| 11 | + in_state: *const [u32; DIGEST_WORDS], |
| 12 | + buf: *const u8, |
| 13 | + count: u32, |
| 14 | + ); |
| 15 | +} |
| 16 | + |
| 17 | +use alloc::vec::Vec; |
| 18 | + |
// Feed `blocks.len()` word-aligned 64-byte blocks through the SHA-256
// accelerator, updating `state` in place.
#[inline(always)]
fn compress_words(state: &mut [u32; DIGEST_WORDS], blocks: &[[u32; BLOCK_WORDS]]) {
    // SAFETY: `state` is a valid, initialized 8-word buffer for the whole call;
    // the same pointer is passed as both `out_state` and `in_state`, relying on
    // the syscall permitting an in-place state update (see the extern
    // declaration — NOTE(review): confirm in/out aliasing is allowed by the
    // risc0 ABI). `blocks` is a contiguous slice of `[u32; BLOCK_WORDS]`
    // elements, so `blocks.as_ptr()` points at `blocks.len()` readable 64-byte
    // blocks, matching the `buf`/`count` contract. `blocks.len() as u32` is
    // assumed not to overflow — TODO confirm callers never pass > u32::MAX
    // blocks.
    unsafe {
        sys_sha_buffer(
            state,
            state,
            blocks.as_ptr() as *const u8,
            blocks.len() as u32,
        );
    }
}
| 30 | + |
| 31 | +// When the blocks are unaligned they must be copied in order to align them on a u32 word boundary |
| 32 | +// before they can be passed to sys_sha_buffer. This function does this, allocating a new Vec. |
| 33 | +fn read_unaligned_blocks(blocks: &[[u8; BLOCK_BYTES]]) -> Vec<[u32; BLOCK_WORDS]> { |
| 34 | + blocks |
| 35 | + .iter() |
| 36 | + .map(|block| unsafe { (block.as_ptr() as *const [u32; BLOCK_WORDS]).read_unaligned() }) |
| 37 | + .collect::<Vec<_>>() |
| 38 | +} |
| 39 | + |
| 40 | +/// SHA-256 compress implementation which calls into the RISZ Zero SHA-256 accelerator circuit. |
| 41 | +/// Based on https://github.com/risc0/risc0/tree/main/risc0/zkvm/src/guest/sha.rs |
| 42 | +#[inline] |
| 43 | +pub fn compress(state: &mut [u32; DIGEST_WORDS], blocks: &[[u8; BLOCK_BYTES]]) { |
| 44 | + // On little-endian architectures, flip from big-endian to little-endian. |
| 45 | + // RISC Zero expects the state to always be big-endian, wheras this crate uses native-endian. |
| 46 | + for word in state.iter_mut() { |
| 47 | + *word = word.to_be(); |
| 48 | + } |
| 49 | + |
| 50 | + // If aligned, reinterpret the u8 array blocks as u32 array blocks. |
| 51 | + // If unaligned, the data needs to be copied. |
| 52 | + match unsafe { blocks.align_to::<[u32; BLOCK_WORDS]>() } { |
| 53 | + (&[], aligned_blocks, &[]) => compress_words(state, aligned_blocks), |
| 54 | + _ => compress_words(state, &read_unaligned_blocks(&blocks)), |
| 55 | + }; |
| 56 | + |
| 57 | + // On little-endian architectures, flip from big-endian to little-endian. |
| 58 | + for word in state.iter_mut() { |
| 59 | + *word = word.to_be(); |
| 60 | + } |
| 61 | +} |
0 commit comments