use std::{
    arch::x86_64::*,
    mem::{transmute, zeroed},
    ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign},
};

use arith::{Field, FieldSerde, FieldSerdeResult, SimdField};

use crate::{GF2x64, GF2};

/// 128 GF(2) elements packed into a single `__m128i`: each bit is one field
/// element, so addition is bitwise XOR and multiplication is bitwise AND.
#[derive(Debug, Clone, Copy)]
pub struct AVXGF2x128 {
    pub v: __m128i,
}

impl FieldSerde for AVXGF2x128 {
    const SERIALIZED_SIZE: usize = 16;

    #[inline(always)]
    fn serialize_into<W: std::io::Write>(&self, mut writer: W) -> FieldSerdeResult<()> {
        unsafe {
            writer.write_all(transmute::<__m128i, [u8; Self::SERIALIZED_SIZE]>(self.v).as_ref())?
        };
        Ok(())
    }

    #[inline(always)]
    fn deserialize_from<R: std::io::Read>(mut reader: R) -> FieldSerdeResult<Self> {
        let mut u = [0u8; Self::SERIALIZED_SIZE];
        reader.read_exact(&mut u)?;
        unsafe {
            Ok(AVXGF2x128 {
                v: transmute::<[u8; Self::SERIALIZED_SIZE], __m128i>(u),
            })
        }
    }

    #[inline(always)]
    fn try_deserialize_from_ecc_format<R: std::io::Read>(mut reader: R) -> FieldSerdeResult<Self> {
        // The ECC format is 32 bytes wide; only the low 16 bytes carry the element.
        let mut u = [0u8; 32];
        reader.read_exact(&mut u)?;
        Ok(unsafe {
            AVXGF2x128 {
                v: transmute::<[u8; 16], __m128i>(u[..16].try_into().unwrap()),
            }
        })
    }
}
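
// A minimal round-trip sanity-check sketch for the serde impl above: it
// serializes an element into a buffer and reads it back, relying only on
// items defined in this file plus the std `Read`/`Write` impls for byte
// slices and `Vec<u8>`.
#[cfg(test)]
mod serde_round_trip_tests {
    use super::*;

    #[test]
    fn serialize_then_deserialize_is_identity() {
        let bytes: [u8; AVXGF2x128::SERIALIZED_SIZE] = core::array::from_fn(|i| i as u8);
        // `&[u8]` implements `std::io::Read`, so a slice can act as the reader.
        let x = AVXGF2x128::deserialize_from(&bytes[..]).unwrap();
        let mut buf = Vec::new();
        x.serialize_into(&mut buf).unwrap();
        assert_eq!(buf.as_slice(), bytes.as_slice());
    }
}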

impl Field for AVXGF2x128 {
    const NAME: &'static str = "Galois Field 2 SIMD 128";

    const SIZE: usize = 128 / 8;

    const FIELD_SIZE: usize = 1; // in bits

    const ZERO: Self = AVXGF2x128 {
        v: unsafe { zeroed() },
    };

    // Each bit is its own GF(2) lane, so the multiplicative identity sets every bit.
    const ONE: Self = AVXGF2x128 {
        v: unsafe { transmute::<[u64; 2], __m128i>([!0u64, !0u64]) },
    };

    // 2 == 0 in GF(2), so 1/2 is undefined; this placeholder should not be used.
    const INV_2: Self = AVXGF2x128 {
        v: unsafe { zeroed() },
    };

    #[inline(always)]
    fn zero() -> Self {
        AVXGF2x128 {
            v: unsafe { zeroed() },
        }
    }

    #[inline(always)]
    fn one() -> Self {
        AVXGF2x128 {
            v: unsafe { transmute::<[u64; 2], __m128i>([!0u64, !0u64]) },
        }
    }

    #[inline(always)]
    fn is_zero(&self) -> bool {
        unsafe { transmute::<__m128i, [u8; 16]>(self.v) == [0; 16] }
    }

    #[inline(always)]
    fn random_unsafe(mut rng: impl rand::RngCore) -> Self {
        let mut u = [0u8; 16];
        rng.fill_bytes(&mut u);
        // Use an explicit unaligned load: dereferencing the raw pointer directly
        // would require 16-byte alignment that the stack buffer may not have.
        unsafe {
            AVXGF2x128 {
                v: _mm_loadu_si128(u.as_ptr() as *const __m128i),
            }
        }
    }

    #[inline(always)]
    fn random_bool(mut rng: impl rand::RngCore) -> Self {
        let mut u = [0u8; 16];
        rng.fill_bytes(&mut u);
        unsafe {
            AVXGF2x128 {
                v: _mm_loadu_si128(u.as_ptr() as *const __m128i),
            }
        }
    }

    #[inline(always)]
    fn exp(&self, exponent: u128) -> Self {
        // Lane-wise over GF(2): x^0 = 1 and x^n = x for every n >= 1.
        if exponent == 0 {
            return Self::one();
        }
        *self
    }

    #[inline(always)]
    fn inv(&self) -> Option<Self> {
        unimplemented!()
    }

    #[inline(always)]
    fn as_u32_unchecked(&self) -> u32 {
        unimplemented!("u32 for GF2x128 doesn't make sense")
    }

    #[inline(always)]
    fn from_uniform_bytes(bytes: &[u8; 32]) -> Self {
        unsafe {
            AVXGF2x128 {
                v: transmute::<[u8; 16], __m128i>(bytes[..16].try_into().unwrap()),
            }
        }
    }
}
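
// A minimal sanity-check sketch of the characteristic-2 identities that the
// operator impls below encode: addition and subtraction are XOR, multiplication
// is AND, and negation is the identity map.
#[cfg(test)]
mod gf2_semantics_tests {
    use super::*;

    #[test]
    fn characteristic_two_identities() {
        let one = AVXGF2x128::one();
        let zero = AVXGF2x128::zero();
        assert_eq!(one + one, zero); // every element is its own additive inverse
        assert_eq!(one - one, zero);
        assert_eq!(one * one, one);
        assert_eq!(one * zero, zero);
        assert_eq!(-one, one);
    }
}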

impl Default for AVXGF2x128 {
    #[inline(always)]
    fn default() -> Self {
        Self::ZERO
    }
}

impl PartialEq for AVXGF2x128 {
    #[inline(always)]
    fn eq(&self, other: &Self) -> bool {
        // Equal iff the byte-wise comparison mask is all ones.
        unsafe { _mm_test_all_ones(_mm_cmpeq_epi8(self.v, other.v)) == 1 }
    }
}

// Lane-wise GF(2) multiplication is bitwise AND.
impl Mul<&AVXGF2x128> for AVXGF2x128 {
    type Output = AVXGF2x128;

    #[inline(always)]
    #[allow(clippy::suspicious_arithmetic_impl)]
    fn mul(self, rhs: &AVXGF2x128) -> AVXGF2x128 {
        AVXGF2x128 {
            v: unsafe { _mm_and_si128(self.v, rhs.v) },
        }
    }
}

impl Mul<AVXGF2x128> for AVXGF2x128 {
    type Output = AVXGF2x128;

    #[inline(always)]
    #[allow(clippy::suspicious_arithmetic_impl)]
    fn mul(self, rhs: AVXGF2x128) -> AVXGF2x128 {
        AVXGF2x128 {
            v: unsafe { _mm_and_si128(self.v, rhs.v) },
        }
    }
}

impl MulAssign<&AVXGF2x128> for AVXGF2x128 {
    #[inline(always)]
    #[allow(clippy::suspicious_op_assign_impl)]
    fn mul_assign(&mut self, rhs: &AVXGF2x128) {
        self.v = unsafe { _mm_and_si128(self.v, rhs.v) };
    }
}

impl MulAssign<AVXGF2x128> for AVXGF2x128 {
    #[inline(always)]
    #[allow(clippy::suspicious_op_assign_impl)]
    fn mul_assign(&mut self, rhs: AVXGF2x128) {
        self.v = unsafe { _mm_and_si128(self.v, rhs.v) };
    }
}

// Lane-wise GF(2) addition and subtraction are both bitwise XOR.
impl Sub for AVXGF2x128 {
    type Output = AVXGF2x128;

    #[inline(always)]
    #[allow(clippy::suspicious_arithmetic_impl)]
    fn sub(self, rhs: AVXGF2x128) -> AVXGF2x128 {
        AVXGF2x128 {
            v: unsafe { _mm_xor_si128(self.v, rhs.v) },
        }
    }
}

impl SubAssign for AVXGF2x128 {
    #[inline(always)]
    #[allow(clippy::suspicious_op_assign_impl)]
    fn sub_assign(&mut self, rhs: AVXGF2x128) {
        self.v = unsafe { _mm_xor_si128(self.v, rhs.v) };
    }
}

impl Add for AVXGF2x128 {
    type Output = AVXGF2x128;

    #[inline(always)]
    #[allow(clippy::suspicious_arithmetic_impl)]
    fn add(self, rhs: AVXGF2x128) -> AVXGF2x128 {
        AVXGF2x128 {
            v: unsafe { _mm_xor_si128(self.v, rhs.v) },
        }
    }
}

impl AddAssign for AVXGF2x128 {
    #[inline(always)]
    #[allow(clippy::suspicious_op_assign_impl)]
    fn add_assign(&mut self, rhs: AVXGF2x128) {
        self.v = unsafe { _mm_xor_si128(self.v, rhs.v) };
    }
}

impl Add<&AVXGF2x128> for AVXGF2x128 {
    type Output = AVXGF2x128;

    #[inline(always)]
    #[allow(clippy::suspicious_arithmetic_impl)]
    fn add(self, rhs: &AVXGF2x128) -> AVXGF2x128 {
        AVXGF2x128 {
            v: unsafe { _mm_xor_si128(self.v, rhs.v) },
        }
    }
}

impl AddAssign<&AVXGF2x128> for AVXGF2x128 {
    #[inline(always)]
    #[allow(clippy::suspicious_op_assign_impl)]
    fn add_assign(&mut self, rhs: &AVXGF2x128) {
        self.v = unsafe { _mm_xor_si128(self.v, rhs.v) };
    }
}

impl Sub<&AVXGF2x128> for AVXGF2x128 {
    type Output = AVXGF2x128;

    #[inline(always)]
    #[allow(clippy::suspicious_arithmetic_impl)]
    fn sub(self, rhs: &AVXGF2x128) -> AVXGF2x128 {
        AVXGF2x128 {
            v: unsafe { _mm_xor_si128(self.v, rhs.v) },
        }
    }
}

impl SubAssign<&AVXGF2x128> for AVXGF2x128 {
    #[inline(always)]
    #[allow(clippy::suspicious_op_assign_impl)]
    fn sub_assign(&mut self, rhs: &AVXGF2x128) {
        self.v = unsafe { _mm_xor_si128(self.v, rhs.v) };
    }
}

impl<T: std::borrow::Borrow<AVXGF2x128>> std::iter::Sum<T> for AVXGF2x128 {
    fn sum<I: Iterator<Item = T>>(iter: I) -> Self {
        iter.fold(Self::zero(), |acc, item| acc + item.borrow())
    }
}

impl<T: std::borrow::Borrow<AVXGF2x128>> std::iter::Product<T> for AVXGF2x128 {
    fn product<I: Iterator<Item = T>>(iter: I) -> Self {
        iter.fold(Self::one(), |acc, item| acc * item.borrow())
    }
}

impl Neg for AVXGF2x128 {
    type Output = AVXGF2x128;

    // In characteristic 2, every element is its own additive inverse, so
    // negation is the identity map.
    #[inline(always)]
    #[allow(clippy::suspicious_arithmetic_impl)]
    fn neg(self) -> AVXGF2x128 {
        AVXGF2x128 { v: self.v }
    }
}

// Broadcast a single GF(2) scalar (0 or 1) to all 128 lanes.
impl From<u32> for AVXGF2x128 {
    #[inline(always)]
    fn from(v: u32) -> Self {
        assert!(v < 2);
        if v == 0 {
            AVXGF2x128::ZERO
        } else {
            AVXGF2x128::ONE
        }
    }
}

impl From<GF2> for AVXGF2x128 {
    #[inline(always)]
    fn from(v: GF2) -> Self {
        assert!(v.v < 2);
        if v.v == 0 {
            AVXGF2x128::ZERO
        } else {
            AVXGF2x128::ONE
        }
    }
}
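
// A minimal sanity-check sketch of the broadcast conversions above: a zero
// bit maps to the all-zero vector and a one bit to the all-one vector.
#[cfg(test)]
mod broadcast_tests {
    use super::*;

    #[test]
    fn from_u32_broadcasts_the_bit() {
        assert_eq!(AVXGF2x128::from(0u32), AVXGF2x128::ZERO);
        assert_eq!(AVXGF2x128::from(1u32), AVXGF2x128::ONE);
    }
}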

impl SimdField for AVXGF2x128 {
    type Scalar = GF2;

    const PACK_SIZE: usize = 128;

    #[inline(always)]
    fn scale(&self, challenge: &Self::Scalar) -> Self {
        // Multiplying by a GF(2) scalar either zeroes the vector or leaves it unchanged.
        if challenge.v == 0 {
            Self::ZERO
        } else {
            *self
        }
    }

    #[inline(always)]
    fn pack(base_vec: &[Self::Scalar]) -> Self {
        assert_eq!(base_vec.len(), Self::PACK_SIZE);
        // Pack the 128 scalars as two GF2x64 words, then reinterpret them as one __m128i.
        let mut packed_to_gf2x64 = [GF2x64::ZERO; Self::PACK_SIZE / GF2x64::PACK_SIZE];
        packed_to_gf2x64
            .iter_mut()
            .zip(base_vec.chunks(GF2x64::PACK_SIZE))
            .for_each(|(gf2x64, pack)| *gf2x64 = GF2x64::pack(pack));

        unsafe { transmute(packed_to_gf2x64) }
    }

    #[inline(always)]
    fn unpack(&self) -> Vec<Self::Scalar> {
        // Reverse of `pack`: split into GF2x64 words and unpack each in turn.
        let packed_to_gf2x64: [GF2x64; Self::PACK_SIZE / GF2x64::PACK_SIZE] =
            unsafe { transmute(*self) };

        packed_to_gf2x64
            .iter()
            .flat_map(|packed| packed.unpack())
            .collect()
    }
}
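
// A minimal pack/unpack round-trip sketch. It assumes the scalar type `GF2`
// exposes the `Field` constants `ZERO`/`ONE` and implements `PartialEq` and
// `Debug`, as the `arith::Field` bound implies.
#[cfg(test)]
mod pack_unpack_tests {
    use super::*;

    #[test]
    fn pack_then_unpack_is_identity() {
        // An arbitrary but deterministic bit pattern across the 128 lanes.
        let scalars: Vec<GF2> = (0..AVXGF2x128::PACK_SIZE)
            .map(|i| if i % 3 == 0 { GF2::ONE } else { GF2::ZERO })
            .collect();
        let packed = AVXGF2x128::pack(&scalars);
        assert_eq!(packed.unpack(), scalars);
    }
}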