|
| 1 | +use std::ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign}; |
| 2 | + |
| 3 | +use arith::{Field, FieldSerde, FieldSerdeResult, SimdField}; |
| 4 | + |
| 5 | +use super::GF2; |
| 6 | + |
/// A SIMD batch of 64 GF(2) field elements, one element per bit of `v`.
///
/// Lane-wise arithmetic maps onto plain word operations: addition (and
/// subtraction) is bitwise XOR, multiplication is bitwise AND.
#[derive(Debug, Clone, Copy, Default, PartialEq)]
pub struct GF2x64 {
    pub v: u64,
}
| 11 | + |
| 12 | +impl FieldSerde for GF2x64 { |
| 13 | + const SERIALIZED_SIZE: usize = 8; |
| 14 | + |
| 15 | + #[inline(always)] |
| 16 | + fn serialize_into<W: std::io::Write>(&self, mut writer: W) -> FieldSerdeResult<()> { |
| 17 | + writer.write_all(self.v.to_le_bytes().as_ref())?; |
| 18 | + Ok(()) |
| 19 | + } |
| 20 | + |
| 21 | + #[inline(always)] |
| 22 | + fn deserialize_from<R: std::io::Read>(mut reader: R) -> FieldSerdeResult<Self> { |
| 23 | + let mut u = [0u8; Self::SERIALIZED_SIZE]; |
| 24 | + reader.read_exact(&mut u)?; |
| 25 | + Ok(GF2x64 { |
| 26 | + v: u64::from_le_bytes(u), |
| 27 | + }) |
| 28 | + } |
| 29 | + |
| 30 | + #[inline] |
| 31 | + fn try_deserialize_from_ecc_format<R: std::io::Read>(_reader: R) -> FieldSerdeResult<Self> { |
| 32 | + unimplemented!("We don't have serialization in ecc for gf2x64") |
| 33 | + } |
| 34 | +} |
| 35 | + |
| 36 | +impl Field for GF2x64 { |
| 37 | + const NAME: &'static str = "Galois Field 2 SIMD 64"; |
| 38 | + |
| 39 | + const SIZE: usize = 8; |
| 40 | + |
| 41 | + const FIELD_SIZE: usize = 64; |
| 42 | + |
| 43 | + const ZERO: Self = GF2x64 { v: 0 }; |
| 44 | + |
| 45 | + const ONE: Self = GF2x64 { v: !0u64 }; |
| 46 | + |
| 47 | + const INV_2: Self = GF2x64 { v: 0 }; // NOTE: should not be used |
| 48 | + |
| 49 | + #[inline(always)] |
| 50 | + fn zero() -> Self { |
| 51 | + GF2x64::ZERO |
| 52 | + } |
| 53 | + |
| 54 | + #[inline(always)] |
| 55 | + fn one() -> Self { |
| 56 | + GF2x64::ONE |
| 57 | + } |
| 58 | + |
| 59 | + #[inline(always)] |
| 60 | + fn is_zero(&self) -> bool { |
| 61 | + self.v == 0 |
| 62 | + } |
| 63 | + |
| 64 | + #[inline(always)] |
| 65 | + fn random_unsafe(mut rng: impl rand::RngCore) -> Self { |
| 66 | + GF2x64 { v: rng.next_u64() } |
| 67 | + } |
| 68 | + |
| 69 | + #[inline(always)] |
| 70 | + fn random_bool(mut rng: impl rand::RngCore) -> Self { |
| 71 | + GF2x64 { v: rng.next_u64() } |
| 72 | + } |
| 73 | + |
| 74 | + #[inline(always)] |
| 75 | + fn exp(&self, exponent: u128) -> Self { |
| 76 | + if exponent % 2 == 0 { |
| 77 | + Self::one() |
| 78 | + } else { |
| 79 | + *self |
| 80 | + } |
| 81 | + } |
| 82 | + |
| 83 | + #[inline(always)] |
| 84 | + fn inv(&self) -> Option<Self> { |
| 85 | + unimplemented!() |
| 86 | + } |
| 87 | + |
| 88 | + #[inline(always)] |
| 89 | + fn as_u32_unchecked(&self) -> u32 { |
| 90 | + self.v as u32 |
| 91 | + } |
| 92 | + |
| 93 | + #[inline(always)] |
| 94 | + fn from_uniform_bytes(bytes: &[u8; 32]) -> Self { |
| 95 | + let mut buf = [0u8; 8]; |
| 96 | + buf[..].copy_from_slice(&bytes[..8]); |
| 97 | + GF2x64 { |
| 98 | + v: u64::from_le_bytes(buf), |
| 99 | + } |
| 100 | + } |
| 101 | + |
| 102 | + #[inline(always)] |
| 103 | + fn mul_by_5(&self) -> Self { |
| 104 | + *self |
| 105 | + } |
| 106 | + |
| 107 | + #[inline(always)] |
| 108 | + fn mul_by_6(&self) -> Self { |
| 109 | + Self::ZERO |
| 110 | + } |
| 111 | +} |
| 112 | + |
| 113 | +impl Mul<&GF2x64> for GF2x64 { |
| 114 | + type Output = GF2x64; |
| 115 | + |
| 116 | + #[inline(always)] |
| 117 | + #[allow(clippy::suspicious_arithmetic_impl)] |
| 118 | + fn mul(self, rhs: &GF2x64) -> Self::Output { |
| 119 | + GF2x64 { v: self.v & rhs.v } |
| 120 | + } |
| 121 | +} |
| 122 | + |
| 123 | +impl Mul<GF2x64> for GF2x64 { |
| 124 | + type Output = GF2x64; |
| 125 | + |
| 126 | + #[inline(always)] |
| 127 | + #[allow(clippy::suspicious_arithmetic_impl)] |
| 128 | + fn mul(self, rhs: GF2x64) -> GF2x64 { |
| 129 | + GF2x64 { v: self.v & rhs.v } |
| 130 | + } |
| 131 | +} |
| 132 | + |
| 133 | +impl MulAssign<&GF2x64> for GF2x64 { |
| 134 | + #[inline(always)] |
| 135 | + #[allow(clippy::suspicious_op_assign_impl)] |
| 136 | + fn mul_assign(&mut self, rhs: &GF2x64) { |
| 137 | + self.v &= rhs.v; |
| 138 | + } |
| 139 | +} |
| 140 | + |
| 141 | +impl MulAssign<GF2x64> for GF2x64 { |
| 142 | + #[inline(always)] |
| 143 | + #[allow(clippy::suspicious_op_assign_impl)] |
| 144 | + fn mul_assign(&mut self, rhs: GF2x64) { |
| 145 | + self.v &= rhs.v; |
| 146 | + } |
| 147 | +} |
| 148 | + |
| 149 | +impl Sub for GF2x64 { |
| 150 | + type Output = GF2x64; |
| 151 | + |
| 152 | + #[inline(always)] |
| 153 | + #[allow(clippy::suspicious_arithmetic_impl)] |
| 154 | + fn sub(self, rhs: GF2x64) -> GF2x64 { |
| 155 | + GF2x64 { v: self.v ^ rhs.v } |
| 156 | + } |
| 157 | +} |
| 158 | + |
| 159 | +impl SubAssign for GF2x64 { |
| 160 | + #[inline(always)] |
| 161 | + #[allow(clippy::suspicious_op_assign_impl)] |
| 162 | + fn sub_assign(&mut self, rhs: GF2x64) { |
| 163 | + self.v ^= rhs.v; |
| 164 | + } |
| 165 | +} |
| 166 | + |
| 167 | +impl Add for GF2x64 { |
| 168 | + type Output = GF2x64; |
| 169 | + |
| 170 | + #[inline(always)] |
| 171 | + #[allow(clippy::suspicious_arithmetic_impl)] |
| 172 | + fn add(self, rhs: GF2x64) -> GF2x64 { |
| 173 | + GF2x64 { v: self.v ^ rhs.v } |
| 174 | + } |
| 175 | +} |
| 176 | + |
| 177 | +impl AddAssign for GF2x64 { |
| 178 | + #[inline(always)] |
| 179 | + #[allow(clippy::suspicious_op_assign_impl)] |
| 180 | + fn add_assign(&mut self, rhs: GF2x64) { |
| 181 | + self.v ^= rhs.v; |
| 182 | + } |
| 183 | +} |
| 184 | + |
| 185 | +impl Add<&GF2x64> for GF2x64 { |
| 186 | + type Output = GF2x64; |
| 187 | + |
| 188 | + #[inline(always)] |
| 189 | + #[allow(clippy::suspicious_arithmetic_impl)] |
| 190 | + fn add(self, rhs: &GF2x64) -> GF2x64 { |
| 191 | + GF2x64 { v: self.v ^ rhs.v } |
| 192 | + } |
| 193 | +} |
| 194 | + |
| 195 | +impl Sub<&GF2x64> for GF2x64 { |
| 196 | + type Output = GF2x64; |
| 197 | + |
| 198 | + #[inline(always)] |
| 199 | + #[allow(clippy::suspicious_arithmetic_impl)] |
| 200 | + fn sub(self, rhs: &GF2x64) -> GF2x64 { |
| 201 | + GF2x64 { v: self.v ^ rhs.v } |
| 202 | + } |
| 203 | +} |
| 204 | + |
| 205 | +impl<T: std::borrow::Borrow<GF2x64>> std::iter::Sum<T> for GF2x64 { |
| 206 | + fn sum<I: Iterator<Item = T>>(iter: I) -> Self { |
| 207 | + iter.fold(Self::zero(), |acc, item| acc + item.borrow()) |
| 208 | + } |
| 209 | +} |
| 210 | + |
| 211 | +impl<T: std::borrow::Borrow<GF2x64>> std::iter::Product<T> for GF2x64 { |
| 212 | + fn product<I: Iterator<Item = T>>(iter: I) -> Self { |
| 213 | + iter.fold(Self::one(), |acc, item| acc * item.borrow()) |
| 214 | + } |
| 215 | +} |
| 216 | + |
| 217 | +impl Neg for GF2x64 { |
| 218 | + type Output = GF2x64; |
| 219 | + |
| 220 | + #[inline(always)] |
| 221 | + #[allow(clippy::suspicious_arithmetic_impl)] |
| 222 | + fn neg(self) -> GF2x64 { |
| 223 | + GF2x64 { v: self.v } |
| 224 | + } |
| 225 | +} |
| 226 | + |
| 227 | +impl AddAssign<&GF2x64> for GF2x64 { |
| 228 | + #[inline(always)] |
| 229 | + #[allow(clippy::suspicious_op_assign_impl)] |
| 230 | + fn add_assign(&mut self, rhs: &GF2x64) { |
| 231 | + self.v ^= rhs.v; |
| 232 | + } |
| 233 | +} |
| 234 | + |
| 235 | +impl SubAssign<&GF2x64> for GF2x64 { |
| 236 | + #[inline(always)] |
| 237 | + #[allow(clippy::suspicious_op_assign_impl)] |
| 238 | + fn sub_assign(&mut self, rhs: &GF2x64) { |
| 239 | + self.v ^= rhs.v; |
| 240 | + } |
| 241 | +} |
| 242 | + |
| 243 | +impl From<u32> for GF2x64 { |
| 244 | + #[inline(always)] |
| 245 | + fn from(v: u32) -> Self { |
| 246 | + assert!(v < 2); |
| 247 | + if v == 0 { |
| 248 | + GF2x64 { v: 0 } |
| 249 | + } else { |
| 250 | + GF2x64 { v: !0u64 } |
| 251 | + } |
| 252 | + } |
| 253 | +} |
| 254 | + |
| 255 | +impl From<GF2> for GF2x64 { |
| 256 | + #[inline(always)] |
| 257 | + fn from(v: GF2) -> Self { |
| 258 | + assert!(v.v < 2); |
| 259 | + if v.v == 0 { |
| 260 | + GF2x64 { v: 0 } |
| 261 | + } else { |
| 262 | + GF2x64 { v: !0u64 } |
| 263 | + } |
| 264 | + } |
| 265 | +} |
| 266 | + |
| 267 | +impl SimdField for GF2x64 { |
| 268 | + #[inline(always)] |
| 269 | + fn scale(&self, challenge: &Self::Scalar) -> Self { |
| 270 | + if challenge.v == 0 { |
| 271 | + Self::zero() |
| 272 | + } else { |
| 273 | + *self |
| 274 | + } |
| 275 | + } |
| 276 | + |
| 277 | + #[inline(always)] |
| 278 | + fn pack(base_vec: &[Self::Scalar]) -> Self { |
| 279 | + assert!(base_vec.len() == Self::PACK_SIZE); |
| 280 | + let mut ret = 0u64; |
| 281 | + for (i, scalar) in base_vec.iter().enumerate() { |
| 282 | + ret |= (scalar.v as u64) << (Self::PACK_SIZE - 1 - i); |
| 283 | + } |
| 284 | + Self { v: ret } |
| 285 | + } |
| 286 | + |
| 287 | + #[inline(always)] |
| 288 | + fn unpack(&self) -> Vec<Self::Scalar> { |
| 289 | + let mut ret = vec![]; |
| 290 | + for i in 0..Self::PACK_SIZE { |
| 291 | + ret.push(Self::Scalar { |
| 292 | + v: ((self.v >> (Self::PACK_SIZE - 1 - i)) & 1u64) as u8, |
| 293 | + }); |
| 294 | + } |
| 295 | + ret |
| 296 | + } |
| 297 | + |
| 298 | + type Scalar = crate::GF2; |
| 299 | + |
| 300 | + const PACK_SIZE: usize = 64; |
| 301 | +} |
0 commit comments