Skip to content
Open
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 4 additions & 7 deletions crates/provers/plonk/src/constraint_system/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -193,13 +193,10 @@ where
&self,
values: &HashMap<Variable, FieldElement<F>>,
) -> Vec<FieldElement<F>> {
let mut public_inputs = Vec::new();
for key in &self.public_input_variables {
if let Some(value) = values.get(key) {
public_inputs.push(value.clone());
}
}
public_inputs
self.public_input_variables
.iter()
.filter_map(|key| values.get(key).cloned())
.collect()
}
}

Expand Down
8 changes: 4 additions & 4 deletions crates/provers/plonk/src/constraint_system/solver.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,18 +19,18 @@ where
mut assignments: HashMap<Variable, FE<F>>,
) -> Result<HashMap<Variable, FE<F>>, SolverError> {
loop {
let old_solved = assignments.keys().len();
for constraint in self.constraints.iter() {
let old_solved = assignments.len();
for constraint in &self.constraints {
assignments = solve_hint(assignments, constraint);
assignments = solve_constraint(assignments, constraint);
}
if old_solved == assignments.keys().len() {
if old_solved == assignments.len() {
break;
}
}

// Check the system is solved
for constraint in self.constraints.iter() {
for constraint in &self.constraints {
let a = assignments.get(&constraint.l);
let b = assignments.get(&constraint.r);
let c = assignments.get(&constraint.o);
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Correctness

  • The changes involve iteration over constraints with assignments. Ensure that solve_hint and solve_constraint are correctly transforming the assignments, maintaining invariant properties for each constraint.
  • Consider edge cases where assignments may initially be empty, or where constraints contain zero or identity elements, which might produce unexpected results.

Security

  • Verify if solve_hint and solve_constraint operate in constant-time, especially if handling secret data to prevent timing side-channel attacks.
  • Check if sensitive data within assignments requires zeroization after processing.

Performance

  • Iterating over all constraints for each assignment might be inefficient depending on the size of self.constraints. Consider breaking out early if possible on each iteration after an assignment changes.

Bugs & Errors

  • Confirm that Variable identifiers are correctly handled and that any possible conversion or retrieval of hash map values does not panic (e.g., due to missing constraints or invalid keys).

Code Simplicity

  • Current loop structure with assignments = solve_hint(assignments, constraint); and assignments = solve_constraint(assignments, constraint); may introduce redundancy. If possible, merging these processes could simplify the implementation, reducing cognitive load.

Overall Comment

Ensure thorough testing for edge cases and data-related issues. Verify if a more efficient constraint solving methodology can be implemented to enhance performance without compromising correctness and security.

Expand Down
112 changes: 87 additions & 25 deletions crates/provers/plonk/src/prover.rs
Original file line number Diff line number Diff line change
Expand Up @@ -334,28 +334,41 @@ where
gamma: FieldElement<F>,
) -> Round2Result<F, CS::Commitment> {
let cpi = common_preprocessed_input;
let mut coefficients: Vec<FieldElement<F>> = vec![FieldElement::one()];
let (s1, s2, s3) = (&cpi.s1_lagrange, &cpi.s2_lagrange, &cpi.s3_lagrange);

let k2 = &cpi.k1 * &cpi.k1;

let lp = |w: &FieldElement<F>, eta: &FieldElement<F>| w + &beta * eta + &gamma;

for i in 0..&cpi.n - 1 {
// Compute all numerators and denominators first.
// We need n-1 factors to compute n coefficients: z[0]=1, z[i+1]=z[i]*factor[i] for i in 0..n-1.
// This matches the original loop range `0..cpi.n - 1`.
let n_minus_1 = cpi.n - 1;
let mut numerators = Vec::with_capacity(n_minus_1);
let mut denominators = Vec::with_capacity(n_minus_1);

for i in 0..n_minus_1 {
let (a_i, b_i, c_i) = (&witness.a[i], &witness.b[i], &witness.c[i]);
let num = lp(a_i, &cpi.domain[i])
* lp(b_i, &(&cpi.domain[i] * &cpi.k1))
* lp(c_i, &(&cpi.domain[i] * &k2));
let den = lp(a_i, &s1[i]) * lp(b_i, &s2[i]) * lp(c_i, &s3[i]);
// den != 0 with overwhelming probability because beta and gamma are random elements.
let new_factor = (num / den).expect(
"division by zero in permutation polynomial: beta and gamma should prevent this",
);

let new_term = coefficients
.last()
.expect("coefficients vector is non-empty")
* &new_factor;
numerators.push(num);
denominators.push(den);
}

// Batch invert all denominators at once (much faster than n-1 individual inversions)
FieldElement::inplace_batch_inverse(&mut denominators).expect(
"batch inversion failed in permutation polynomial: beta and gamma should prevent zeros",
);

// Compute coefficients using the inverted denominators
let mut coefficients: Vec<FieldElement<F>> = Vec::with_capacity(cpi.n);
coefficients.push(FieldElement::one());

Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Correctness

  • Primitive Root Assumptions: You correctly assert the existence of a primitive root using expect, but ensure that the assumptions about the degree calculations hold under all cases, especially related to trailing zeros and field limits.
  • Edge Cases: Ensure that assumptions about degree == 4 * n are valid for all input scenarios beyond just the constraints in the comments. Changes in input domain characteristics could lead to edge cases being improperly handled.

Security

  • Timing Side-Channels: Code involving field elements often needs to be constant time. Ensure that additions using + and multiplications with * on field elements are constant time to avoid leaks.
  • SAFETY Assertions: Assertions are used for optimization checks, but consider using them in a way that does not leak information in a release build, or ensure they compile out entirely, depending how critical they could be as attack vectors.

Performance

  • Unnecessary Allocations: The direct use of Vec::with_capacity(n_minus_1) can be justified if filled without extraneous allocations. After ensuring correctness, look for other parts of the code that might perform unnecessary allocations tightly connected with mathematical operations.

Bugs & Errors

  • Panics and Debug Assertions: Although debug assertions are useful, they should not reach a production code path that could panic under an invalid constraint. Consider a strategy to gracefully handle these conditions.
  • Potential Panics with expect: While you use expect in multiple places, ensure that the message clearly describes the failure context and, if possible, check those conditions upfront to avoid panics.

Code Simplicity

  • Complex Assertions: Perhaps consider encapsulating complex checks like the degree match into helper functions, potentially improving code readability and reuse.

Overall, assumptions in mathematical operations need further scrutiny to ensure that the constraints and debug assertions align with real-world and adversarial inputs. Consider potential refactorings or performance enhancements after addressing foundational issues.

for i in 0..n_minus_1 {
let factor = &numerators[i] * &denominators[i];
let new_term = coefficients.last().expect("coefficients non-empty") * &factor;
coefficients.push(new_term);
}

Expand Down Expand Up @@ -386,10 +399,6 @@ where
let cpi = common_preprocessed_input;
let k2 = &cpi.k1 * &cpi.k1;

let one = Polynomial::new_monomial(FieldElement::one(), 0);
let p_x = &Polynomial::new_monomial(FieldElement::<F>::one(), 1);
let zh = Polynomial::new_monomial(FieldElement::<F>::one(), cpi.n) - &one;

let z_x_omega_coefficients: Vec<FieldElement<F>> = p_z
.coefficients()
.iter()
Expand Down Expand Up @@ -430,8 +439,25 @@ where
.expect("FFT evaluation of qc must be within field's two-adicity limit");
let p_pi_eval = Polynomial::evaluate_offset_fft(&p_pi, 1, Some(degree), offset)
.expect("FFT evaluation of p_pi must be within field's two-adicity limit");
let p_x_eval = Polynomial::evaluate_offset_fft(p_x, 1, Some(degree), offset)
.expect("FFT evaluation of p_x must be within field's two-adicity limit");
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Correctness

  • The code makes several assumptions about the existence of primitive roots which are verified using expect. While the debug assertions help ensure these assumptions hold, they can lead to panics in release builds if the conditions change. Consider replacing these with proper error handling or ensuring that the assumptions are always validated.
  • The safety comment regarding the assumption degree == 4 * n should be clearly documented and verified elsewhere to ensure this invariant is maintained across future code changes.

Security

  • There are no measures mentioned regarding cryptographically secure randomness, which may be important depending on where and how this code is used.
  • There is no explicit mention of constant-time operations or handling of secret values without timing side-channels. Ensure that sensitive operations do not introduce timing attacks.
  • The provided code does not indicate handling of zeroization of sensitive data.

Performance

  • The use of debug assertions is a good practice, but also consider adding release-build checks or optimizations that do not rely solely on debug-build diagnostics.
  • Ensure that unnecessary allocations are minimized especially in high-demand functions such as FFT evaluations.

Bugs & Errors

  • The use of expect statements can lead to panics which should be avoided especially in a cryptographic library.
  • Relying on panic behavior is risky; consider using a more robust error handling mechanism.

Code Simplicity

  • The explanations within the comments are informative and help in understanding the logic of the implementation. Ensure that these assumptions and optimizations are also reflected in the user-facing documentation.

Consider addressing the above issues before merging to ensure robustness and security of the cryptographic operations.


// Optimization: p_x = X (identity polynomial), so p_x(offset * ω^i) = offset * ω^i.
// Generate the coset directly instead of using FFT.
// Note: This uses the same primitive root that evaluate_offset_fft uses internally,
// since both derive it from F::get_primitive_root_of_unity for the same domain size.
let omega = F::get_primitive_root_of_unity(degree.trailing_zeros() as u64)
.expect("primitive root exists for degree");
let p_x_eval: Vec<_> = (0..degree)
.scan(offset.clone(), |current, _| {
let val = current.clone();
*current = &*current * &omega;
Some(val)
})
.collect();
debug_assert_eq!(
p_x_eval.len(),
p_a_eval.len(),
"p_x_eval length must match FFT evaluation length"
);
let p_z_eval = Polynomial::evaluate_offset_fft(p_z, 1, Some(degree), offset)
.expect("FFT evaluation of p_z must be within field's two-adicity limit");
let p_z_x_omega_eval = Polynomial::evaluate_offset_fft(&z_x_omega, 1, Some(degree), offset)
Expand Down Expand Up @@ -505,11 +531,38 @@ where
.map(|((p2, p1), co)| (p2 * &alpha + p1) * &alpha + co)
.collect();

let mut zh_eval = Polynomial::evaluate_offset_fft(&zh, 1, Some(degree), offset).expect(
"FFT evaluation of vanishing polynomial must be within field's two-adicity limit",
// Optimization: Z_H(x) = x^n - 1 has only 4 distinct values on a coset of size 4n.
// On coset {offset * ω^i : i = 0..4n-1} where ω is primitive 4n-th root:
// Z_H(offset * ω^i) = offset^n * (ω^n)^i - 1
// Since ω^n is a 4th root of unity, (ω^n)^i cycles through 4 values.
//
// SAFETY: This optimization assumes degree == 4 * n. If degree changes (see TODO above),
// this optimization must be revisited.
debug_assert_eq!(
degree,
4 * cpi.n,
"Z_H optimization requires degree == 4n; if degree formula changes, update this code"
);
Comment on lines +572 to 576
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Debug-only assumption check

degree == 4 * cpi.n is only enforced with debug_assert_eq!, so in release builds this optimization can silently produce incorrect Z_H evaluations if degree ever changes (the surrounding code even has a TODO about the factor of 4). This should be a real assert_eq! (or a checked branch that falls back to the FFT path) so invalid proofs can’t be produced in optimized builds.

Prompt To Fix With AI
This is a comment left during a code review.
Path: crates/provers/plonk/src/prover.rs
Line: 541:545

Comment:
**Debug-only assumption check**

`degree == 4 * cpi.n` is only enforced with `debug_assert_eq!`, so in release builds this optimization can silently produce incorrect `Z_H` evaluations if `degree` ever changes (the surrounding code even has a TODO about the factor of 4). This should be a real `assert_eq!` (or a checked branch that falls back to the FFT path) so invalid proofs can’t be produced in optimized builds.

How can I resolve this? If you propose a fix, please make it concise.

FieldElement::inplace_batch_inverse(&mut zh_eval)
.expect("vanishing polynomial evaluations are non-zero because evaluated on coset offset from the roots of unity");
let omega_4n = F::get_primitive_root_of_unity(degree.trailing_zeros() as u64)
.expect("primitive root exists for degree");
let omega_n = omega_4n.pow(cpi.n as u64); // ω^n where ω is 4n-th root; this is a 4th root of unity
let offset_to_n = offset.pow(cpi.n as u64);

// Compute the 4 distinct Z_H values and their inverses
// Use multiplication chain for small powers (faster than pow)
let omega_n_sq = &omega_n * &omega_n;
let omega_n_cubed = &omega_n_sq * &omega_n;
let mut zh_base = [
&offset_to_n - FieldElement::<F>::one(), // i ≡ 0 (mod 4)
&offset_to_n * &omega_n - FieldElement::<F>::one(), // i ≡ 1 (mod 4)
&offset_to_n * &omega_n_sq - FieldElement::<F>::one(), // i ≡ 2 (mod 4)
&offset_to_n * &omega_n_cubed - FieldElement::<F>::one(), // i ≡ 3 (mod 4)
];
FieldElement::inplace_batch_inverse(&mut zh_base)
.expect("Z_H evaluations are non-zero on coset offset from roots of unity");

// Build full evaluation vector by cycling through the 4 values
let zh_eval: Vec<_> = (0..degree).map(|i| zh_base[i % 4].clone()).collect();
let c: Vec<_> = p_eval
Comment on lines 565 to 597
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Z_H shortcut assumes degree=4n

The new Z_H evaluation shortcut relies on degree = 4 * cpi.n so that omega_4n is a primitive 4n-th root and omega_n = omega_4n.pow(n) is a 4th root of unity. This is only valid under that exact relationship and when degree is exactly 4n in the FFT calls. If degree changes (the code even has a TODO about “factor of 4”), or if evaluate_offset_fft internally uses a different root/order, then zh_eval will be wrong and the quotient t will be computed incorrectly.

At minimum, add an assertion that degree == 4 * cpi.n here (and/or compute omega_n from the actual evaluation root used by the FFT helper), so the optimization can’t silently produce invalid proofs when degree is adjusted.

Prompt To Fix With AI
This is a comment left during a code review.
Path: crates/provers/plonk/src/prover.rs
Line: 526:550

Comment:
**Z_H shortcut assumes degree=4n**

The new `Z_H` evaluation shortcut relies on `degree = 4 * cpi.n` so that `omega_4n` is a primitive `4n`-th root and `omega_n = omega_4n.pow(n)` is a 4th root of unity. This is only valid under that exact relationship and when `degree` is exactly `4n` in the FFT calls. If `degree` changes (the code even has a TODO about “factor of 4”), or if `evaluate_offset_fft` internally uses a different root/order, then `zh_eval` will be wrong and the quotient `t` will be computed incorrectly.

At minimum, add an assertion that `degree == 4 * cpi.n` here (and/or compute `omega_n` from the actual evaluation root used by the FFT helper), so the optimization can’t silently produce invalid proofs when `degree` is adjusted.

How can I resolve this? If you propose a fix, please make it concise.

.iter()
.zip(zh_eval.iter())
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Correctness:

  1. Mathematical Operations:

    • In lp lambda, ensure that operations involving beta and gamma are performed modulo the field order. Ensure that the field operations are sound and the modular arithmetic is respected.
  2. Edge Cases:

    • The code relies on beta and gamma to prevent zero denominators for the permutation polynomial. Consider handling exceptional cases where these conditions might not be satisfied, especially when relying on randomness.
  3. Polynomial and FFT Implementations:

    • The optimizations in evaluating p_x and Z_H directly via coset arithmetic are sound. However, ensure there are no rounding errors or omissions in the coefficient calculations.

Security:

  1. Timing Side-Channels:

    • The operations appear to be constant time, but ensure the division and inversion operations respect constant-time constraints.
  2. Zeroization of Sensitive Data:

    • There is no explicit zeroization or sensitive data management observed. Ensure that sensitive variables are properly zeroized post-use.
  3. Cryptographically Secure Randomness:

    • Random values like beta and gamma need a secure source. Ensure a cryptographically secure randomness source is used.
  4. Secret-dependent Branching:

    • The code doesn't seem to have any secret-dependent branching.
  5. Hash Function Domain Separation:

    • No hash functions are shown in the snippets; ensure proper domain separation is used where applicable.

Performance:

  1. Unnecessary Allocations:

    • Good use of pre-allocation in vectors.
  2. Redundant Field Inversions:

    • Efficient batch inversion utilized to prevent redundant operations; well-done.
  3. MSM and FFT Efficiency:

    • Recheck the assumptions for degree and root calculations to ensure no off-by-one errors.

Bugs & Errors:

  1. Potential Panics or Unwraps:

    • expect usages could lead to panics if assumptions fail. Handle these carefully or ensure robust checks before.
  2. Memory Safety Issues:

    • Ensure no unsafe memory operations are performed without proper checks (not shown in provided snippet).
  3. Off-by-one Errors:

    • Check iterations and index usage, especially around degree and n calculations.
  4. Integer Overflow/Underflow:

    • Check the field element scalar multiplication and power operations for potential overflow, especially in custom operations.

Code Simplicity:

  1. Overly Complex Implementations:

    • Optimization is great but may introduce complexity. Ensure thorough testing and documentation.
  2. Duplicated Code:

    • None noted; efficient reuse of logic observed, but ensure it's clearly documented.
  3. Poor Abstractions:

    • Consider abstracting pattern of batch inversions and FFT evaluations for reusability.

Conclusion:

The code appears to be moving towards performant and efficient optimization but has some concerns around robustness against unexpected cases and randomness guarantees. The reliance on expect for critical paths needs revisiting. Address these issues before merging.

Expand Down Expand Up @@ -583,11 +636,19 @@ where
let (r1, r2, r3, r4) = (round_1, round_2, round_3, round_4);
// Precompute variables
let k2 = &cpi.k1 * &cpi.k1;
let zeta_raised_n = Polynomial::new_monomial(r4.zeta.pow(cpi.n + 2), 0); // TODO: Paper says n and 2n, but Gnark uses n+2 and 2n+4
let zeta_raised_2n = Polynomial::new_monomial(r4.zeta.pow(2 * cpi.n + 4), 0);

// Compute zeta powers efficiently: zeta^n, zeta^(n+2), zeta^(2n+4)
// Start with zeta^n, then derive others to avoid redundant exponentiations
let zeta_n = r4.zeta.pow(cpi.n as u64);
let zeta_sq = &r4.zeta * &r4.zeta;
let zeta_n_plus_2 = &zeta_n * &zeta_sq; // zeta^(n+2) = zeta^n * zeta^2
let zeta_2n_plus_4 = &zeta_n_plus_2 * &zeta_n_plus_2; // zeta^(2n+4) = (zeta^(n+2))^2

let zeta_raised_n = Polynomial::new_monomial(zeta_n_plus_2, 0); // TODO: Paper says n and 2n, but Gnark uses n+2 and 2n+4
let zeta_raised_2n = Polynomial::new_monomial(zeta_2n_plus_4, 0);

// zeta is sampled outside the set of roots of unity so zeta != 1, and n != 0.
let l1_zeta = ((&r4.zeta.pow(cpi.n as u64) - FieldElement::<F>::one())
let l1_zeta = ((&zeta_n - FieldElement::<F>::one())
/ ((&r4.zeta - FieldElement::<F>::one()) * FieldElement::<F>::from(cpi.n as u64)))
.expect("zeta is outside roots of unity so denominator is non-zero");

Expand All @@ -606,10 +667,11 @@ where
* &r2.beta
* &r4.z_zeta_omega
* &cpi.s3;
let alpha_squared = &r3.alpha * &r3.alpha;
p_non_constant += (r_2_2 - r_2_1) * &r3.alpha;

let r_3 = &r2.p_z * l1_zeta;
p_non_constant += r_3 * &r3.alpha * &r3.alpha;
p_non_constant += r_3 * &alpha_squared;

let partial_t = &r3.p_t_lo + zeta_raised_n * &r3.p_t_mid + zeta_raised_2n * &r3.p_t_hi;

Expand Down
55 changes: 40 additions & 15 deletions crates/provers/plonk/src/test_utils/utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,20 +34,28 @@ pub fn test_srs(n: usize) -> StructuredReferenceString<G1Point, G2Point> {
let g1 = <BLS12381Curve as IsEllipticCurve>::generator();
let g2 = <BLS12381TwistCurve as IsEllipticCurve>::generator();

// Use iterative multiplication instead of pow() for efficiency
let powers_main_group: Vec<G1Point> = (0..n + 3)
.map(|exp| g1.operate_with_self(s.pow(exp as u64).canonical()))
.scan(FrElement::one(), |s_power, _| {
let result = g1.operate_with_self(s_power.canonical());
*s_power = &*s_power * &s;
Some(result)
})
.collect();
let powers_secondary_group = [g2.clone(), g2.operate_with_self(s.canonical())];

StructuredReferenceString::new(&powers_main_group, &powers_secondary_group)
}

/// Generates a domain to interpolate: 1, omega, omega², ..., omega^size
/// Generates a domain to interpolate: 1, omega, omega², ..., omega^(size-1)
pub fn generate_domain<F: IsField>(omega: &FieldElement<F>, size: usize) -> Vec<FieldElement<F>> {
(1..size).fold(vec![FieldElement::one()], |mut acc, _| {
acc.push(acc.last().unwrap() * omega);
acc
})
(0..size)
.scan(FieldElement::one(), |power, _| {
let result = power.clone();
*power = &*power * omega;
Some(result)
})
.collect()
}

/// Generates the permutation coefficients for the copy constraints.
Expand All @@ -59,10 +67,11 @@ pub fn generate_permutation_coefficients<F: IsField>(
order_r_minus_1_root_unity: &FieldElement<F>,
) -> Vec<FieldElement<F>> {
let identity = identity_permutation(omega, n, order_r_minus_1_root_unity);
let permuted: Vec<FieldElement<F>> = (0..n * 3)
.map(|i| identity[permutation[i]].clone())
.collect();
permuted
permutation
.iter()
.take(n * 3)
.map(|&i| identity[i].clone())
.collect()
}

/// The identity permutation, auxiliary function to generate the copy constraints.
Expand All @@ -72,11 +81,27 @@ fn identity_permutation<F: IsField>(
order_r_minus_1_root_unity: &FieldElement<F>,
) -> Vec<FieldElement<F>> {
let u = order_r_minus_1_root_unity;
let mut result: Vec<FieldElement<F>> = vec![];
for index_column in 0..=2 {
for index_row in 0..n {
result.push(w.pow(index_row) * u.pow(index_column as u64));
}
let u_sq = u * u;

// Precompute w^i for i in 0..n using iterative multiplication
let w_powers: Vec<_> = (0..n)
.scan(FieldElement::one(), |w_power, _| {
let result = w_power.clone();
*w_power = &*w_power * w;
Some(result)
})
.collect();

// Build result: [w^i * u^0, w^i * u^1, w^i * u^2] for each column
let mut result = Vec::with_capacity(3 * n);
for w_i in &w_powers {
result.push(w_i.clone());
}
for w_i in &w_powers {
result.push(w_i * u);
}
for w_i in &w_powers {
result.push(w_i * &u_sq);
}
result
}
Expand Down
27 changes: 15 additions & 12 deletions crates/provers/plonk/src/verifier.rs
Original file line number Diff line number Diff line change
Expand Up @@ -326,17 +326,26 @@ impl<F: IsField + IsFFTField + HasDefaultTranscript, CS: IsCommitmentScheme<F>>
FieldElement<F>: ByteConversion,
{
let [beta, gamma, alpha, zeta, upsilon] = self.compute_challenges(p, vk, public_input);
let zh_zeta = zeta.pow(input.n) - FieldElement::<F>::one();

let k1 = &input.k1;
let k2 = k1 * k1;

// Precompute zeta powers efficiently
let zeta_n = zeta.pow(input.n as u64);
let zeta_sq = &zeta * &zeta;
let zeta_n_plus_2 = &zeta_n * &zeta_sq;
let zeta_2n_plus_4 = &zeta_n_plus_2 * &zeta_n_plus_2;
let zh_zeta = &zeta_n - FieldElement::<F>::one();

// We are using that zeta != 0 because is sampled outside the set of roots of unity,
// and n != 0 because is the length of the trace.
let l1_zeta = ((zeta.pow(input.n as u64) - FieldElement::<F>::one())
let l1_zeta = (&zh_zeta
/ ((&zeta - FieldElement::<F>::one()) * FieldElement::from(input.n as u64)))
.expect("zeta is outside roots of unity so denominator is non-zero");

// Precompute alpha^2 for reuse
let alpha_squared = &alpha * &alpha;

// Use the following equality to compute PI(ζ)
// without interpolating:
// Lᵢ₊₁ = ω Lᵢ (X − ωⁱ) / (X − ωⁱ⁺¹)
Expand All @@ -362,7 +371,7 @@ impl<F: IsField + IsFFTField + HasDefaultTranscript, CS: IsCommitmentScheme<F>>
* (&p.c_zeta + &gamma)
* (&p.a_zeta + &beta * &p.s1_zeta + &gamma)
* (&p.b_zeta + &beta * &p.s2_zeta + &gamma);
p_constant_zeta = p_constant_zeta - &l1_zeta * &alpha * &alpha;
p_constant_zeta = p_constant_zeta - &l1_zeta * &alpha_squared;
p_constant_zeta += p_pi_zeta;

let p_zeta = p_constant_zeta + &p.p_non_constant_zeta;
Expand All @@ -372,14 +381,8 @@ impl<F: IsField + IsFFTField + HasDefaultTranscript, CS: IsCommitmentScheme<F>>
// Compute commitment of partial evaluation of t (p = zh * t)
let partial_t_1 = p
.t_lo_1
.operate_with(
&p.t_mid_1
.operate_with_self(zeta.pow(input.n + 2).canonical()),
)
.operate_with(
&p.t_hi_1
.operate_with_self(zeta.pow(2 * input.n + 4).canonical()),
);
.operate_with(&p.t_mid_1.operate_with_self(zeta_n_plus_2.canonical()))
.operate_with(&p.t_hi_1.operate_with_self(zeta_2n_plus_4.canonical()));

// Compute commitment of the non constant part of the linearization of p
// The first term corresponds to the gates constraints
Expand Down Expand Up @@ -408,7 +411,7 @@ impl<F: IsField + IsFFTField + HasDefaultTranscript, CS: IsCommitmentScheme<F>>
// α²*L₁(ζ)*Z(X)
let third_term = p
.z_1
.operate_with_self((&alpha * &alpha * l1_zeta).canonical());
.operate_with_self((&alpha_squared * l1_zeta).canonical());

let p_non_constant_1 = first_term
.operate_with(&second_term)
Expand Down
Loading