 #include "rv64v_settings.h"
 
 #ifndef MLK_RVV_WIDENING_MUL
-
 static inline vint16m1_t fq_redc(vint16m1_t rh, vint16m1_t rl, size_t vl)
 {
   vint16m1_t t;
@@ -27,8 +26,7 @@ static inline vint16m1_t fq_redc(vint16m1_t rh, vint16m1_t rl, size_t vl)
 
   return t;
 }
-
-#endif
+#endif /* !MLK_RVV_WIDENING_MUL */
 
 /* Narrowing reduction */
 
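The body of fq_redc is elided by the hunk above. For orientation only, here is a minimal scalar sketch of the same Montgomery reduction, assuming the standard ML-KEM parameters q = 3329, R = 2^16, and the reference-style constant QINV = -3327 (q^-1 mod 2^16); the name montgomery_reduce is illustrative and does not appear in this file:

#include <stdint.h>

#define MLKEM_Q 3329
#define QINV -3327 /* q^-1 mod 2^16 */

/* Reduce a 32-bit product a to t = a * 2^-16 mod q, with |t| < q.
 * This is what the widening path (fq_redc2) computes per lane;
 * fq_redc does the same from a pre-split high/low 16-bit pair. */
static int16_t montgomery_reduce(int32_t a)
{
  int16_t m = (int16_t)a * QINV; /* m = a * q^-1 mod 2^16 */
  return (int16_t)((a - (int32_t)m * MLKEM_Q) >> 16);
}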
@@ -93,9 +91,9 @@ static inline vint16m1_t fq_mul_vv(vint16m1_t rx, vint16m1_t ry, size_t vl)
   rh = __riscv_vmulh_vv_i16m1(rx, ry, vl); /* h = (x * y) / R */
   rl = __riscv_vmul_vv_i16m1(rx, ry, vl);  /* l = (x * y) % R */
   return fq_redc(rh, rl, vl);
-#else
+#else /* !MLK_RVV_WIDENING_MUL */
   return fq_redc2(__riscv_vwmul_vv_i32m2(rx, ry, vl), vl);
-#endif
+#endif /* MLK_RVV_WIDENING_MUL */
 }
 
 /* Montgomery multiply: vector-scalar */
@@ -108,9 +106,9 @@ static inline vint16m1_t fq_mul_vx(vint16m1_t rx, int16_t ry, size_t vl)
   rh = __riscv_vmulh_vx_i16m1(rx, ry, vl); /* h = (x * y) / R */
   rl = __riscv_vmul_vx_i16m1(rx, ry, vl);  /* l = (x * y) % R */
   return fq_redc(rh, rl, vl);
-#else
+#else /* !MLK_RVV_WIDENING_MUL */
   return fq_redc2(__riscv_vwmul_vx_i32m2(rx, ry, vl), vl);
-#endif
+#endif /* MLK_RVV_WIDENING_MUL */
 }
 
 /* full normalization */
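Neither hunk shows how these helpers are driven. As a usage illustration only (the function fq_mul_array and its loop are assumptions, not part of this commit), a strip-mined pointwise multiply over two coefficient arrays could be built on fq_mul_vv with load/store intrinsics in the same __riscv_ naming scheme as above:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical driver: r[i] = x[i] * y[i] * 2^-16 mod q for i < n.
 * vsetvl picks how many 16-bit lanes each iteration handles. */
static void fq_mul_array(int16_t *r, const int16_t *x, const int16_t *y,
                         size_t n)
{
  for (size_t i = 0; i < n;)
  {
    size_t vl = __riscv_vsetvl_e16m1(n - i);
    vint16m1_t vx = __riscv_vle16_v_i16m1(&x[i], vl);
    vint16m1_t vy = __riscv_vle16_v_i16m1(&y[i], vl);
    __riscv_vse16_v_i16m1(&r[i], fq_mul_vv(vx, vy, vl), vl);
    i += vl;
  }
}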
@@ -670,7 +668,7 @@ void mlk_rv64v_poly_basemul_mont_add_k4(int16_t *r, const int16_t *a,
   mlk_rv64v_poly_basemul_mont_add_k(r, a, b, 4 * MLKEM_N);
 }
 
-#endif /* ( MLK_RVV_VLEN == 256) */
+#endif /* MLK_RVV_VLEN == 256 */
 
 /*************************************************
 * Name: poly_tomont
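The view truncates at the poly_tomont header. In standard ML-KEM implementations, conversion to Montgomery form multiplies each coefficient by R^2 mod q = 1353 (= 2^32 mod 3329), which maps directly onto the vector-scalar multiply above; a hedged sketch under that assumption (loop shape and the name poly_tomont_sketch are illustrative, not this file's implementation):

/* Multiply every coefficient by R^2 mod q = 1353, so that
 * fq_mul_vx(v, 1353, vl) = v * R^2 * R^-1 = v * R mod q per lane. */
static void poly_tomont_sketch(int16_t r[MLKEM_N])
{
  for (size_t i = 0; i < MLKEM_N;)
  {
    size_t vl = __riscv_vsetvl_e16m1(MLKEM_N - i);
    vint16m1_t v = __riscv_vle16_v_i16m1(&r[i], vl);
    __riscv_vse16_v_i16m1(&r[i], fq_mul_vx(v, 1353, vl), vl);
    i += vl;
  }
}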