@@ -216,7 +216,7 @@ constexpr DyadicFloat<Bits> quick_add(DyadicFloat<Bits> a,
     if (result.mantissa.add(b.mantissa)) {
       // Mantissa addition overflow.
       result.shift_right(1);
-      result.mantissa.val[DyadicFloat<Bits>::MantissaType::WORDCOUNT - 1] |=
+      result.mantissa.val[DyadicFloat<Bits>::MantissaType::WORD_COUNT - 1] |=
           (uint64_t(1) << 63);
     }
     // Result is already normalized.
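Note (not part of the patch): the hunk above handles the case where the multi-word mantissa addition carries out of its top word. The sum then needs Bits + 1 bits, so quick_add shifts the mantissa right by one (adjusting the exponent) and re-inserts the lost carry as bit 63 of the most significant word, i.e. `val[WORD_COUNT - 1]`. The sketch below illustrates the idea with a hypothetical two-word, 128-bit mantissa; the type and helper names here are assumptions made for the example, not the libc `UInt` implementation.

```cpp
#include <cstdint>
#include <cstdio>

// Hypothetical 128-bit mantissa: two 64-bit words in little-endian word order,
// so val[WORD_COUNT - 1] is the most significant word.
struct Mantissa128 {
  static constexpr int WORD_COUNT = 2;
  uint64_t val[WORD_COUNT];

  // In-place addition; returns true when the sum carries out of the top word.
  bool add(const Mantissa128 &rhs) {
    uint64_t sum_lo = val[0] + rhs.val[0];
    uint64_t carry = (sum_lo < val[0]) ? 1u : 0u;
    uint64_t sum_hi = val[1] + rhs.val[1];
    bool carry_out = sum_hi < val[1];
    sum_hi += carry;
    carry_out = carry_out || (sum_hi < carry);
    val[0] = sum_lo;
    val[1] = sum_hi;
    return carry_out;
  }

  // Logical right shift of the full 128-bit value by one bit.
  void shift_right_1() {
    val[0] = (val[0] >> 1) | (val[1] << 63);
    val[1] >>= 1;
  }
};

int main() {
  // Two normalized mantissas (leading bit set); their sum overflows 128 bits.
  Mantissa128 m = {{0x123456789abcdef0ULL, 0x8000000000000000ULL}};
  Mantissa128 n = {{0x0fedcba987654321ULL, 0x8000000000000000ULL}};
  int exponent = 0;

  if (m.add(n)) {
    // Mantissa addition overflow: drop the lowest bit, bump the exponent, and
    // put the carried-out bit back as the new leading bit of the top word.
    m.shift_right_1();
    ++exponent;
    m.val[Mantissa128::WORD_COUNT - 1] |= (uint64_t(1) << 63);
  }
  std::printf("mantissa = %016llx_%016llx, exponent adjustment = %d\n",
              (unsigned long long)m.val[1], (unsigned long long)m.val[0],
              exponent);
  return 0;
}
```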
@@ -243,7 +243,7 @@ constexpr DyadicFloat<Bits> quick_add(DyadicFloat<Bits> a,
 //   result.mantissa = quick_mul_hi(a.mantissa + b.mantissa)
 //                   ~ (full product a.mantissa * b.mantissa) >> Bits.
 // The errors compared to the mathematical product is bounded by:
-//   2 * errors of quick_mul_hi = 2 * (UInt<Bits>::WORDCOUNT - 1) in ULPs.
+//   2 * errors of quick_mul_hi = 2 * (UInt<Bits>::WORD_COUNT - 1) in ULPs.
 // Assume inputs are normalized (by constructors or other functions) so that we
 // don't need to normalize the inputs again in this function. If the inputs are
 // not normalized, the results might lose precision significantly.
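Note (not part of the patch): one way to read the bound in the comment above is that quick_mul_hi keeps only the high half of the full double-width product and drops the partial products that land in the discarded low half, so the carries those would have propagated upward are lost, costing at most WORD_COUNT - 1 ULPs of the kept half. The toy model below uses 16-bit words and a 2-word value so the exact product fits in `uint64_t` and the truncation error can be checked directly; the word size and the exact set of skipped partial products are assumptions made for the example, not the libc `UInt::quick_mul_hi` code.

```cpp
#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Toy model: 16-bit words, 2-word (32-bit) values, so the exact 64-bit full
// product is available as a reference for the truncation error.
constexpr int WORD_COUNT = 2;

// "Quick" high half of a 32x32 -> 64 bit product: the lo*lo partial product is
// never computed, so any carry it would have pushed into the kept half is lost.
uint32_t toy_quick_mul_hi(uint32_t a, uint32_t b) {
  uint32_t a_lo = a & 0xffff, a_hi = a >> 16;
  uint32_t b_lo = b & 0xffff, b_hi = b >> 16;
  uint64_t cross = (uint64_t)a_hi * b_lo + (uint64_t)a_lo * b_hi;
  // a_lo * b_lo is skipped entirely; only its carry could reach the high half.
  return (uint32_t)((uint64_t)a_hi * b_hi + (cross >> 16));
}

int main() {
  uint64_t max_err = 0;
  for (int i = 0; i < 1000000; ++i) {
    uint32_t a = ((uint32_t)rand() << 16) ^ (uint32_t)rand();
    uint32_t b = ((uint32_t)rand() << 16) ^ (uint32_t)rand();
    uint64_t exact_hi = ((uint64_t)a * b) >> 32;          // true high half
    uint64_t err = exact_hi - toy_quick_mul_hi(a, b);     // never negative here
    if (err > max_err)
      max_err = err;
  }
  std::printf("max truncation error: %llu ULPs (bound: WORD_COUNT - 1 = %d)\n",
              (unsigned long long)max_err, WORD_COUNT - 1);
  return 0;
}
```

In this two-word toy the error can reach, but never exceeds, WORD_COUNT - 1 = 1 ULP (for example, a = b = 0xffffffff loses exactly one carry).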
@@ -258,7 +258,7 @@ constexpr DyadicFloat<Bits> quick_mul(DyadicFloat<Bits> a,
   result.mantissa = a.mantissa.quick_mul_hi(b.mantissa);
   // Check the leading bit directly, should be faster than using clz in
   // normalize().
-  if (result.mantissa.val[DyadicFloat<Bits>::MantissaType::WORDCOUNT - 1] >>
+  if (result.mantissa.val[DyadicFloat<Bits>::MantissaType::WORD_COUNT - 1] >>
           63 ==
       0)
     result.shift_left(1);
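Note (not part of the patch): the single-bit check above works because quick_mul assumes both inputs are normalized. If each mantissa lies in [2^(Bits-1), 2^Bits), the full product lies in [2^(2*Bits-2), 2^(2*Bits)), so the kept high Bits have their leading bit at position Bits-1 or Bits-2. At most one shift_left(1) is ever needed, and testing bit 63 of val[WORD_COUNT - 1] is enough. A small sketch with Bits = 64, relying on the GCC/Clang __int128 extension (an assumption of the example, not the libc code), illustrates this:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // Two normalized 64-bit mantissas (top bit set), chosen near the low end of
  // the normalized range, where the product's high half is smallest.
  uint64_t a = 0x8000000000000001ULL;
  uint64_t b = 0x8000000000000000ULL;
  int exponent = 0; // stands in for a.exponent + b.exponent + Bits

  // High 64 bits of the 128-bit product (what a quick_mul_hi-style call keeps).
  uint64_t hi = (uint64_t)(((unsigned __int128)a * b) >> 64);

  assert(hi >= (uint64_t(1) << 62)); // leading bit is at position 63 or 62
  if ((hi >> 63) == 0) {
    // Cheaper than a clz-based normalize(): one left shift always suffices.
    hi <<= 1;
    --exponent;
  }
  assert((hi >> 63) == 1); // normalized again
  (void)exponent;
  return 0;
}
```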