  1. // Copyright (c) 2017 The Go Authors. All rights reserved.
  2. // Use of this source code is governed by a BSD-style
  3. // license that can be found in the LICENSE file.
  4. package field
  5. import "math/bits"
// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
// bits.Mul64 and bits.Add64 intrinsics.
type uint128 struct {
	lo, hi uint64 // lo holds bits 0–63, hi holds bits 64–127
}
  11. // mul64 returns a * b.
  12. func mul64(a, b uint64) uint128 {
  13. hi, lo := bits.Mul64(a, b)
  14. return uint128{lo, hi}
  15. }
  16. // addMul64 returns v + a * b.
  17. func addMul64(v uint128, a, b uint64) uint128 {
  18. hi, lo := bits.Mul64(a, b)
  19. lo, c := bits.Add64(lo, v.lo, 0)
  20. hi, _ = bits.Add64(hi, v.hi, c)
  21. return uint128{lo, hi}
  22. }
  23. // shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits.
  24. func shiftRightBy51(a uint128) uint64 {
  25. return (a.hi << (64 - 51)) | (a.lo >> 51)
  26. }
// feMulGeneric sets v = a * b in GF(2²⁵⁵ - 19), using schoolbook
// multiplication over the five 51-bit limbs followed by an inline
// reduction. It is the portable fallback for platforms without an
// assembly implementation.
func feMulGeneric(v, a, b *Element) {
	a0 := a.l0
	a1 := a.l1
	a2 := a.l2
	a3 := a.l3
	a4 := a.l4

	b0 := b.l0
	b1 := b.l1
	b2 := b.l2
	b3 := b.l3
	b4 := b.l4

	// Limb multiplication works like pen-and-paper columnar multiplication, but
	// with 51-bit limbs instead of digits.
	//
	//                          a4   a3   a2   a1   a0  x
	//                          b4   b3   b2   b1   b0  =
	//                         ------------------------
	//                        a4b0 a3b0 a2b0 a1b0 a0b0  +
	//                   a4b1 a3b1 a2b1 a1b1 a0b1       +
	//              a4b2 a3b2 a2b2 a1b2 a0b2            +
	//         a4b3 a3b3 a2b3 a1b3 a0b3                 +
	//    a4b4 a3b4 a2b4 a1b4 a0b4                      =
	//   ----------------------------------------------
	//      r8   r7   r6   r5   r4   r3   r2   r1   r0
	//
	// We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to
	// reduce the limbs that would overflow 255 bits. r5 * 2²⁵⁵ becomes 19 * r5,
	// r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc.
	//
	// Reduction can be carried out simultaneously to multiplication. For
	// example, we do not compute r5: whenever the result of a multiplication
	// belongs to r5, like a1b4, we multiply it by 19 and add the result to r0.
	//
	//            a4b0    a3b0    a2b0    a1b0    a0b0  +
	//            a3b1    a2b1    a1b1    a0b1 19×a4b1  +
	//            a2b2    a1b2    a0b2 19×a4b2 19×a3b2  +
	//            a1b3    a0b3 19×a4b3 19×a3b3 19×a2b3  +
	//            a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4  =
	//           --------------------------------------
	//              r4      r3      r2      r1      r0
	//
	// Finally we add up the columns into wide, overlapping limbs.

	a1_19 := a1 * 19
	a2_19 := a2 * 19
	a3_19 := a3 * 19
	a4_19 := a4 * 19

	// r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
	r0 := mul64(a0, b0)
	r0 = addMul64(r0, a1_19, b4)
	r0 = addMul64(r0, a2_19, b3)
	r0 = addMul64(r0, a3_19, b2)
	r0 = addMul64(r0, a4_19, b1)

	// r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2)
	r1 := mul64(a0, b1)
	r1 = addMul64(r1, a1, b0)
	r1 = addMul64(r1, a2_19, b4)
	r1 = addMul64(r1, a3_19, b3)
	r1 = addMul64(r1, a4_19, b2)

	// r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3)
	r2 := mul64(a0, b2)
	r2 = addMul64(r2, a1, b1)
	r2 = addMul64(r2, a2, b0)
	r2 = addMul64(r2, a3_19, b4)
	r2 = addMul64(r2, a4_19, b3)

	// r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4
	r3 := mul64(a0, b3)
	r3 = addMul64(r3, a1, b2)
	r3 = addMul64(r3, a2, b1)
	r3 = addMul64(r3, a3, b0)
	r3 = addMul64(r3, a4_19, b4)

	// r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
	r4 := mul64(a0, b4)
	r4 = addMul64(r4, a1, b3)
	r4 = addMul64(r4, a2, b2)
	r4 = addMul64(r4, a3, b1)
	r4 = addMul64(r4, a4, b0)

	// After the multiplication, we need to reduce (carry) the five coefficients
	// to obtain a result with limbs that are at most slightly larger than 2⁵¹,
	// to respect the Element invariant.
	//
	// Overall, the reduction works the same as carryPropagate, except with
	// wider inputs: we take the carry for each coefficient by shifting it right
	// by 51, and add it to the limb above it. The top carry is multiplied by 19
	// according to the reduction identity and added to the lowest limb.
	//
	// The largest coefficient (r0) will be at most 111 bits, which guarantees
	// that all carries are at most 111 - 51 = 60 bits, which fits in a uint64.
	//
	//     r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
	//     r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²)
	//     r0 < (1 + 19 × 4) × 2⁵² × 2⁵²
	//     r0 < 2⁷ × 2⁵² × 2⁵²
	//     r0 < 2¹¹¹
	//
	// Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most
	// 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and
	// allows us to easily apply the reduction identity.
	//
	//     r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
	//     r4 < 5 × 2⁵² × 2⁵²
	//     r4 < 2¹⁰⁷
	//
	c0 := shiftRightBy51(r0)
	c1 := shiftRightBy51(r1)
	c2 := shiftRightBy51(r2)
	c3 := shiftRightBy51(r3)
	c4 := shiftRightBy51(r4)

	rr0 := r0.lo&maskLow51Bits + c4*19
	rr1 := r1.lo&maskLow51Bits + c0
	rr2 := r2.lo&maskLow51Bits + c1
	rr3 := r3.lo&maskLow51Bits + c2
	rr4 := r4.lo&maskLow51Bits + c3

	// Now all coefficients fit into 64-bit registers but are still too large to
	// be passed around as an Element. We therefore do one last carry chain,
	// where the carries will be small enough to fit in the wiggle room above 2⁵¹.
	*v = Element{rr0, rr1, rr2, rr3, rr4}
	v.carryPropagate()
}
// feSquareGeneric sets v = a * a in GF(2²⁵⁵ - 19). It mirrors feMulGeneric
// but exploits the symmetry of squaring (aᵢaⱼ appears twice for i ≠ j) to
// cut the number of 64-bit multiplications roughly in half.
func feSquareGeneric(v, a *Element) {
	l0 := a.l0
	l1 := a.l1
	l2 := a.l2
	l3 := a.l3
	l4 := a.l4

	// Squaring works precisely like multiplication above, but thanks to its
	// symmetry we get to group a few terms together.
	//
	//                          l4   l3   l2   l1   l0  x
	//                          l4   l3   l2   l1   l0  =
	//                         ------------------------
	//                        l4l0 l3l0 l2l0 l1l0 l0l0  +
	//                   l4l1 l3l1 l2l1 l1l1 l0l1       +
	//              l4l2 l3l2 l2l2 l1l2 l0l2            +
	//         l4l3 l3l3 l2l3 l1l3 l0l3                 +
	//    l4l4 l3l4 l2l4 l1l4 l0l4                      =
	//   ----------------------------------------------
	//      r8   r7   r6   r5   r4   r3   r2   r1   r0
	//
	//            l4l0    l3l0    l2l0    l1l0    l0l0  +
	//            l3l1    l2l1    l1l1    l0l1 19×l4l1  +
	//            l2l2    l1l2    l0l2 19×l4l2 19×l3l2  +
	//            l1l3    l0l3 19×l4l3 19×l3l3 19×l2l3  +
	//            l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4  =
	//           --------------------------------------
	//              r4      r3      r2      r1      r0
	//
	// With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with
	// only three Mul64 and four Add64, instead of five and eight.

	l0_2 := l0 * 2
	l1_2 := l1 * 2

	l1_38 := l1 * 38
	l2_38 := l2 * 38
	l3_38 := l3 * 38

	l3_19 := l3 * 19
	l4_19 := l4 * 19

	// r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3)
	r0 := mul64(l0, l0)
	r0 = addMul64(r0, l1_38, l4)
	r0 = addMul64(r0, l2_38, l3)

	// r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3
	r1 := mul64(l0_2, l1)
	r1 = addMul64(r1, l2_38, l4)
	r1 = addMul64(r1, l3_19, l3)

	// r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4
	r2 := mul64(l0_2, l2)
	r2 = addMul64(r2, l1, l1)
	r2 = addMul64(r2, l3_38, l4)

	// r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4
	r3 := mul64(l0_2, l3)
	r3 = addMul64(r3, l1_2, l2)
	r3 = addMul64(r3, l4_19, l4)

	// r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2
	r4 := mul64(l0_2, l4)
	r4 = addMul64(r4, l1_2, l3)
	r4 = addMul64(r4, l2, l2)

	// Carry/reduce the wide coefficients exactly as in feMulGeneric: the
	// carry of each limb moves up one position, and the top carry wraps to
	// the bottom multiplied by 19.
	c0 := shiftRightBy51(r0)
	c1 := shiftRightBy51(r1)
	c2 := shiftRightBy51(r2)
	c3 := shiftRightBy51(r3)
	c4 := shiftRightBy51(r4)

	rr0 := r0.lo&maskLow51Bits + c4*19
	rr1 := r1.lo&maskLow51Bits + c0
	rr2 := r2.lo&maskLow51Bits + c1
	rr3 := r3.lo&maskLow51Bits + c2
	rr4 := r4.lo&maskLow51Bits + c3

	// One final carry chain brings every limb back within the wiggle room
	// above 2⁵¹ required by the Element invariant.
	*v = Element{rr0, rr1, rr2, rr3, rr4}
	v.carryPropagate()
}
  215. // carryPropagateGeneric brings the limbs below 52 bits by applying the reduction
  216. // identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry.
  217. func (v *Element) carryPropagateGeneric() *Element {
  218. c0 := v.l0 >> 51
  219. c1 := v.l1 >> 51
  220. c2 := v.l2 >> 51
  221. c3 := v.l3 >> 51
  222. c4 := v.l4 >> 51
  223. // c4 is at most 64 - 51 = 13 bits, so c4*19 is at most 18 bits, and
  224. // the final l0 will be at most 52 bits. Similarly for the rest.
  225. v.l0 = v.l0&maskLow51Bits + c4*19
  226. v.l1 = v.l1&maskLow51Bits + c0
  227. v.l2 = v.l2&maskLow51Bits + c1
  228. v.l3 = v.l3&maskLow51Bits + c2
  229. v.l4 = v.l4&maskLow51Bits + c3
  230. return v
  231. }