/* ecp_nistp256.c */
  1. /*
  2. * Copyright 2011-2020 The OpenSSL Project Authors. All Rights Reserved.
  3. *
  4. * Licensed under the OpenSSL license (the "License"). You may not use
  5. * this file except in compliance with the License. You can obtain a copy
  6. * in the file LICENSE in the source distribution or at
  7. * https://www.openssl.org/source/license.html
  8. */
  9. /* Copyright 2011 Google Inc.
  10. *
  11. * Licensed under the Apache License, Version 2.0 (the "License");
  12. *
  13. * you may not use this file except in compliance with the License.
  14. * You may obtain a copy of the License at
  15. *
  16. * http://www.apache.org/licenses/LICENSE-2.0
  17. *
  18. * Unless required by applicable law or agreed to in writing, software
  19. * distributed under the License is distributed on an "AS IS" BASIS,
  20. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  21. * See the License for the specific language governing permissions and
  22. * limitations under the License.
  23. */
  24. /*
  25. * A 64-bit implementation of the NIST P-256 elliptic curve point multiplication
  26. *
  27. * OpenSSL integration was taken from Emilia Kasper's work in ecp_nistp224.c.
  28. * Otherwise based on Emilia's P224 work, which was inspired by my curve25519
  29. * work which got its smarts from Daniel J. Bernstein's work on the same.
  30. */
  31. #include <openssl/opensslconf.h>
  32. #ifdef OPENSSL_NO_EC_NISTP_64_GCC_128
  33. NON_EMPTY_TRANSLATION_UNIT
  34. #else
  35. # include <stdint.h>
  36. # include <string.h>
  37. # include <openssl/err.h>
  38. # include "ec_local.h"
  39. # if defined(__SIZEOF_INT128__) && __SIZEOF_INT128__==16
  40. /* even with gcc, the typedef won't work for 32-bit platforms */
  41. typedef __uint128_t uint128_t; /* nonstandard; implemented by gcc on 64-bit
  42. * platforms */
  43. typedef __int128_t int128_t;
  44. # else
  45. # error "Your compiler doesn't appear to support 128-bit integer types"
  46. # endif
  47. typedef uint8_t u8;
  48. typedef uint32_t u32;
  49. typedef uint64_t u64;
  50. /*
  51. * The underlying field. P256 operates over GF(2^256-2^224+2^192+2^96-1). We
  52. * can serialise an element of this field into 32 bytes. We call this an
  53. * felem_bytearray.
  54. */
  55. typedef u8 felem_bytearray[32];
  56. /*
  57. * These are the parameters of P256, taken from FIPS 186-3, page 86. These
  58. * values are big-endian.
  59. */
  60. static const felem_bytearray nistp256_curve_params[5] = {
  61. {0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, /* p */
  62. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  63. 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
  64. 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
  65. {0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, /* a = -3 */
  66. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  67. 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
  68. 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc},
  69. {0x5a, 0xc6, 0x35, 0xd8, 0xaa, 0x3a, 0x93, 0xe7, /* b */
  70. 0xb3, 0xeb, 0xbd, 0x55, 0x76, 0x98, 0x86, 0xbc,
  71. 0x65, 0x1d, 0x06, 0xb0, 0xcc, 0x53, 0xb0, 0xf6,
  72. 0x3b, 0xce, 0x3c, 0x3e, 0x27, 0xd2, 0x60, 0x4b},
  73. {0x6b, 0x17, 0xd1, 0xf2, 0xe1, 0x2c, 0x42, 0x47, /* x */
  74. 0xf8, 0xbc, 0xe6, 0xe5, 0x63, 0xa4, 0x40, 0xf2,
  75. 0x77, 0x03, 0x7d, 0x81, 0x2d, 0xeb, 0x33, 0xa0,
  76. 0xf4, 0xa1, 0x39, 0x45, 0xd8, 0x98, 0xc2, 0x96},
  77. {0x4f, 0xe3, 0x42, 0xe2, 0xfe, 0x1a, 0x7f, 0x9b, /* y */
  78. 0x8e, 0xe7, 0xeb, 0x4a, 0x7c, 0x0f, 0x9e, 0x16,
  79. 0x2b, 0xce, 0x33, 0x57, 0x6b, 0x31, 0x5e, 0xce,
  80. 0xcb, 0xb6, 0x40, 0x68, 0x37, 0xbf, 0x51, 0xf5}
  81. };
  82. /*-
  83. * The representation of field elements.
  84. * ------------------------------------
  85. *
  86. * We represent field elements with either four 128-bit values, eight 128-bit
  87. * values, or four 64-bit values. The field element represented is:
  88. * v[0]*2^0 + v[1]*2^64 + v[2]*2^128 + v[3]*2^192 (mod p)
  89. * or:
  90. * v[0]*2^0 + v[1]*2^64 + v[2]*2^128 + ... + v[8]*2^512 (mod p)
  91. *
  92. * 128-bit values are called 'limbs'. Since the limbs are spaced only 64 bits
  93. * apart, but are 128-bits wide, the most significant bits of each limb overlap
  94. * with the least significant bits of the next.
  95. *
  96. * A field element with four limbs is an 'felem'. One with eight limbs is a
  97. * 'longfelem'
  98. *
  99. * A field element with four, 64-bit values is called a 'smallfelem'. Small
  100. * values are used as intermediate values before multiplication.
  101. */
  102. # define NLIMBS 4
  103. typedef uint128_t limb;
  104. typedef limb felem[NLIMBS];
  105. typedef limb longfelem[NLIMBS * 2];
  106. typedef u64 smallfelem[NLIMBS];
  107. /* This is the value of the prime as four 64-bit words, little-endian. */
  108. static const u64 kPrime[4] =
  109. { 0xfffffffffffffffful, 0xffffffff, 0, 0xffffffff00000001ul };
  110. static const u64 bottom63bits = 0x7ffffffffffffffful;
  111. /*
  112. * bin32_to_felem takes a little-endian byte array and converts it into felem
  113. * form. This assumes that the CPU is little-endian.
  114. */
  115. static void bin32_to_felem(felem out, const u8 in[32])
  116. {
  117. out[0] = *((u64 *)&in[0]);
  118. out[1] = *((u64 *)&in[8]);
  119. out[2] = *((u64 *)&in[16]);
  120. out[3] = *((u64 *)&in[24]);
  121. }
  122. /*
  123. * smallfelem_to_bin32 takes a smallfelem and serialises into a little
  124. * endian, 32 byte array. This assumes that the CPU is little-endian.
  125. */
  126. static void smallfelem_to_bin32(u8 out[32], const smallfelem in)
  127. {
  128. *((u64 *)&out[0]) = in[0];
  129. *((u64 *)&out[8]) = in[1];
  130. *((u64 *)&out[16]) = in[2];
  131. *((u64 *)&out[24]) = in[3];
  132. }
  133. /* BN_to_felem converts an OpenSSL BIGNUM into an felem */
  134. static int BN_to_felem(felem out, const BIGNUM *bn)
  135. {
  136. felem_bytearray b_out;
  137. int num_bytes;
  138. if (BN_is_negative(bn)) {
  139. ECerr(EC_F_BN_TO_FELEM, EC_R_BIGNUM_OUT_OF_RANGE);
  140. return 0;
  141. }
  142. num_bytes = BN_bn2lebinpad(bn, b_out, sizeof(b_out));
  143. if (num_bytes < 0) {
  144. ECerr(EC_F_BN_TO_FELEM, EC_R_BIGNUM_OUT_OF_RANGE);
  145. return 0;
  146. }
  147. bin32_to_felem(out, b_out);
  148. return 1;
  149. }
  150. /* felem_to_BN converts an felem into an OpenSSL BIGNUM */
  151. static BIGNUM *smallfelem_to_BN(BIGNUM *out, const smallfelem in)
  152. {
  153. felem_bytearray b_out;
  154. smallfelem_to_bin32(b_out, in);
  155. return BN_lebin2bn(b_out, sizeof(b_out), out);
  156. }
  157. /*-
  158. * Field operations
  159. * ----------------
  160. */
  161. static void smallfelem_one(smallfelem out)
  162. {
  163. out[0] = 1;
  164. out[1] = 0;
  165. out[2] = 0;
  166. out[3] = 0;
  167. }
  168. static void smallfelem_assign(smallfelem out, const smallfelem in)
  169. {
  170. out[0] = in[0];
  171. out[1] = in[1];
  172. out[2] = in[2];
  173. out[3] = in[3];
  174. }
  175. static void felem_assign(felem out, const felem in)
  176. {
  177. out[0] = in[0];
  178. out[1] = in[1];
  179. out[2] = in[2];
  180. out[3] = in[3];
  181. }
  182. /* felem_sum sets out = out + in. */
  183. static void felem_sum(felem out, const felem in)
  184. {
  185. out[0] += in[0];
  186. out[1] += in[1];
  187. out[2] += in[2];
  188. out[3] += in[3];
  189. }
  190. /* felem_small_sum sets out = out + in. */
  191. static void felem_small_sum(felem out, const smallfelem in)
  192. {
  193. out[0] += in[0];
  194. out[1] += in[1];
  195. out[2] += in[2];
  196. out[3] += in[3];
  197. }
  198. /* felem_scalar sets out = out * scalar */
  199. static void felem_scalar(felem out, const u64 scalar)
  200. {
  201. out[0] *= scalar;
  202. out[1] *= scalar;
  203. out[2] *= scalar;
  204. out[3] *= scalar;
  205. }
  206. /* longfelem_scalar sets out = out * scalar */
  207. static void longfelem_scalar(longfelem out, const u64 scalar)
  208. {
  209. out[0] *= scalar;
  210. out[1] *= scalar;
  211. out[2] *= scalar;
  212. out[3] *= scalar;
  213. out[4] *= scalar;
  214. out[5] *= scalar;
  215. out[6] *= scalar;
  216. out[7] *= scalar;
  217. }
  218. # define two105m41m9 (((limb)1) << 105) - (((limb)1) << 41) - (((limb)1) << 9)
  219. # define two105 (((limb)1) << 105)
  220. # define two105m41p9 (((limb)1) << 105) - (((limb)1) << 41) + (((limb)1) << 9)
  221. /* zero105 is 0 mod p */
  222. static const felem zero105 =
  223. { two105m41m9, two105, two105m41p9, two105m41p9 };
  224. /*-
  225. * smallfelem_neg sets |out| to |-small|
  226. * On exit:
  227. * out[i] < out[i] + 2^105
  228. */
  229. static void smallfelem_neg(felem out, const smallfelem small)
  230. {
  231. /* In order to prevent underflow, we subtract from 0 mod p. */
  232. out[0] = zero105[0] - small[0];
  233. out[1] = zero105[1] - small[1];
  234. out[2] = zero105[2] - small[2];
  235. out[3] = zero105[3] - small[3];
  236. }
  237. /*-
  238. * felem_diff subtracts |in| from |out|
  239. * On entry:
  240. * in[i] < 2^104
  241. * On exit:
  242. * out[i] < out[i] + 2^105
  243. */
  244. static void felem_diff(felem out, const felem in)
  245. {
  246. /*
  247. * In order to prevent underflow, we add 0 mod p before subtracting.
  248. */
  249. out[0] += zero105[0];
  250. out[1] += zero105[1];
  251. out[2] += zero105[2];
  252. out[3] += zero105[3];
  253. out[0] -= in[0];
  254. out[1] -= in[1];
  255. out[2] -= in[2];
  256. out[3] -= in[3];
  257. }
  258. # define two107m43m11 (((limb)1) << 107) - (((limb)1) << 43) - (((limb)1) << 11)
  259. # define two107 (((limb)1) << 107)
  260. # define two107m43p11 (((limb)1) << 107) - (((limb)1) << 43) + (((limb)1) << 11)
  261. /* zero107 is 0 mod p */
  262. static const felem zero107 =
  263. { two107m43m11, two107, two107m43p11, two107m43p11 };
  264. /*-
  265. * An alternative felem_diff for larger inputs |in|
  266. * felem_diff_zero107 subtracts |in| from |out|
  267. * On entry:
  268. * in[i] < 2^106
  269. * On exit:
  270. * out[i] < out[i] + 2^107
  271. */
  272. static void felem_diff_zero107(felem out, const felem in)
  273. {
  274. /*
  275. * In order to prevent underflow, we add 0 mod p before subtracting.
  276. */
  277. out[0] += zero107[0];
  278. out[1] += zero107[1];
  279. out[2] += zero107[2];
  280. out[3] += zero107[3];
  281. out[0] -= in[0];
  282. out[1] -= in[1];
  283. out[2] -= in[2];
  284. out[3] -= in[3];
  285. }
  286. /*-
  287. * longfelem_diff subtracts |in| from |out|
  288. * On entry:
  289. * in[i] < 7*2^67
  290. * On exit:
  291. * out[i] < out[i] + 2^70 + 2^40
  292. */
  293. static void longfelem_diff(longfelem out, const longfelem in)
  294. {
  295. static const limb two70m8p6 =
  296. (((limb) 1) << 70) - (((limb) 1) << 8) + (((limb) 1) << 6);
  297. static const limb two70p40 = (((limb) 1) << 70) + (((limb) 1) << 40);
  298. static const limb two70 = (((limb) 1) << 70);
  299. static const limb two70m40m38p6 =
  300. (((limb) 1) << 70) - (((limb) 1) << 40) - (((limb) 1) << 38) +
  301. (((limb) 1) << 6);
  302. static const limb two70m6 = (((limb) 1) << 70) - (((limb) 1) << 6);
  303. /* add 0 mod p to avoid underflow */
  304. out[0] += two70m8p6;
  305. out[1] += two70p40;
  306. out[2] += two70;
  307. out[3] += two70m40m38p6;
  308. out[4] += two70m6;
  309. out[5] += two70m6;
  310. out[6] += two70m6;
  311. out[7] += two70m6;
  312. /* in[i] < 7*2^67 < 2^70 - 2^40 - 2^38 + 2^6 */
  313. out[0] -= in[0];
  314. out[1] -= in[1];
  315. out[2] -= in[2];
  316. out[3] -= in[3];
  317. out[4] -= in[4];
  318. out[5] -= in[5];
  319. out[6] -= in[6];
  320. out[7] -= in[7];
  321. }
  322. # define two64m0 (((limb)1) << 64) - 1
  323. # define two110p32m0 (((limb)1) << 110) + (((limb)1) << 32) - 1
  324. # define two64m46 (((limb)1) << 64) - (((limb)1) << 46)
  325. # define two64m32 (((limb)1) << 64) - (((limb)1) << 32)
  326. /* zero110 is 0 mod p */
  327. static const felem zero110 = { two64m0, two110p32m0, two64m46, two64m32 };
/*-
 * felem_shrink converts an felem into a smallfelem. The result isn't quite
 * minimal as the value may be greater than p.
 *
 * The whole routine is branch-free: the conditional subtraction of kPrime
 * is done with an all-ones/all-zeros mask, keeping it constant-time.
 *
 * On entry:
 *   in[i] < 2^109
 * On exit:
 *   out[i] < 2^64
 */
static void felem_shrink(smallfelem out, const felem in)
{
    felem tmp;
    u64 a, b, mask;
    u64 high, low;
    static const u64 kPrime3Test = 0x7fffffff00000001ul; /* 2^63 - 2^32 + 1 */

    /* Carry 2->3 */
    tmp[3] = zero110[3] + in[3] + ((u64)(in[2] >> 64));
    /* tmp[3] < 2^110 */

    tmp[2] = zero110[2] + (u64)in[2];
    tmp[0] = zero110[0] + in[0];
    tmp[1] = zero110[1] + in[1];
    /* tmp[0] < 2**110, tmp[1] < 2^111, tmp[2] < 2**65 */

    /*
     * We perform two partial reductions where we eliminate the high-word of
     * tmp[3]. We don't update the other words till the end.
     */
    a = tmp[3] >> 64;           /* a < 2^46 */
    tmp[3] = (u64)tmp[3];
    tmp[3] -= a;
    tmp[3] += ((limb) a) << 32;
    /* tmp[3] < 2^79 */

    b = a;
    a = tmp[3] >> 64;           /* a < 2^15 */
    b += a;                     /* b < 2^46 + 2^15 < 2^47 */
    tmp[3] = (u64)tmp[3];
    tmp[3] -= a;
    tmp[3] += ((limb) a) << 32;
    /* tmp[3] < 2^64 + 2^47 */

    /*
     * This adjusts the other two words to complete the two partial
     * reductions.
     */
    tmp[0] += b;
    tmp[1] -= (((limb) b) << 32);

    /*
     * In order to make space in tmp[3] for the carry from 2 -> 3, we
     * conditionally subtract kPrime if tmp[3] is large enough.
     */
    high = (u64)(tmp[3] >> 64);
    /* As tmp[3] < 2^65, high is either 1 or 0 */
    high = 0 - high;
    /*-
     * high is:
     *   all ones   if the high word of tmp[3] is 1
     *   all zeros  if the high word of tmp[3] if 0
     */
    low = (u64)tmp[3];
    mask = 0 - (low >> 63);
    /*-
     * mask is:
     *   all ones   if the MSB of low is 1
     *   all zeros  if the MSB of low if 0
     */
    low &= bottom63bits;
    low -= kPrime3Test;
    /* if low was greater than kPrime3Test then the MSB is zero */
    low = ~low;
    low = 0 - (low >> 63);
    /*-
     * low is:
     *   all ones   if low was > kPrime3Test
     *   all zeros  if low was <= kPrime3Test
     */
    mask = (mask & low) | high;
    /* Constant-time conditional subtraction of kPrime, gated by |mask|. */
    tmp[0] -= mask & kPrime[0];
    tmp[1] -= mask & kPrime[1];
    /* kPrime[2] is zero, so omitted */
    tmp[3] -= mask & kPrime[3];
    /* tmp[3] < 2**64 - 2**32 + 1 */

    /* Propagate the carries from limb to limb, truncating each to 64 bits. */
    tmp[1] += ((u64)(tmp[0] >> 64));
    tmp[0] = (u64)tmp[0];
    tmp[2] += ((u64)(tmp[1] >> 64));
    tmp[1] = (u64)tmp[1];
    tmp[3] += ((u64)(tmp[2] >> 64));
    tmp[2] = (u64)tmp[2];
    /* tmp[i] < 2^64 */

    out[0] = tmp[0];
    out[1] = tmp[1];
    out[2] = tmp[2];
    out[3] = tmp[3];
}
  419. /* smallfelem_expand converts a smallfelem to an felem */
  420. static void smallfelem_expand(felem out, const smallfelem in)
  421. {
  422. out[0] = in[0];
  423. out[1] = in[1];
  424. out[2] = in[2];
  425. out[3] = in[3];
  426. }
/*-
 * smallfelem_square sets |out| = |small|^2
 *
 * Schoolbook squaring: each cross term small[i]*small[j] (i != j) appears
 * twice in the square, so those partial products are doubled as they are
 * accumulated.  The accumulation order below keeps every out[i] within the
 * stated bound; do not reorder.
 *
 * On entry:
 *   small[i] < 2^64
 * On exit:
 *   out[i] < 7 * 2^64 < 2^67
 */
static void smallfelem_square(longfelem out, const smallfelem small)
{
    limb a;
    u64 high, low;

    /* small[0]^2 -> out[0], out[1] */
    a = ((uint128_t) small[0]) * small[0];
    low = a;
    high = a >> 64;
    out[0] = low;
    out[1] = high;

    /* 2 * small[0]*small[1] -> out[1], out[2] (low added twice) */
    a = ((uint128_t) small[0]) * small[1];
    low = a;
    high = a >> 64;
    out[1] += low;
    out[1] += low;
    out[2] = high;

    /* 2 * small[0]*small[2] -> out[2], out[3] (doubled via *= 2) */
    a = ((uint128_t) small[0]) * small[2];
    low = a;
    high = a >> 64;
    out[2] += low;
    out[2] *= 2;
    out[3] = high;

    /* small[0]*small[3] -> out[3], out[4] (doubled later with out[3] *= 2) */
    a = ((uint128_t) small[0]) * small[3];
    low = a;
    high = a >> 64;
    out[3] += low;
    out[4] = high;

    /* small[1]*small[2]; the *= 2 doubles both this and the previous term */
    a = ((uint128_t) small[1]) * small[2];
    low = a;
    high = a >> 64;
    out[3] += low;
    out[3] *= 2;
    out[4] += high;

    /* small[1]^2 -> out[2], out[3] (square term, not doubled) */
    a = ((uint128_t) small[1]) * small[1];
    low = a;
    high = a >> 64;
    out[2] += low;
    out[3] += high;

    /* 2 * small[1]*small[3] -> out[4], out[5] */
    a = ((uint128_t) small[1]) * small[3];
    low = a;
    high = a >> 64;
    out[4] += low;
    out[4] *= 2;
    out[5] = high;

    /* 2 * small[2]*small[3] -> out[5], out[6] (high added twice) */
    a = ((uint128_t) small[2]) * small[3];
    low = a;
    high = a >> 64;
    out[5] += low;
    out[5] *= 2;
    out[6] = high;
    out[6] += high;

    /* small[2]^2 -> out[4], out[5] */
    a = ((uint128_t) small[2]) * small[2];
    low = a;
    high = a >> 64;
    out[4] += low;
    out[5] += high;

    /* small[3]^2 -> out[6], out[7] */
    a = ((uint128_t) small[3]) * small[3];
    low = a;
    high = a >> 64;
    out[6] += low;
    out[7] = high;
}
  495. /*-
  496. * felem_square sets |out| = |in|^2
  497. * On entry:
  498. * in[i] < 2^109
  499. * On exit:
  500. * out[i] < 7 * 2^64 < 2^67
  501. */
  502. static void felem_square(longfelem out, const felem in)
  503. {
  504. u64 small[4];
  505. felem_shrink(small, in);
  506. smallfelem_square(out, small);
  507. }
/*-
 * smallfelem_mul sets |out| = |small1| * |small2|
 *
 * Schoolbook multiplication: every product small1[i]*small2[j] is
 * accumulated into out[i+j] (low word) and out[i+j+1] (high word).  The
 * products are processed in increasing order of i+j, which is what keeps
 * each out[i] below 7 * 2^64 (at most seven 64-bit quantities land in any
 * one word).  Do not reorder.
 *
 * On entry:
 *   small1[i] < 2^64
 *   small2[i] < 2^64
 * On exit:
 *   out[i] < 7 * 2^64 < 2^67
 */
static void smallfelem_mul(longfelem out, const smallfelem small1,
                           const smallfelem small2)
{
    limb a;
    u64 high, low;

    /* i+j == 0 */
    a = ((uint128_t) small1[0]) * small2[0];
    low = a;
    high = a >> 64;
    out[0] = low;
    out[1] = high;

    /* i+j == 1 */
    a = ((uint128_t) small1[0]) * small2[1];
    low = a;
    high = a >> 64;
    out[1] += low;
    out[2] = high;

    a = ((uint128_t) small1[1]) * small2[0];
    low = a;
    high = a >> 64;
    out[1] += low;
    out[2] += high;

    /* i+j == 2 */
    a = ((uint128_t) small1[0]) * small2[2];
    low = a;
    high = a >> 64;
    out[2] += low;
    out[3] = high;

    a = ((uint128_t) small1[1]) * small2[1];
    low = a;
    high = a >> 64;
    out[2] += low;
    out[3] += high;

    a = ((uint128_t) small1[2]) * small2[0];
    low = a;
    high = a >> 64;
    out[2] += low;
    out[3] += high;

    /* i+j == 3 */
    a = ((uint128_t) small1[0]) * small2[3];
    low = a;
    high = a >> 64;
    out[3] += low;
    out[4] = high;

    a = ((uint128_t) small1[1]) * small2[2];
    low = a;
    high = a >> 64;
    out[3] += low;
    out[4] += high;

    a = ((uint128_t) small1[2]) * small2[1];
    low = a;
    high = a >> 64;
    out[3] += low;
    out[4] += high;

    a = ((uint128_t) small1[3]) * small2[0];
    low = a;
    high = a >> 64;
    out[3] += low;
    out[4] += high;

    /* i+j == 4 */
    a = ((uint128_t) small1[1]) * small2[3];
    low = a;
    high = a >> 64;
    out[4] += low;
    out[5] = high;

    a = ((uint128_t) small1[2]) * small2[2];
    low = a;
    high = a >> 64;
    out[4] += low;
    out[5] += high;

    a = ((uint128_t) small1[3]) * small2[1];
    low = a;
    high = a >> 64;
    out[4] += low;
    out[5] += high;

    /* i+j == 5 */
    a = ((uint128_t) small1[2]) * small2[3];
    low = a;
    high = a >> 64;
    out[5] += low;
    out[6] = high;

    a = ((uint128_t) small1[3]) * small2[2];
    low = a;
    high = a >> 64;
    out[5] += low;
    out[6] += high;

    /* i+j == 6 */
    a = ((uint128_t) small1[3]) * small2[3];
    low = a;
    high = a >> 64;
    out[6] += low;
    out[7] = high;
}
  602. /*-
  603. * felem_mul sets |out| = |in1| * |in2|
  604. * On entry:
  605. * in1[i] < 2^109
  606. * in2[i] < 2^109
  607. * On exit:
  608. * out[i] < 7 * 2^64 < 2^67
  609. */
  610. static void felem_mul(longfelem out, const felem in1, const felem in2)
  611. {
  612. smallfelem small1, small2;
  613. felem_shrink(small1, in1);
  614. felem_shrink(small2, in2);
  615. smallfelem_mul(out, small1, small2);
  616. }
  617. /*-
  618. * felem_small_mul sets |out| = |small1| * |in2|
  619. * On entry:
  620. * small1[i] < 2^64
  621. * in2[i] < 2^109
  622. * On exit:
  623. * out[i] < 7 * 2^64 < 2^67
  624. */
  625. static void felem_small_mul(longfelem out, const smallfelem small1,
  626. const felem in2)
  627. {
  628. smallfelem small2;
  629. felem_shrink(small2, in2);
  630. smallfelem_mul(out, small1, small2);
  631. }
  632. # define two100m36m4 (((limb)1) << 100) - (((limb)1) << 36) - (((limb)1) << 4)
  633. # define two100 (((limb)1) << 100)
  634. # define two100m36p4 (((limb)1) << 100) - (((limb)1) << 36) + (((limb)1) << 4)
  635. /* zero100 is 0 mod p */
  636. static const felem zero100 =
  637. { two100m36m4, two100, two100m36p4, two100m36p4 };
/*-
 * Internal function for the different flavours of felem_reduce.
 * felem_reduce_ reduces the higher coefficients in[4]-in[7] into in[0]-in[3]
 * using the identities 2^256 = 2^224 - 2^192 - 2^96 + 1 (mod p) and their
 * shifts; the caller must have pre-loaded |out| with a large-enough
 * representation of 0 mod p so the subtractions below cannot underflow.
 * On entry:
 *   out[0] >= in[6] + 2^32*in[6] + in[7] + 2^32*in[7]
 *   out[1] >= in[7] + 2^32*in[4]
 *   out[2] >= in[5] + 2^32*in[5]
 *   out[3] >= in[4] + 2^32*in[5] + 2^32*in[6]
 * On exit:
 *   out[0] <= out[0] + in[4] + 2^32*in[5]
 *   out[1] <= out[1] + in[5] + 2^33*in[6]
 *   out[2] <= out[2] + in[7] + 2*in[6] + 2^33*in[7]
 *   out[3] <= out[3] + 2^32*in[4] + 3*in[7]
 */
static void felem_reduce_(felem out, const longfelem in)
{
    /* signed 128-bit: the combined terms below can be negative */
    int128_t c;
    /* combine common terms from below */
    c = in[4] + (in[5] << 32);
    out[0] += c;
    out[3] -= c;

    c = in[5] - in[7];
    out[1] += c;
    out[2] -= c;

    /* the remaining terms */
    /* 256: [(0,1),(96,-1),(192,-1),(224,1)] */
    out[1] -= (in[4] << 32);
    out[3] += (in[4] << 32);

    /* 320: [(32,1),(64,1),(128,-1),(160,-1),(224,-1)] */
    out[2] -= (in[5] << 32);

    /* 384: [(0,-1),(32,-1),(96,2),(128,2),(224,-1)] */
    out[0] -= in[6];
    out[0] -= (in[6] << 32);
    out[1] += (in[6] << 33);
    out[2] += (in[6] * 2);
    out[3] -= (in[6] << 32);

    /* 448: [(0,-1),(32,-1),(64,-1),(128,1),(160,2),(192,3)] */
    out[0] -= in[7];
    out[0] -= (in[7] << 32);
    out[2] += (in[7] << 33);
    out[3] += (in[7] * 3);
}
  680. /*-
  681. * felem_reduce converts a longfelem into an felem.
  682. * To be called directly after felem_square or felem_mul.
  683. * On entry:
  684. * in[0] < 2^64, in[1] < 3*2^64, in[2] < 5*2^64, in[3] < 7*2^64
  685. * in[4] < 7*2^64, in[5] < 5*2^64, in[6] < 3*2^64, in[7] < 2*64
  686. * On exit:
  687. * out[i] < 2^101
  688. */
  689. static void felem_reduce(felem out, const longfelem in)
  690. {
  691. out[0] = zero100[0] + in[0];
  692. out[1] = zero100[1] + in[1];
  693. out[2] = zero100[2] + in[2];
  694. out[3] = zero100[3] + in[3];
  695. felem_reduce_(out, in);
  696. /*-
  697. * out[0] > 2^100 - 2^36 - 2^4 - 3*2^64 - 3*2^96 - 2^64 - 2^96 > 0
  698. * out[1] > 2^100 - 2^64 - 7*2^96 > 0
  699. * out[2] > 2^100 - 2^36 + 2^4 - 5*2^64 - 5*2^96 > 0
  700. * out[3] > 2^100 - 2^36 + 2^4 - 7*2^64 - 5*2^96 - 3*2^96 > 0
  701. *
  702. * out[0] < 2^100 + 2^64 + 7*2^64 + 5*2^96 < 2^101
  703. * out[1] < 2^100 + 3*2^64 + 5*2^64 + 3*2^97 < 2^101
  704. * out[2] < 2^100 + 5*2^64 + 2^64 + 3*2^65 + 2^97 < 2^101
  705. * out[3] < 2^100 + 7*2^64 + 7*2^96 + 3*2^64 < 2^101
  706. */
  707. }
  708. /*-
  709. * felem_reduce_zero105 converts a larger longfelem into an felem.
  710. * On entry:
  711. * in[0] < 2^71
  712. * On exit:
  713. * out[i] < 2^106
  714. */
  715. static void felem_reduce_zero105(felem out, const longfelem in)
  716. {
  717. out[0] = zero105[0] + in[0];
  718. out[1] = zero105[1] + in[1];
  719. out[2] = zero105[2] + in[2];
  720. out[3] = zero105[3] + in[3];
  721. felem_reduce_(out, in);
  722. /*-
  723. * out[0] > 2^105 - 2^41 - 2^9 - 2^71 - 2^103 - 2^71 - 2^103 > 0
  724. * out[1] > 2^105 - 2^71 - 2^103 > 0
  725. * out[2] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 > 0
  726. * out[3] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 - 2^103 > 0
  727. *
  728. * out[0] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106
  729. * out[1] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106
  730. * out[2] < 2^105 + 2^71 + 2^71 + 2^71 + 2^103 < 2^106
  731. * out[3] < 2^105 + 2^71 + 2^103 + 2^71 < 2^106
  732. */
  733. }
  734. /*
  735. * subtract_u64 sets *result = *result - v and *carry to one if the
  736. * subtraction underflowed.
  737. */
  738. static void subtract_u64(u64 *result, u64 *carry, u64 v)
  739. {
  740. uint128_t r = *result;
  741. r -= v;
  742. *carry = (r >> 64) & 1;
  743. *result = (u64)r;
  744. }
  745. /*
  746. * felem_contract converts |in| to its unique, minimal representation. On
  747. * entry: in[i] < 2^109
  748. */
  749. static void felem_contract(smallfelem out, const felem in)
  750. {
  751. unsigned i;
  752. u64 all_equal_so_far = 0, result = 0, carry;
  753. felem_shrink(out, in);
  754. /* small is minimal except that the value might be > p */
  755. all_equal_so_far--;
  756. /*
  757. * We are doing a constant time test if out >= kPrime. We need to compare
  758. * each u64, from most-significant to least significant. For each one, if
  759. * all words so far have been equal (m is all ones) then a non-equal
  760. * result is the answer. Otherwise we continue.
  761. */
  762. for (i = 3; i < 4; i--) {
  763. u64 equal;
  764. uint128_t a = ((uint128_t) kPrime[i]) - out[i];
  765. /*
  766. * if out[i] > kPrime[i] then a will underflow and the high 64-bits
  767. * will all be set.
  768. */
  769. result |= all_equal_so_far & ((u64)(a >> 64));
  770. /*
  771. * if kPrime[i] == out[i] then |equal| will be all zeros and the
  772. * decrement will make it all ones.
  773. */
  774. equal = kPrime[i] ^ out[i];
  775. equal--;
  776. equal &= equal << 32;
  777. equal &= equal << 16;
  778. equal &= equal << 8;
  779. equal &= equal << 4;
  780. equal &= equal << 2;
  781. equal &= equal << 1;
  782. equal = 0 - (equal >> 63);
  783. all_equal_so_far &= equal;
  784. }
  785. /*
  786. * if all_equal_so_far is still all ones then the two values are equal
  787. * and so out >= kPrime is true.
  788. */
  789. result |= all_equal_so_far;
  790. /* if out >= kPrime then we subtract kPrime. */
  791. subtract_u64(&out[0], &carry, result & kPrime[0]);
  792. subtract_u64(&out[1], &carry, carry);
  793. subtract_u64(&out[2], &carry, carry);
  794. subtract_u64(&out[3], &carry, carry);
  795. subtract_u64(&out[1], &carry, result & kPrime[1]);
  796. subtract_u64(&out[2], &carry, carry);
  797. subtract_u64(&out[3], &carry, carry);
  798. subtract_u64(&out[2], &carry, result & kPrime[2]);
  799. subtract_u64(&out[3], &carry, carry);
  800. subtract_u64(&out[3], &carry, result & kPrime[3]);
  801. }
  802. static void smallfelem_square_contract(smallfelem out, const smallfelem in)
  803. {
  804. longfelem longtmp;
  805. felem tmp;
  806. smallfelem_square(longtmp, in);
  807. felem_reduce(tmp, longtmp);
  808. felem_contract(out, tmp);
  809. }
  810. static void smallfelem_mul_contract(smallfelem out, const smallfelem in1,
  811. const smallfelem in2)
  812. {
  813. longfelem longtmp;
  814. felem tmp;
  815. smallfelem_mul(longtmp, in1, in2);
  816. felem_reduce(tmp, longtmp);
  817. felem_contract(out, tmp);
  818. }
  819. /*-
  820. * felem_is_zero returns a limb with all bits set if |in| == 0 (mod p) and 0
  821. * otherwise.
  822. * On entry:
  823. * small[i] < 2^64
  824. */
  825. static limb smallfelem_is_zero(const smallfelem small)
  826. {
  827. limb result;
  828. u64 is_p;
  829. u64 is_zero = small[0] | small[1] | small[2] | small[3];
  830. is_zero--;
  831. is_zero &= is_zero << 32;
  832. is_zero &= is_zero << 16;
  833. is_zero &= is_zero << 8;
  834. is_zero &= is_zero << 4;
  835. is_zero &= is_zero << 2;
  836. is_zero &= is_zero << 1;
  837. is_zero = 0 - (is_zero >> 63);
  838. is_p = (small[0] ^ kPrime[0]) |
  839. (small[1] ^ kPrime[1]) |
  840. (small[2] ^ kPrime[2]) | (small[3] ^ kPrime[3]);
  841. is_p--;
  842. is_p &= is_p << 32;
  843. is_p &= is_p << 16;
  844. is_p &= is_p << 8;
  845. is_p &= is_p << 4;
  846. is_p &= is_p << 2;
  847. is_p &= is_p << 1;
  848. is_p = 0 - (is_p >> 63);
  849. is_zero |= is_p;
  850. result = is_zero;
  851. result |= ((limb) is_zero) << 64;
  852. return result;
  853. }
  854. static int smallfelem_is_zero_int(const void *small)
  855. {
  856. return (int)(smallfelem_is_zero(small) & ((limb) 1));
  857. }
  858. /*-
  859. * felem_inv calculates |out| = |in|^{-1}
  860. *
  861. * Based on Fermat's Little Theorem:
  862. * a^p = a (mod p)
  863. * a^{p-1} = 1 (mod p)
  864. * a^{p-2} = a^{-1} (mod p)
  865. */
  866. static void felem_inv(felem out, const felem in)
  867. {
  868. felem ftmp, ftmp2;
  869. /* each e_I will hold |in|^{2^I - 1} */
  870. felem e2, e4, e8, e16, e32, e64;
  871. longfelem tmp;
  872. unsigned i;
  873. felem_square(tmp, in);
  874. felem_reduce(ftmp, tmp); /* 2^1 */
  875. felem_mul(tmp, in, ftmp);
  876. felem_reduce(ftmp, tmp); /* 2^2 - 2^0 */
  877. felem_assign(e2, ftmp);
  878. felem_square(tmp, ftmp);
  879. felem_reduce(ftmp, tmp); /* 2^3 - 2^1 */
  880. felem_square(tmp, ftmp);
  881. felem_reduce(ftmp, tmp); /* 2^4 - 2^2 */
  882. felem_mul(tmp, ftmp, e2);
  883. felem_reduce(ftmp, tmp); /* 2^4 - 2^0 */
  884. felem_assign(e4, ftmp);
  885. felem_square(tmp, ftmp);
  886. felem_reduce(ftmp, tmp); /* 2^5 - 2^1 */
  887. felem_square(tmp, ftmp);
  888. felem_reduce(ftmp, tmp); /* 2^6 - 2^2 */
  889. felem_square(tmp, ftmp);
  890. felem_reduce(ftmp, tmp); /* 2^7 - 2^3 */
  891. felem_square(tmp, ftmp);
  892. felem_reduce(ftmp, tmp); /* 2^8 - 2^4 */
  893. felem_mul(tmp, ftmp, e4);
  894. felem_reduce(ftmp, tmp); /* 2^8 - 2^0 */
  895. felem_assign(e8, ftmp);
  896. for (i = 0; i < 8; i++) {
  897. felem_square(tmp, ftmp);
  898. felem_reduce(ftmp, tmp);
  899. } /* 2^16 - 2^8 */
  900. felem_mul(tmp, ftmp, e8);
  901. felem_reduce(ftmp, tmp); /* 2^16 - 2^0 */
  902. felem_assign(e16, ftmp);
  903. for (i = 0; i < 16; i++) {
  904. felem_square(tmp, ftmp);
  905. felem_reduce(ftmp, tmp);
  906. } /* 2^32 - 2^16 */
  907. felem_mul(tmp, ftmp, e16);
  908. felem_reduce(ftmp, tmp); /* 2^32 - 2^0 */
  909. felem_assign(e32, ftmp);
  910. for (i = 0; i < 32; i++) {
  911. felem_square(tmp, ftmp);
  912. felem_reduce(ftmp, tmp);
  913. } /* 2^64 - 2^32 */
  914. felem_assign(e64, ftmp);
  915. felem_mul(tmp, ftmp, in);
  916. felem_reduce(ftmp, tmp); /* 2^64 - 2^32 + 2^0 */
  917. for (i = 0; i < 192; i++) {
  918. felem_square(tmp, ftmp);
  919. felem_reduce(ftmp, tmp);
  920. } /* 2^256 - 2^224 + 2^192 */
  921. felem_mul(tmp, e64, e32);
  922. felem_reduce(ftmp2, tmp); /* 2^64 - 2^0 */
  923. for (i = 0; i < 16; i++) {
  924. felem_square(tmp, ftmp2);
  925. felem_reduce(ftmp2, tmp);
  926. } /* 2^80 - 2^16 */
  927. felem_mul(tmp, ftmp2, e16);
  928. felem_reduce(ftmp2, tmp); /* 2^80 - 2^0 */
  929. for (i = 0; i < 8; i++) {
  930. felem_square(tmp, ftmp2);
  931. felem_reduce(ftmp2, tmp);
  932. } /* 2^88 - 2^8 */
  933. felem_mul(tmp, ftmp2, e8);
  934. felem_reduce(ftmp2, tmp); /* 2^88 - 2^0 */
  935. for (i = 0; i < 4; i++) {
  936. felem_square(tmp, ftmp2);
  937. felem_reduce(ftmp2, tmp);
  938. } /* 2^92 - 2^4 */
  939. felem_mul(tmp, ftmp2, e4);
  940. felem_reduce(ftmp2, tmp); /* 2^92 - 2^0 */
  941. felem_square(tmp, ftmp2);
  942. felem_reduce(ftmp2, tmp); /* 2^93 - 2^1 */
  943. felem_square(tmp, ftmp2);
  944. felem_reduce(ftmp2, tmp); /* 2^94 - 2^2 */
  945. felem_mul(tmp, ftmp2, e2);
  946. felem_reduce(ftmp2, tmp); /* 2^94 - 2^0 */
  947. felem_square(tmp, ftmp2);
  948. felem_reduce(ftmp2, tmp); /* 2^95 - 2^1 */
  949. felem_square(tmp, ftmp2);
  950. felem_reduce(ftmp2, tmp); /* 2^96 - 2^2 */
  951. felem_mul(tmp, ftmp2, in);
  952. felem_reduce(ftmp2, tmp); /* 2^96 - 3 */
  953. felem_mul(tmp, ftmp2, ftmp);
  954. felem_reduce(out, tmp); /* 2^256 - 2^224 + 2^192 + 2^96 - 3 */
  955. }
  956. static void smallfelem_inv_contract(smallfelem out, const smallfelem in)
  957. {
  958. felem tmp;
  959. smallfelem_expand(tmp, in);
  960. felem_inv(tmp, tmp);
  961. felem_contract(out, tmp);
  962. }
  963. /*-
  964. * Group operations
  965. * ----------------
  966. *
  967. * Building on top of the field operations we have the operations on the
  968. * elliptic curve group itself. Points on the curve are represented in Jacobian
  969. * coordinates
  970. */
  971. /*-
  972. * point_double calculates 2*(x_in, y_in, z_in)
  973. *
  974. * The method is taken from:
  975. * http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2001-b
  976. *
  977. * Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed.
  978. * while x_out == y_in is not (maybe this works, but it's not tested).
  979. */
  980. static void
  981. point_double(felem x_out, felem y_out, felem z_out,
  982. const felem x_in, const felem y_in, const felem z_in)
  983. {
  984. longfelem tmp, tmp2;
  985. felem delta, gamma, beta, alpha, ftmp, ftmp2;
  986. smallfelem small1, small2;
  987. felem_assign(ftmp, x_in);
  988. /* ftmp[i] < 2^106 */
  989. felem_assign(ftmp2, x_in);
  990. /* ftmp2[i] < 2^106 */
  991. /* delta = z^2 */
  992. felem_square(tmp, z_in);
  993. felem_reduce(delta, tmp);
  994. /* delta[i] < 2^101 */
  995. /* gamma = y^2 */
  996. felem_square(tmp, y_in);
  997. felem_reduce(gamma, tmp);
  998. /* gamma[i] < 2^101 */
  999. felem_shrink(small1, gamma);
  1000. /* beta = x*gamma */
  1001. felem_small_mul(tmp, small1, x_in);
  1002. felem_reduce(beta, tmp);
  1003. /* beta[i] < 2^101 */
  1004. /* alpha = 3*(x-delta)*(x+delta) */
  1005. felem_diff(ftmp, delta);
  1006. /* ftmp[i] < 2^105 + 2^106 < 2^107 */
  1007. felem_sum(ftmp2, delta);
  1008. /* ftmp2[i] < 2^105 + 2^106 < 2^107 */
  1009. felem_scalar(ftmp2, 3);
  1010. /* ftmp2[i] < 3 * 2^107 < 2^109 */
  1011. felem_mul(tmp, ftmp, ftmp2);
  1012. felem_reduce(alpha, tmp);
  1013. /* alpha[i] < 2^101 */
  1014. felem_shrink(small2, alpha);
  1015. /* x' = alpha^2 - 8*beta */
  1016. smallfelem_square(tmp, small2);
  1017. felem_reduce(x_out, tmp);
  1018. felem_assign(ftmp, beta);
  1019. felem_scalar(ftmp, 8);
  1020. /* ftmp[i] < 8 * 2^101 = 2^104 */
  1021. felem_diff(x_out, ftmp);
  1022. /* x_out[i] < 2^105 + 2^101 < 2^106 */
  1023. /* z' = (y + z)^2 - gamma - delta */
  1024. felem_sum(delta, gamma);
  1025. /* delta[i] < 2^101 + 2^101 = 2^102 */
  1026. felem_assign(ftmp, y_in);
  1027. felem_sum(ftmp, z_in);
  1028. /* ftmp[i] < 2^106 + 2^106 = 2^107 */
  1029. felem_square(tmp, ftmp);
  1030. felem_reduce(z_out, tmp);
  1031. felem_diff(z_out, delta);
  1032. /* z_out[i] < 2^105 + 2^101 < 2^106 */
  1033. /* y' = alpha*(4*beta - x') - 8*gamma^2 */
  1034. felem_scalar(beta, 4);
  1035. /* beta[i] < 4 * 2^101 = 2^103 */
  1036. felem_diff_zero107(beta, x_out);
  1037. /* beta[i] < 2^107 + 2^103 < 2^108 */
  1038. felem_small_mul(tmp, small2, beta);
  1039. /* tmp[i] < 7 * 2^64 < 2^67 */
  1040. smallfelem_square(tmp2, small1);
  1041. /* tmp2[i] < 7 * 2^64 */
  1042. longfelem_scalar(tmp2, 8);
  1043. /* tmp2[i] < 8 * 7 * 2^64 = 7 * 2^67 */
  1044. longfelem_diff(tmp, tmp2);
  1045. /* tmp[i] < 2^67 + 2^70 + 2^40 < 2^71 */
  1046. felem_reduce_zero105(y_out, tmp);
  1047. /* y_out[i] < 2^106 */
  1048. }
  1049. /*
  1050. * point_double_small is the same as point_double, except that it operates on
  1051. * smallfelems
  1052. */
  1053. static void
  1054. point_double_small(smallfelem x_out, smallfelem y_out, smallfelem z_out,
  1055. const smallfelem x_in, const smallfelem y_in,
  1056. const smallfelem z_in)
  1057. {
  1058. felem felem_x_out, felem_y_out, felem_z_out;
  1059. felem felem_x_in, felem_y_in, felem_z_in;
  1060. smallfelem_expand(felem_x_in, x_in);
  1061. smallfelem_expand(felem_y_in, y_in);
  1062. smallfelem_expand(felem_z_in, z_in);
  1063. point_double(felem_x_out, felem_y_out, felem_z_out,
  1064. felem_x_in, felem_y_in, felem_z_in);
  1065. felem_shrink(x_out, felem_x_out);
  1066. felem_shrink(y_out, felem_y_out);
  1067. felem_shrink(z_out, felem_z_out);
  1068. }
  1069. /* copy_conditional copies in to out iff mask is all ones. */
  1070. static void copy_conditional(felem out, const felem in, limb mask)
  1071. {
  1072. unsigned i;
  1073. for (i = 0; i < NLIMBS; ++i) {
  1074. const limb tmp = mask & (in[i] ^ out[i]);
  1075. out[i] ^= tmp;
  1076. }
  1077. }
  1078. /* copy_small_conditional copies in to out iff mask is all ones. */
  1079. static void copy_small_conditional(felem out, const smallfelem in, limb mask)
  1080. {
  1081. unsigned i;
  1082. const u64 mask64 = mask;
  1083. for (i = 0; i < NLIMBS; ++i) {
  1084. out[i] = ((limb) (in[i] & mask64)) | (out[i] & ~mask);
  1085. }
  1086. }
  1087. /*-
  1088. * point_add calculates (x1, y1, z1) + (x2, y2, z2)
  1089. *
  1090. * The method is taken from:
  1091. * http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl,
  1092. * adapted for mixed addition (z2 = 1, or z2 = 0 for the point at infinity).
  1093. *
  1094. * This function includes a branch for checking whether the two input points
  1095. * are equal, (while not equal to the point at infinity). This case never
  1096. * happens during single point multiplication, so there is no timing leak for
  1097. * ECDH or ECDSA signing.
  1098. */
  1099. static void point_add(felem x3, felem y3, felem z3,
  1100. const felem x1, const felem y1, const felem z1,
  1101. const int mixed, const smallfelem x2,
  1102. const smallfelem y2, const smallfelem z2)
  1103. {
  1104. felem ftmp, ftmp2, ftmp3, ftmp4, ftmp5, ftmp6, x_out, y_out, z_out;
  1105. longfelem tmp, tmp2;
  1106. smallfelem small1, small2, small3, small4, small5;
  1107. limb x_equal, y_equal, z1_is_zero, z2_is_zero;
  1108. limb points_equal;
  1109. felem_shrink(small3, z1);
  1110. z1_is_zero = smallfelem_is_zero(small3);
  1111. z2_is_zero = smallfelem_is_zero(z2);
  1112. /* ftmp = z1z1 = z1**2 */
  1113. smallfelem_square(tmp, small3);
  1114. felem_reduce(ftmp, tmp);
  1115. /* ftmp[i] < 2^101 */
  1116. felem_shrink(small1, ftmp);
  1117. if (!mixed) {
  1118. /* ftmp2 = z2z2 = z2**2 */
  1119. smallfelem_square(tmp, z2);
  1120. felem_reduce(ftmp2, tmp);
  1121. /* ftmp2[i] < 2^101 */
  1122. felem_shrink(small2, ftmp2);
  1123. felem_shrink(small5, x1);
  1124. /* u1 = ftmp3 = x1*z2z2 */
  1125. smallfelem_mul(tmp, small5, small2);
  1126. felem_reduce(ftmp3, tmp);
  1127. /* ftmp3[i] < 2^101 */
  1128. /* ftmp5 = z1 + z2 */
  1129. felem_assign(ftmp5, z1);
  1130. felem_small_sum(ftmp5, z2);
  1131. /* ftmp5[i] < 2^107 */
  1132. /* ftmp5 = (z1 + z2)**2 - (z1z1 + z2z2) = 2z1z2 */
  1133. felem_square(tmp, ftmp5);
  1134. felem_reduce(ftmp5, tmp);
  1135. /* ftmp2 = z2z2 + z1z1 */
  1136. felem_sum(ftmp2, ftmp);
  1137. /* ftmp2[i] < 2^101 + 2^101 = 2^102 */
  1138. felem_diff(ftmp5, ftmp2);
  1139. /* ftmp5[i] < 2^105 + 2^101 < 2^106 */
  1140. /* ftmp2 = z2 * z2z2 */
  1141. smallfelem_mul(tmp, small2, z2);
  1142. felem_reduce(ftmp2, tmp);
  1143. /* s1 = ftmp2 = y1 * z2**3 */
  1144. felem_mul(tmp, y1, ftmp2);
  1145. felem_reduce(ftmp6, tmp);
  1146. /* ftmp6[i] < 2^101 */
  1147. } else {
  1148. /*
  1149. * We'll assume z2 = 1 (special case z2 = 0 is handled later)
  1150. */
  1151. /* u1 = ftmp3 = x1*z2z2 */
  1152. felem_assign(ftmp3, x1);
  1153. /* ftmp3[i] < 2^106 */
  1154. /* ftmp5 = 2z1z2 */
  1155. felem_assign(ftmp5, z1);
  1156. felem_scalar(ftmp5, 2);
  1157. /* ftmp5[i] < 2*2^106 = 2^107 */
  1158. /* s1 = ftmp2 = y1 * z2**3 */
  1159. felem_assign(ftmp6, y1);
  1160. /* ftmp6[i] < 2^106 */
  1161. }
  1162. /* u2 = x2*z1z1 */
  1163. smallfelem_mul(tmp, x2, small1);
  1164. felem_reduce(ftmp4, tmp);
  1165. /* h = ftmp4 = u2 - u1 */
  1166. felem_diff_zero107(ftmp4, ftmp3);
  1167. /* ftmp4[i] < 2^107 + 2^101 < 2^108 */
  1168. felem_shrink(small4, ftmp4);
  1169. x_equal = smallfelem_is_zero(small4);
  1170. /* z_out = ftmp5 * h */
  1171. felem_small_mul(tmp, small4, ftmp5);
  1172. felem_reduce(z_out, tmp);
  1173. /* z_out[i] < 2^101 */
  1174. /* ftmp = z1 * z1z1 */
  1175. smallfelem_mul(tmp, small1, small3);
  1176. felem_reduce(ftmp, tmp);
  1177. /* s2 = tmp = y2 * z1**3 */
  1178. felem_small_mul(tmp, y2, ftmp);
  1179. felem_reduce(ftmp5, tmp);
  1180. /* r = ftmp5 = (s2 - s1)*2 */
  1181. felem_diff_zero107(ftmp5, ftmp6);
  1182. /* ftmp5[i] < 2^107 + 2^107 = 2^108 */
  1183. felem_scalar(ftmp5, 2);
  1184. /* ftmp5[i] < 2^109 */
  1185. felem_shrink(small1, ftmp5);
  1186. y_equal = smallfelem_is_zero(small1);
  1187. /*
  1188. * The formulae are incorrect if the points are equal, in affine coordinates
  1189. * (X_1, Y_1) == (X_2, Y_2), so we check for this and do doubling if this
  1190. * happens.
  1191. *
  1192. * We use bitwise operations to avoid potential side-channels introduced by
  1193. * the short-circuiting behaviour of boolean operators.
  1194. *
  1195. * The special case of either point being the point at infinity (z1 and/or
  1196. * z2 are zero), is handled separately later on in this function, so we
  1197. * avoid jumping to point_double here in those special cases.
  1198. */
  1199. points_equal = (x_equal & y_equal & (~z1_is_zero) & (~z2_is_zero));
  1200. if (points_equal) {
  1201. /*
  1202. * This is obviously not constant-time but, as mentioned before, this
  1203. * case never happens during single point multiplication, so there is no
  1204. * timing leak for ECDH or ECDSA signing.
  1205. */
  1206. point_double(x3, y3, z3, x1, y1, z1);
  1207. return;
  1208. }
  1209. /* I = ftmp = (2h)**2 */
  1210. felem_assign(ftmp, ftmp4);
  1211. felem_scalar(ftmp, 2);
  1212. /* ftmp[i] < 2*2^108 = 2^109 */
  1213. felem_square(tmp, ftmp);
  1214. felem_reduce(ftmp, tmp);
  1215. /* J = ftmp2 = h * I */
  1216. felem_mul(tmp, ftmp4, ftmp);
  1217. felem_reduce(ftmp2, tmp);
  1218. /* V = ftmp4 = U1 * I */
  1219. felem_mul(tmp, ftmp3, ftmp);
  1220. felem_reduce(ftmp4, tmp);
  1221. /* x_out = r**2 - J - 2V */
  1222. smallfelem_square(tmp, small1);
  1223. felem_reduce(x_out, tmp);
  1224. felem_assign(ftmp3, ftmp4);
  1225. felem_scalar(ftmp4, 2);
  1226. felem_sum(ftmp4, ftmp2);
  1227. /* ftmp4[i] < 2*2^101 + 2^101 < 2^103 */
  1228. felem_diff(x_out, ftmp4);
  1229. /* x_out[i] < 2^105 + 2^101 */
  1230. /* y_out = r(V-x_out) - 2 * s1 * J */
  1231. felem_diff_zero107(ftmp3, x_out);
  1232. /* ftmp3[i] < 2^107 + 2^101 < 2^108 */
  1233. felem_small_mul(tmp, small1, ftmp3);
  1234. felem_mul(tmp2, ftmp6, ftmp2);
  1235. longfelem_scalar(tmp2, 2);
  1236. /* tmp2[i] < 2*2^67 = 2^68 */
  1237. longfelem_diff(tmp, tmp2);
  1238. /* tmp[i] < 2^67 + 2^70 + 2^40 < 2^71 */
  1239. felem_reduce_zero105(y_out, tmp);
  1240. /* y_out[i] < 2^106 */
  1241. copy_small_conditional(x_out, x2, z1_is_zero);
  1242. copy_conditional(x_out, x1, z2_is_zero);
  1243. copy_small_conditional(y_out, y2, z1_is_zero);
  1244. copy_conditional(y_out, y1, z2_is_zero);
  1245. copy_small_conditional(z_out, z2, z1_is_zero);
  1246. copy_conditional(z_out, z1, z2_is_zero);
  1247. felem_assign(x3, x_out);
  1248. felem_assign(y3, y_out);
  1249. felem_assign(z3, z_out);
  1250. }
  1251. /*
  1252. * point_add_small is the same as point_add, except that it operates on
  1253. * smallfelems
  1254. */
  1255. static void point_add_small(smallfelem x3, smallfelem y3, smallfelem z3,
  1256. smallfelem x1, smallfelem y1, smallfelem z1,
  1257. smallfelem x2, smallfelem y2, smallfelem z2)
  1258. {
  1259. felem felem_x3, felem_y3, felem_z3;
  1260. felem felem_x1, felem_y1, felem_z1;
  1261. smallfelem_expand(felem_x1, x1);
  1262. smallfelem_expand(felem_y1, y1);
  1263. smallfelem_expand(felem_z1, z1);
  1264. point_add(felem_x3, felem_y3, felem_z3, felem_x1, felem_y1, felem_z1, 0,
  1265. x2, y2, z2);
  1266. felem_shrink(x3, felem_x3);
  1267. felem_shrink(y3, felem_y3);
  1268. felem_shrink(z3, felem_z3);
  1269. }
  1270. /*-
  1271. * Base point pre computation
  1272. * --------------------------
  1273. *
  1274. * Two different sorts of precomputed tables are used in the following code.
  1275. * Each contain various points on the curve, where each point is three field
  1276. * elements (x, y, z).
  1277. *
  1278. * For the base point table, z is usually 1 (0 for the point at infinity).
  1279. * This table has 2 * 16 elements, starting with the following:
  1280. * index | bits | point
  1281. * ------+---------+------------------------------
  1282. * 0 | 0 0 0 0 | 0G
  1283. * 1 | 0 0 0 1 | 1G
  1284. * 2 | 0 0 1 0 | 2^64G
  1285. * 3 | 0 0 1 1 | (2^64 + 1)G
  1286. * 4 | 0 1 0 0 | 2^128G
  1287. * 5 | 0 1 0 1 | (2^128 + 1)G
  1288. * 6 | 0 1 1 0 | (2^128 + 2^64)G
  1289. * 7 | 0 1 1 1 | (2^128 + 2^64 + 1)G
  1290. * 8 | 1 0 0 0 | 2^192G
  1291. * 9 | 1 0 0 1 | (2^192 + 1)G
  1292. * 10 | 1 0 1 0 | (2^192 + 2^64)G
  1293. * 11 | 1 0 1 1 | (2^192 + 2^64 + 1)G
  1294. * 12 | 1 1 0 0 | (2^192 + 2^128)G
  1295. * 13 | 1 1 0 1 | (2^192 + 2^128 + 1)G
  1296. * 14 | 1 1 1 0 | (2^192 + 2^128 + 2^64)G
  1297. * 15 | 1 1 1 1 | (2^192 + 2^128 + 2^64 + 1)G
  1298. * followed by a copy of this with each element multiplied by 2^32.
  1299. *
  1300. * The reason for this is so that we can clock bits into four different
  1301. * locations when doing simple scalar multiplies against the base point,
  1302. * and then another four locations using the second 16 elements.
  1303. *
  1304. * Tables for other points have table[i] = iG for i in 0 .. 16. */
/* gmul is the table of precomputed base points */
/*
 * Each field element is four 64-bit limbs with limb[0] holding the least
 * significant 64 bits. Index 0 of each sub-table is the point at infinity
 * (all-zero z).
 */
static const smallfelem gmul[2][16][3] = {
    /* multiples of G selected by bits 0/64/128/192 of the scalar */
    {{{0, 0, 0, 0},
      {0, 0, 0, 0},
      {0, 0, 0, 0}},
     {{0xf4a13945d898c296, 0x77037d812deb33a0, 0xf8bce6e563a440f2,
       0x6b17d1f2e12c4247},
      {0xcbb6406837bf51f5, 0x2bce33576b315ece, 0x8ee7eb4a7c0f9e16,
       0x4fe342e2fe1a7f9b},
      {1, 0, 0, 0}},
     {{0x90e75cb48e14db63, 0x29493baaad651f7e, 0x8492592e326e25de,
       0x0fa822bc2811aaa5},
      {0xe41124545f462ee7, 0x34b1a65050fe82f5, 0x6f4ad4bcb3df188b,
       0xbff44ae8f5dba80d},
      {1, 0, 0, 0}},
     {{0x93391ce2097992af, 0xe96c98fd0d35f1fa, 0xb257c0de95e02789,
       0x300a4bbc89d6726f},
      {0xaa54a291c08127a0, 0x5bb1eeada9d806a5, 0x7f1ddb25ff1e3c6f,
       0x72aac7e0d09b4644},
      {1, 0, 0, 0}},
     {{0x57c84fc9d789bd85, 0xfc35ff7dc297eac3, 0xfb982fd588c6766e,
       0x447d739beedb5e67},
      {0x0c7e33c972e25b32, 0x3d349b95a7fae500, 0xe12e9d953a4aaff7,
       0x2d4825ab834131ee},
      {1, 0, 0, 0}},
     {{0x13949c932a1d367f, 0xef7fbd2b1a0a11b7, 0xddc6068bb91dfc60,
       0xef9519328a9c72ff},
      {0x196035a77376d8a8, 0x23183b0895ca1740, 0xc1ee9807022c219c,
       0x611e9fc37dbb2c9b},
      {1, 0, 0, 0}},
     {{0xcae2b1920b57f4bc, 0x2936df5ec6c9bc36, 0x7dea6482e11238bf,
       0x550663797b51f5d8},
      {0x44ffe216348a964c, 0x9fb3d576dbdefbe1, 0x0afa40018d9d50e5,
       0x157164848aecb851},
      {1, 0, 0, 0}},
     {{0xe48ecafffc5cde01, 0x7ccd84e70d715f26, 0xa2e8f483f43e4391,
       0xeb5d7745b21141ea},
      {0xcac917e2731a3479, 0x85f22cfe2844b645, 0x0990e6a158006cee,
       0xeafd72ebdbecc17b},
      {1, 0, 0, 0}},
     {{0x6cf20ffb313728be, 0x96439591a3c6b94a, 0x2736ff8344315fc5,
       0xa6d39677a7849276},
      {0xf2bab833c357f5f4, 0x824a920c2284059b, 0x66b8babd2d27ecdf,
       0x674f84749b0b8816},
      {1, 0, 0, 0}},
     {{0x2df48c04677c8a3e, 0x74e02f080203a56b, 0x31855f7db8c7fedb,
       0x4e769e7672c9ddad},
      {0xa4c36165b824bbb0, 0xfb9ae16f3b9122a5, 0x1ec0057206947281,
       0x42b99082de830663},
      {1, 0, 0, 0}},
     {{0x6ef95150dda868b9, 0xd1f89e799c0ce131, 0x7fdc1ca008a1c478,
       0x78878ef61c6ce04d},
      {0x9c62b9121fe0d976, 0x6ace570ebde08d4f, 0xde53142c12309def,
       0xb6cb3f5d7b72c321},
      {1, 0, 0, 0}},
     {{0x7f991ed2c31a3573, 0x5b82dd5bd54fb496, 0x595c5220812ffcae,
       0x0c88bc4d716b1287},
      {0x3a57bf635f48aca8, 0x7c8181f4df2564f3, 0x18d1b5b39c04e6aa,
       0xdd5ddea3f3901dc6},
      {1, 0, 0, 0}},
     {{0xe96a79fb3e72ad0c, 0x43a0a28c42ba792f, 0xefe0a423083e49f3,
       0x68f344af6b317466},
      {0xcdfe17db3fb24d4a, 0x668bfc2271f5c626, 0x604ed93c24d67ff3,
       0x31b9c405f8540a20},
      {1, 0, 0, 0}},
     {{0xd36b4789a2582e7f, 0x0d1a10144ec39c28, 0x663c62c3edbad7a0,
       0x4052bf4b6f461db9},
      {0x235a27c3188d25eb, 0xe724f33999bfcc5b, 0x862be6bd71d70cc8,
       0xfecf4d5190b0fc61},
      {1, 0, 0, 0}},
     {{0x74346c10a1d4cfac, 0xafdf5cc08526a7a4, 0x123202a8f62bff7a,
       0x1eddbae2c802e41a},
      {0x8fa0af2dd603f844, 0x36e06b7e4c701917, 0x0c45f45273db33a0,
       0x43104d86560ebcfc},
      {1, 0, 0, 0}},
     {{0x9615b5110d1d78e5, 0x66b0de3225c4744b, 0x0a4a46fb6aaf363a,
       0xb48e26b484f7a21c},
      {0x06ebb0f621a01b2d, 0xc004e4048b7b0f98, 0x64131bcdfed6f668,
       0xfac015404d4d3dab},
      {1, 0, 0, 0}}},
    /* the same multiples, each further multiplied by 2^32 */
    {{{0, 0, 0, 0},
      {0, 0, 0, 0},
      {0, 0, 0, 0}},
     {{0x3a5a9e22185a5943, 0x1ab919365c65dfb6, 0x21656b32262c71da,
       0x7fe36b40af22af89},
      {0xd50d152c699ca101, 0x74b3d5867b8af212, 0x9f09f40407dca6f1,
       0xe697d45825b63624},
      {1, 0, 0, 0}},
     {{0xa84aa9397512218e, 0xe9a521b074ca0141, 0x57880b3a18a2e902,
       0x4a5b506612a677a6},
      {0x0beada7a4c4f3840, 0x626db15419e26d9d, 0xc42604fbe1627d40,
       0xeb13461ceac089f1},
      {1, 0, 0, 0}},
     {{0xf9faed0927a43281, 0x5e52c4144103ecbc, 0xc342967aa815c857,
       0x0781b8291c6a220a},
      {0x5a8343ceeac55f80, 0x88f80eeee54a05e3, 0x97b2a14f12916434,
       0x690cde8df0151593},
      {1, 0, 0, 0}},
     {{0xaee9c75df7f82f2a, 0x9e4c35874afdf43a, 0xf5622df437371326,
       0x8a535f566ec73617},
      {0xc5f9a0ac223094b7, 0xcde533864c8c7669, 0x37e02819085a92bf,
       0x0455c08468b08bd7},
      {1, 0, 0, 0}},
     {{0x0c0a6e2c9477b5d9, 0xf9a4bf62876dc444, 0x5050a949b6cdc279,
       0x06bada7ab77f8276},
      {0xc8b4aed1ea48dac9, 0xdebd8a4b7ea1070f, 0x427d49101366eb70,
       0x5b476dfd0e6cb18a},
      {1, 0, 0, 0}},
     {{0x7c5c3e44278c340a, 0x4d54606812d66f3b, 0x29a751b1ae23c5d8,
       0x3e29864e8a2ec908},
      {0x142d2a6626dbb850, 0xad1744c4765bd780, 0x1f150e68e322d1ed,
       0x239b90ea3dc31e7e},
      {1, 0, 0, 0}},
     {{0x78c416527a53322a, 0x305dde6709776f8e, 0xdbcab759f8862ed4,
       0x820f4dd949f72ff7},
      {0x6cc544a62b5debd4, 0x75be5d937b4e8cc4, 0x1b481b1b215c14d3,
       0x140406ec783a05ec},
      {1, 0, 0, 0}},
     {{0x6a703f10e895df07, 0xfd75f3fa01876bd8, 0xeb5b06e70ce08ffe,
       0x68f6b8542783dfee},
      {0x90c76f8a78712655, 0xcf5293d2f310bf7f, 0xfbc8044dfda45028,
       0xcbe1feba92e40ce6},
      {1, 0, 0, 0}},
     {{0xe998ceea4396e4c1, 0xfc82ef0b6acea274, 0x230f729f2250e927,
       0xd0b2f94d2f420109},
      {0x4305adddb38d4966, 0x10b838f8624c3b45, 0x7db2636658954e7a,
       0x971459828b0719e5},
      {1, 0, 0, 0}},
     {{0x4bd6b72623369fc9, 0x57f2929e53d0b876, 0xc2d5cba4f2340687,
       0x961610004a866aba},
      {0x49997bcd2e407a5e, 0x69ab197d92ddcb24, 0x2cf1f2438fe5131c,
       0x7acb9fadcee75e44},
      {1, 0, 0, 0}},
     {{0x254e839423d2d4c0, 0xf57f0c917aea685b, 0xa60d880f6f75aaea,
       0x24eb9acca333bf5b},
      {0xe3de4ccb1cda5dea, 0xfeef9341c51a6b4f, 0x743125f88bac4c4d,
       0x69f891c5acd079cc},
      {1, 0, 0, 0}},
     {{0xeee44b35702476b5, 0x7ed031a0e45c2258, 0xb422d1e7bd6f8514,
       0xe51f547c5972a107},
      {0xa25bcd6fc9cf343d, 0x8ca922ee097c184e, 0xa62f98b3a9fe9a06,
       0x1c309a2b25bb1387},
      {1, 0, 0, 0}},
     {{0x9295dbeb1967c459, 0xb00148833472c98e, 0xc504977708011828,
       0x20b87b8aa2c4e503},
      {0x3063175de057c277, 0x1bd539338fe582dd, 0x0d11adef5f69a044,
       0xf5c6fa49919776be},
      {1, 0, 0, 0}},
     {{0x8c944e760fd59e11, 0x3876cba1102fad5f, 0xa454c3fad83faa56,
       0x1ed7d1b9332010b9},
      {0xa1011a270024b889, 0x05e4d0dcac0cd344, 0x52b520f0eb6a2a24,
       0x3a2b03f03217257a},
      {1, 0, 0, 0}},
     {{0xf20fc2afdf1d043d, 0xf330240db58d5a62, 0xfc7d229ca0058c3b,
       0x15fee545c78dd9f6},
      {0x501e82885bc98cda, 0x41ef80e5d046ac04, 0x557d9f49461210fb,
       0x4ab5b6b2b8753f81},
      {1, 0, 0, 0}}}
};
  1464. /*
  1465. * select_point selects the |idx|th point from a precomputation table and
  1466. * copies it to out.
  1467. */
  1468. static void select_point(const u64 idx, unsigned int size,
  1469. const smallfelem pre_comp[16][3], smallfelem out[3])
  1470. {
  1471. unsigned i, j;
  1472. u64 *outlimbs = &out[0][0];
  1473. memset(out, 0, sizeof(*out) * 3);
  1474. for (i = 0; i < size; i++) {
  1475. const u64 *inlimbs = (u64 *)&pre_comp[i][0][0];
  1476. u64 mask = i ^ idx;
  1477. mask |= mask >> 4;
  1478. mask |= mask >> 2;
  1479. mask |= mask >> 1;
  1480. mask &= 1;
  1481. mask--;
  1482. for (j = 0; j < NLIMBS * 3; j++)
  1483. outlimbs[j] |= inlimbs[j] & mask;
  1484. }
  1485. }
  1486. /* get_bit returns the |i|th bit in |in| */
  1487. static char get_bit(const felem_bytearray in, int i)
  1488. {
  1489. if ((i < 0) || (i >= 256))
  1490. return 0;
  1491. return (in[i >> 3] >> (i & 7)) & 1;
  1492. }
  1493. /*
  1494. * Interleaved point multiplication using precomputed point multiples: The
  1495. * small point multiples 0*P, 1*P, ..., 17*P are in pre_comp[], the scalars
  1496. * in scalars[]. If g_scalar is non-NULL, we also add this multiple of the
  1497. * generator, using certain (large) precomputed multiples in g_pre_comp.
  1498. * Output point (X, Y, Z) is stored in x_out, y_out, z_out
  1499. */
  1500. static void batch_mul(felem x_out, felem y_out, felem z_out,
  1501. const felem_bytearray scalars[],
  1502. const unsigned num_points, const u8 *g_scalar,
  1503. const int mixed, const smallfelem pre_comp[][17][3],
  1504. const smallfelem g_pre_comp[2][16][3])
  1505. {
  1506. int i, skip;
  1507. unsigned num, gen_mul = (g_scalar != NULL);
  1508. felem nq[3], ftmp;
  1509. smallfelem tmp[3];
  1510. u64 bits;
  1511. u8 sign, digit;
  1512. /* set nq to the point at infinity */
  1513. memset(nq, 0, sizeof(nq));
  1514. /*
  1515. * Loop over all scalars msb-to-lsb, interleaving additions of multiples
  1516. * of the generator (two in each of the last 32 rounds) and additions of
  1517. * other points multiples (every 5th round).
  1518. */
  1519. skip = 1; /* save two point operations in the first
  1520. * round */
  1521. for (i = (num_points ? 255 : 31); i >= 0; --i) {
  1522. /* double */
  1523. if (!skip)
  1524. point_double(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2]);
  1525. /* add multiples of the generator */
  1526. if (gen_mul && (i <= 31)) {
  1527. /* first, look 32 bits upwards */
  1528. bits = get_bit(g_scalar, i + 224) << 3;
  1529. bits |= get_bit(g_scalar, i + 160) << 2;
  1530. bits |= get_bit(g_scalar, i + 96) << 1;
  1531. bits |= get_bit(g_scalar, i + 32);
  1532. /* select the point to add, in constant time */
  1533. select_point(bits, 16, g_pre_comp[1], tmp);
  1534. if (!skip) {
  1535. /* Arg 1 below is for "mixed" */
  1536. point_add(nq[0], nq[1], nq[2],
  1537. nq[0], nq[1], nq[2], 1, tmp[0], tmp[1], tmp[2]);
  1538. } else {
  1539. smallfelem_expand(nq[0], tmp[0]);
  1540. smallfelem_expand(nq[1], tmp[1]);
  1541. smallfelem_expand(nq[2], tmp[2]);
  1542. skip = 0;
  1543. }
  1544. /* second, look at the current position */
  1545. bits = get_bit(g_scalar, i + 192) << 3;
  1546. bits |= get_bit(g_scalar, i + 128) << 2;
  1547. bits |= get_bit(g_scalar, i + 64) << 1;
  1548. bits |= get_bit(g_scalar, i);
  1549. /* select the point to add, in constant time */
  1550. select_point(bits, 16, g_pre_comp[0], tmp);
  1551. /* Arg 1 below is for "mixed" */
  1552. point_add(nq[0], nq[1], nq[2],
  1553. nq[0], nq[1], nq[2], 1, tmp[0], tmp[1], tmp[2]);
  1554. }
  1555. /* do other additions every 5 doublings */
  1556. if (num_points && (i % 5 == 0)) {
  1557. /* loop over all scalars */
  1558. for (num = 0; num < num_points; ++num) {
  1559. bits = get_bit(scalars[num], i + 4) << 5;
  1560. bits |= get_bit(scalars[num], i + 3) << 4;
  1561. bits |= get_bit(scalars[num], i + 2) << 3;
  1562. bits |= get_bit(scalars[num], i + 1) << 2;
  1563. bits |= get_bit(scalars[num], i) << 1;
  1564. bits |= get_bit(scalars[num], i - 1);
  1565. ec_GFp_nistp_recode_scalar_bits(&sign, &digit, bits);
  1566. /*
  1567. * select the point to add or subtract, in constant time
  1568. */
  1569. select_point(digit, 17, pre_comp[num], tmp);
  1570. smallfelem_neg(ftmp, tmp[1]); /* (X, -Y, Z) is the negative
  1571. * point */
  1572. copy_small_conditional(ftmp, tmp[1], (((limb) sign) - 1));
  1573. felem_contract(tmp[1], ftmp);
  1574. if (!skip) {
  1575. point_add(nq[0], nq[1], nq[2],
  1576. nq[0], nq[1], nq[2],
  1577. mixed, tmp[0], tmp[1], tmp[2]);
  1578. } else {
  1579. smallfelem_expand(nq[0], tmp[0]);
  1580. smallfelem_expand(nq[1], tmp[1]);
  1581. smallfelem_expand(nq[2], tmp[2]);
  1582. skip = 0;
  1583. }
  1584. }
  1585. }
  1586. }
  1587. felem_assign(x_out, nq[0]);
  1588. felem_assign(y_out, nq[1]);
  1589. felem_assign(z_out, nq[2]);
  1590. }
/* Precomputation for the group generator. */
struct nistp256_pre_comp_st {
    /*
     * Two tables of 16 precomputed multiples of the generator, each point
     * stored as Jacobian (X, Y, Z); indexed as in batch_mul()/select_point().
     */
    smallfelem g_pre_comp[2][16][3];
    CRYPTO_REF_COUNT references; /* reference count, see *_dup()/*_free() */
    CRYPTO_RWLOCK *lock;         /* guards updates to |references| */
};
/*
 * Returns the EC_METHOD for this constant-time P-256 implementation.
 * NOTE(review): the initializer is positional; entries must stay in the
 * exact order of the EC_METHOD structure declaration.
 */
const EC_METHOD *EC_GFp_nistp256_method(void)
{
    static const EC_METHOD ret = {
        EC_FLAGS_DEFAULT_OCT,
        NID_X9_62_prime_field,
        ec_GFp_nistp256_group_init,
        ec_GFp_simple_group_finish,
        ec_GFp_simple_group_clear_finish,
        ec_GFp_nist_group_copy,
        ec_GFp_nistp256_group_set_curve,
        ec_GFp_simple_group_get_curve,
        ec_GFp_simple_group_get_degree,
        ec_group_simple_order_bits,
        ec_GFp_simple_group_check_discriminant,
        ec_GFp_simple_point_init,
        ec_GFp_simple_point_finish,
        ec_GFp_simple_point_clear_finish,
        ec_GFp_simple_point_copy,
        ec_GFp_simple_point_set_to_infinity,
        ec_GFp_simple_set_Jprojective_coordinates_GFp,
        ec_GFp_simple_get_Jprojective_coordinates_GFp,
        ec_GFp_simple_point_set_affine_coordinates,
        ec_GFp_nistp256_point_get_affine_coordinates,
        0 /* point_set_compressed_coordinates */ ,
        0 /* point2oct */ ,
        0 /* oct2point */ ,
        ec_GFp_simple_add,
        ec_GFp_simple_dbl,
        ec_GFp_simple_invert,
        ec_GFp_simple_is_at_infinity,
        ec_GFp_simple_is_on_curve,
        ec_GFp_simple_cmp,
        ec_GFp_simple_make_affine,
        ec_GFp_simple_points_make_affine,
        ec_GFp_nistp256_points_mul,
        ec_GFp_nistp256_precompute_mult,
        ec_GFp_nistp256_have_precompute_mult,
        ec_GFp_nist_field_mul,
        ec_GFp_nist_field_sqr,
        0 /* field_div */ ,
        ec_GFp_simple_field_inv,
        0 /* field_encode */ ,
        0 /* field_decode */ ,
        0,                      /* field_set_to_one */
        ec_key_simple_priv2oct,
        ec_key_simple_oct2priv,
        0,                      /* set private */
        ec_key_simple_generate_key,
        ec_key_simple_check_key,
        ec_key_simple_generate_public_key,
        0,                      /* keycopy */
        0,                      /* keyfinish */
        ecdh_simple_compute_key,
        0,                      /* field_inverse_mod_ord */
        0,                      /* blind_coordinates */
        0,                      /* ladder_pre */
        0,                      /* ladder_step */
        0                       /* ladder_post */
    };

    return &ret;
}
  1658. /******************************************************************************/
  1659. /*
  1660. * FUNCTIONS TO MANAGE PRECOMPUTATION
  1661. */
  1662. static NISTP256_PRE_COMP *nistp256_pre_comp_new(void)
  1663. {
  1664. NISTP256_PRE_COMP *ret = OPENSSL_zalloc(sizeof(*ret));
  1665. if (ret == NULL) {
  1666. ECerr(EC_F_NISTP256_PRE_COMP_NEW, ERR_R_MALLOC_FAILURE);
  1667. return ret;
  1668. }
  1669. ret->references = 1;
  1670. ret->lock = CRYPTO_THREAD_lock_new();
  1671. if (ret->lock == NULL) {
  1672. ECerr(EC_F_NISTP256_PRE_COMP_NEW, ERR_R_MALLOC_FAILURE);
  1673. OPENSSL_free(ret);
  1674. return NULL;
  1675. }
  1676. return ret;
  1677. }
  1678. NISTP256_PRE_COMP *EC_nistp256_pre_comp_dup(NISTP256_PRE_COMP *p)
  1679. {
  1680. int i;
  1681. if (p != NULL)
  1682. CRYPTO_UP_REF(&p->references, &i, p->lock);
  1683. return p;
  1684. }
  1685. void EC_nistp256_pre_comp_free(NISTP256_PRE_COMP *pre)
  1686. {
  1687. int i;
  1688. if (pre == NULL)
  1689. return;
  1690. CRYPTO_DOWN_REF(&pre->references, &i, pre->lock);
  1691. REF_PRINT_COUNT("EC_nistp256", x);
  1692. if (i > 0)
  1693. return;
  1694. REF_ASSERT_ISNT(i < 0);
  1695. CRYPTO_THREAD_lock_free(pre->lock);
  1696. OPENSSL_free(pre);
  1697. }
  1698. /******************************************************************************/
  1699. /*
  1700. * OPENSSL EC_METHOD FUNCTIONS
  1701. */
  1702. int ec_GFp_nistp256_group_init(EC_GROUP *group)
  1703. {
  1704. int ret;
  1705. ret = ec_GFp_simple_group_init(group);
  1706. group->a_is_minus3 = 1;
  1707. return ret;
  1708. }
  1709. int ec_GFp_nistp256_group_set_curve(EC_GROUP *group, const BIGNUM *p,
  1710. const BIGNUM *a, const BIGNUM *b,
  1711. BN_CTX *ctx)
  1712. {
  1713. int ret = 0;
  1714. BN_CTX *new_ctx = NULL;
  1715. BIGNUM *curve_p, *curve_a, *curve_b;
  1716. if (ctx == NULL)
  1717. if ((ctx = new_ctx = BN_CTX_new()) == NULL)
  1718. return 0;
  1719. BN_CTX_start(ctx);
  1720. curve_p = BN_CTX_get(ctx);
  1721. curve_a = BN_CTX_get(ctx);
  1722. curve_b = BN_CTX_get(ctx);
  1723. if (curve_b == NULL)
  1724. goto err;
  1725. BN_bin2bn(nistp256_curve_params[0], sizeof(felem_bytearray), curve_p);
  1726. BN_bin2bn(nistp256_curve_params[1], sizeof(felem_bytearray), curve_a);
  1727. BN_bin2bn(nistp256_curve_params[2], sizeof(felem_bytearray), curve_b);
  1728. if ((BN_cmp(curve_p, p)) || (BN_cmp(curve_a, a)) || (BN_cmp(curve_b, b))) {
  1729. ECerr(EC_F_EC_GFP_NISTP256_GROUP_SET_CURVE,
  1730. EC_R_WRONG_CURVE_PARAMETERS);
  1731. goto err;
  1732. }
  1733. group->field_mod_func = BN_nist_mod_256;
  1734. ret = ec_GFp_simple_group_set_curve(group, p, a, b, ctx);
  1735. err:
  1736. BN_CTX_end(ctx);
  1737. BN_CTX_free(new_ctx);
  1738. return ret;
  1739. }
/*
 * Takes the Jacobian coordinates (X, Y, Z) of a point and returns (X', Y') =
 * (X/Z^2, Y/Z^3)
 */
int ec_GFp_nistp256_point_get_affine_coordinates(const EC_GROUP *group,
                                                 const EC_POINT *point,
                                                 BIGNUM *x, BIGNUM *y,
                                                 BN_CTX *ctx)
{
    felem z1, z2, x_in, y_in;
    smallfelem x_out, y_out;
    longfelem tmp;

    /* the point at infinity has no affine representation */
    if (EC_POINT_is_at_infinity(group, point)) {
        ECerr(EC_F_EC_GFP_NISTP256_POINT_GET_AFFINE_COORDINATES,
              EC_R_POINT_AT_INFINITY);
        return 0;
    }
    if ((!BN_to_felem(x_in, point->X)) || (!BN_to_felem(y_in, point->Y)) ||
        (!BN_to_felem(z1, point->Z)))
        return 0;
    /* z2 = 1/Z; x' = X * (1/Z)^2, reduced and contracted to canonical form */
    felem_inv(z2, z1);
    felem_square(tmp, z2);
    felem_reduce(z1, tmp);
    felem_mul(tmp, x_in, z1);
    felem_reduce(x_in, tmp);
    felem_contract(x_out, x_in);
    if (x != NULL) {
        if (!smallfelem_to_BN(x, x_out)) {
            ECerr(EC_F_EC_GFP_NISTP256_POINT_GET_AFFINE_COORDINATES,
                  ERR_R_BN_LIB);
            return 0;
        }
    }
    /* y' = Y * (1/Z)^3: reuse z1 = (1/Z)^2, multiply by z2 = 1/Z */
    felem_mul(tmp, z1, z2);
    felem_reduce(z1, tmp);
    felem_mul(tmp, y_in, z1);
    felem_reduce(y_in, tmp);
    felem_contract(y_out, y_in);
    if (y != NULL) {
        if (!smallfelem_to_BN(y, y_out)) {
            ECerr(EC_F_EC_GFP_NISTP256_POINT_GET_AFFINE_COORDINATES,
                  ERR_R_BN_LIB);
            return 0;
        }
    }
    return 1;
}
/* points below is of size |num|, and tmp_smallfelems is of size |num+1| */
static void make_points_affine(size_t num, smallfelem points[][3],
                               smallfelem tmp_smallfelems[])
{
    /*
     * Runs in constant time, unless an input is the point at infinity (which
     * normally shouldn't happen).
     *
     * Thin adapter: hands the smallfelem field operations to the generic
     * batch-inversion routine via void-pointer callbacks.
     */
    ec_GFp_nistp_points_make_affine_internal(num,
                                             points,
                                             sizeof(smallfelem),
                                             tmp_smallfelems,
                                             (void (*)(void *))smallfelem_one,
                                             smallfelem_is_zero_int,
                                             (void (*)(void *, const void *))
                                             smallfelem_assign,
                                             (void (*)(void *, const void *))
                                             smallfelem_square_contract,
                                             (void (*)
                                              (void *, const void *,
                                               const void *))
                                             smallfelem_mul_contract,
                                             (void (*)(void *, const void *))
                                             smallfelem_inv_contract,
                                             /* nothing to contract */
                                             (void (*)(void *, const void *))
                                             smallfelem_assign);
}
/*
 * Computes scalar*generator + \sum scalars[i]*points[i], ignoring NULL
 * values. Result is stored in r (r can equal one of the inputs).
 */
int ec_GFp_nistp256_points_mul(const EC_GROUP *group, EC_POINT *r,
                               const BIGNUM *scalar, size_t num,
                               const EC_POINT *points[],
                               const BIGNUM *scalars[], BN_CTX *ctx)
{
    int ret = 0;
    int j;
    int mixed = 0;
    BIGNUM *x, *y, *z, *tmp_scalar;
    felem_bytearray g_secret;
    felem_bytearray *secrets = NULL;
    smallfelem (*pre_comp)[17][3] = NULL;
    smallfelem *tmp_smallfelems = NULL;
    unsigned i;
    int num_bytes;
    int have_pre_comp = 0;
    size_t num_points = num;
    smallfelem x_in, y_in, z_in;
    felem x_out, y_out, z_out;
    NISTP256_PRE_COMP *pre = NULL;
    const smallfelem(*g_pre_comp)[16][3] = NULL;
    EC_POINT *generator = NULL;
    const EC_POINT *p = NULL;
    const BIGNUM *p_scalar = NULL;

    BN_CTX_start(ctx);
    x = BN_CTX_get(ctx);
    y = BN_CTX_get(ctx);
    z = BN_CTX_get(ctx);
    tmp_scalar = BN_CTX_get(ctx);
    if (tmp_scalar == NULL)
        goto err;

    if (scalar != NULL) {
        pre = group->pre_comp.nistp256;
        if (pre)
            /* we have precomputation, try to use it */
            g_pre_comp = (const smallfelem(*)[16][3])pre->g_pre_comp;
        else
            /* try to use the standard precomputation */
            g_pre_comp = &gmul[0];
        generator = EC_POINT_new(group);
        if (generator == NULL)
            goto err;
        /* get the generator from precomputation */
        if (!smallfelem_to_BN(x, g_pre_comp[0][1][0]) ||
            !smallfelem_to_BN(y, g_pre_comp[0][1][1]) ||
            !smallfelem_to_BN(z, g_pre_comp[0][1][2])) {
            ECerr(EC_F_EC_GFP_NISTP256_POINTS_MUL, ERR_R_BN_LIB);
            goto err;
        }
        if (!EC_POINT_set_Jprojective_coordinates_GFp(group,
                                                      generator, x, y, z,
                                                      ctx))
            goto err;
        if (0 == EC_POINT_cmp(group, generator, group->generator, ctx))
            /* precomputation matches generator */
            have_pre_comp = 1;
        else
            /*
             * we don't have valid precomputation: treat the generator as a
             * random point
             */
            num_points++;
    }
    if (num_points > 0) {
        if (num_points >= 3) {
            /*
             * unless we precompute multiples for just one or two points,
             * converting those into affine form is time well spent
             */
            mixed = 1;
        }
        secrets = OPENSSL_malloc(sizeof(*secrets) * num_points);
        pre_comp = OPENSSL_malloc(sizeof(*pre_comp) * num_points);
        if (mixed)
            tmp_smallfelems =
                OPENSSL_malloc(sizeof(*tmp_smallfelems) * (num_points * 17 + 1));
        if ((secrets == NULL) || (pre_comp == NULL)
            || (mixed && (tmp_smallfelems == NULL))) {
            ECerr(EC_F_EC_GFP_NISTP256_POINTS_MUL, ERR_R_MALLOC_FAILURE);
            goto err;
        }

        /*
         * we treat NULL scalars as 0, and NULL points as points at infinity,
         * i.e., they contribute nothing to the linear combination
         */
        memset(secrets, 0, sizeof(*secrets) * num_points);
        memset(pre_comp, 0, sizeof(*pre_comp) * num_points);
        for (i = 0; i < num_points; ++i) {
            if (i == num) {
                /*
                 * we didn't have a valid precomputation, so we pick the
                 * generator
                 */
                p = EC_GROUP_get0_generator(group);
                p_scalar = scalar;
            } else {
                /* the i^th point */
                p = points[i];
                p_scalar = scalars[i];
            }
            if ((p_scalar != NULL) && (p != NULL)) {
                /* reduce scalar to 0 <= scalar < 2^256 */
                if ((BN_num_bits(p_scalar) > 256)
                    || (BN_is_negative(p_scalar))) {
                    /*
                     * this is an unusual input, and we don't guarantee
                     * constant-timeness
                     */
                    if (!BN_nnmod(tmp_scalar, p_scalar, group->order, ctx)) {
                        ECerr(EC_F_EC_GFP_NISTP256_POINTS_MUL, ERR_R_BN_LIB);
                        goto err;
                    }
                    num_bytes = BN_bn2lebinpad(tmp_scalar,
                                               secrets[i], sizeof(secrets[i]));
                } else {
                    num_bytes = BN_bn2lebinpad(p_scalar,
                                               secrets[i], sizeof(secrets[i]));
                }
                if (num_bytes < 0) {
                    ECerr(EC_F_EC_GFP_NISTP256_POINTS_MUL, ERR_R_BN_LIB);
                    goto err;
                }
                /*
                 * precompute multiples: pre_comp[i][j] = j*P for
                 * j = 0..16 (index 0 stays zeroed = infinity)
                 */
                if ((!BN_to_felem(x_out, p->X)) ||
                    (!BN_to_felem(y_out, p->Y)) ||
                    (!BN_to_felem(z_out, p->Z)))
                    goto err;
                felem_shrink(pre_comp[i][1][0], x_out);
                felem_shrink(pre_comp[i][1][1], y_out);
                felem_shrink(pre_comp[i][1][2], z_out);
                for (j = 2; j <= 16; ++j) {
                    /* odd j: (j-1)*P + P; even j: double (j/2)*P */
                    if (j & 1) {
                        point_add_small(pre_comp[i][j][0], pre_comp[i][j][1],
                                        pre_comp[i][j][2], pre_comp[i][1][0],
                                        pre_comp[i][1][1], pre_comp[i][1][2],
                                        pre_comp[i][j - 1][0],
                                        pre_comp[i][j - 1][1],
                                        pre_comp[i][j - 1][2]);
                    } else {
                        point_double_small(pre_comp[i][j][0],
                                           pre_comp[i][j][1],
                                           pre_comp[i][j][2],
                                           pre_comp[i][j / 2][0],
                                           pre_comp[i][j / 2][1],
                                           pre_comp[i][j / 2][2]);
                    }
                }
            }
        }
        if (mixed)
            make_points_affine(num_points * 17, pre_comp[0], tmp_smallfelems);
    }

    /* the scalar for the generator */
    if ((scalar != NULL) && (have_pre_comp)) {
        memset(g_secret, 0, sizeof(g_secret));
        /* reduce scalar to 0 <= scalar < 2^256 */
        if ((BN_num_bits(scalar) > 256) || (BN_is_negative(scalar))) {
            /*
             * this is an unusual input, and we don't guarantee
             * constant-timeness
             */
            if (!BN_nnmod(tmp_scalar, scalar, group->order, ctx)) {
                ECerr(EC_F_EC_GFP_NISTP256_POINTS_MUL, ERR_R_BN_LIB);
                goto err;
            }
            num_bytes = BN_bn2lebinpad(tmp_scalar, g_secret, sizeof(g_secret));
        } else {
            num_bytes = BN_bn2lebinpad(scalar, g_secret, sizeof(g_secret));
        }
        /* do the multiplication with generator precomputation */
        batch_mul(x_out, y_out, z_out,
                  (const felem_bytearray(*))secrets, num_points,
                  g_secret,
                  mixed, (const smallfelem(*)[17][3])pre_comp, g_pre_comp);
    } else {
        /* do the multiplication without generator precomputation */
        batch_mul(x_out, y_out, z_out,
                  (const felem_bytearray(*))secrets, num_points,
                  NULL, mixed, (const smallfelem(*)[17][3])pre_comp, NULL);
    }
    /* reduce the output to its unique minimal representation */
    felem_contract(x_in, x_out);
    felem_contract(y_in, y_out);
    felem_contract(z_in, z_out);
    if ((!smallfelem_to_BN(x, x_in)) || (!smallfelem_to_BN(y, y_in)) ||
        (!smallfelem_to_BN(z, z_in))) {
        ECerr(EC_F_EC_GFP_NISTP256_POINTS_MUL, ERR_R_BN_LIB);
        goto err;
    }
    ret = EC_POINT_set_Jprojective_coordinates_GFp(group, r, x, y, z, ctx);

 err:
    BN_CTX_end(ctx);
    EC_POINT_free(generator);
    OPENSSL_free(secrets);
    OPENSSL_free(pre_comp);
    OPENSSL_free(tmp_smallfelems);
    return ret;
}
/*
 * Builds (or reuses the built-in) generator precomputation tables and
 * attaches them to |group|.  Returns 1 on success, 0 on error.
 */
int ec_GFp_nistp256_precompute_mult(EC_GROUP *group, BN_CTX *ctx)
{
    int ret = 0;
    NISTP256_PRE_COMP *pre = NULL;
    int i, j;
    BN_CTX *new_ctx = NULL;
    BIGNUM *x, *y;
    EC_POINT *generator = NULL;
    smallfelem tmp_smallfelems[32];
    felem x_tmp, y_tmp, z_tmp;

    /* throw away old precomputation */
    EC_pre_comp_free(group);
    if (ctx == NULL)
        if ((ctx = new_ctx = BN_CTX_new()) == NULL)
            return 0;
    BN_CTX_start(ctx);
    x = BN_CTX_get(ctx);
    y = BN_CTX_get(ctx);
    if (y == NULL)
        goto err;
    /* get the generator */
    if (group->generator == NULL)
        goto err;
    generator = EC_POINT_new(group);
    if (generator == NULL)
        goto err;
    /* nistp256_curve_params[3], [4] hold the standard generator's x, y */
    BN_bin2bn(nistp256_curve_params[3], sizeof(felem_bytearray), x);
    BN_bin2bn(nistp256_curve_params[4], sizeof(felem_bytearray), y);
    if (!EC_POINT_set_affine_coordinates(group, generator, x, y, ctx))
        goto err;
    if ((pre = nistp256_pre_comp_new()) == NULL)
        goto err;
    /*
     * if the generator is the standard one, use built-in precomputation
     */
    if (0 == EC_POINT_cmp(group, generator, group->generator, ctx)) {
        memcpy(pre->g_pre_comp, gmul, sizeof(pre->g_pre_comp));
        goto done;
    }
    /* custom generator: seed table [0][1] with G itself */
    if ((!BN_to_felem(x_tmp, group->generator->X)) ||
        (!BN_to_felem(y_tmp, group->generator->Y)) ||
        (!BN_to_felem(z_tmp, group->generator->Z)))
        goto err;
    felem_shrink(pre->g_pre_comp[0][1][0], x_tmp);
    felem_shrink(pre->g_pre_comp[0][1][1], y_tmp);
    felem_shrink(pre->g_pre_comp[0][1][2], z_tmp);
    /*
     * compute 2^64*G, 2^128*G, 2^192*G for the first table, 2^32*G, 2^96*G,
     * 2^160*G, 2^224*G for the second one
     */
    for (i = 1; i <= 8; i <<= 1) {
        /* table[1][i] = 2^32 * table[0][i]: one doubling plus 31 more */
        point_double_small(pre->g_pre_comp[1][i][0], pre->g_pre_comp[1][i][1],
                           pre->g_pre_comp[1][i][2], pre->g_pre_comp[0][i][0],
                           pre->g_pre_comp[0][i][1],
                           pre->g_pre_comp[0][i][2]);
        for (j = 0; j < 31; ++j) {
            point_double_small(pre->g_pre_comp[1][i][0],
                               pre->g_pre_comp[1][i][1],
                               pre->g_pre_comp[1][i][2],
                               pre->g_pre_comp[1][i][0],
                               pre->g_pre_comp[1][i][1],
                               pre->g_pre_comp[1][i][2]);
        }
        if (i == 8)
            break;
        /* table[0][2i] = 2^32 * table[1][i]: again 32 doublings in total */
        point_double_small(pre->g_pre_comp[0][2 * i][0],
                           pre->g_pre_comp[0][2 * i][1],
                           pre->g_pre_comp[0][2 * i][2],
                           pre->g_pre_comp[1][i][0], pre->g_pre_comp[1][i][1],
                           pre->g_pre_comp[1][i][2]);
        for (j = 0; j < 31; ++j) {
            point_double_small(pre->g_pre_comp[0][2 * i][0],
                               pre->g_pre_comp[0][2 * i][1],
                               pre->g_pre_comp[0][2 * i][2],
                               pre->g_pre_comp[0][2 * i][0],
                               pre->g_pre_comp[0][2 * i][1],
                               pre->g_pre_comp[0][2 * i][2]);
        }
    }
    for (i = 0; i < 2; i++) {
        /* g_pre_comp[i][0] is the point at infinity */
        memset(pre->g_pre_comp[i][0], 0, sizeof(pre->g_pre_comp[i][0]));
        /* the remaining multiples */
        /* 2^64*G + 2^128*G resp. 2^96*G + 2^160*G */
        point_add_small(pre->g_pre_comp[i][6][0], pre->g_pre_comp[i][6][1],
                        pre->g_pre_comp[i][6][2], pre->g_pre_comp[i][4][0],
                        pre->g_pre_comp[i][4][1], pre->g_pre_comp[i][4][2],
                        pre->g_pre_comp[i][2][0], pre->g_pre_comp[i][2][1],
                        pre->g_pre_comp[i][2][2]);
        /* 2^64*G + 2^192*G resp. 2^96*G + 2^224*G */
        point_add_small(pre->g_pre_comp[i][10][0], pre->g_pre_comp[i][10][1],
                        pre->g_pre_comp[i][10][2], pre->g_pre_comp[i][8][0],
                        pre->g_pre_comp[i][8][1], pre->g_pre_comp[i][8][2],
                        pre->g_pre_comp[i][2][0], pre->g_pre_comp[i][2][1],
                        pre->g_pre_comp[i][2][2]);
        /* 2^128*G + 2^192*G resp. 2^160*G + 2^224*G */
        point_add_small(pre->g_pre_comp[i][12][0], pre->g_pre_comp[i][12][1],
                        pre->g_pre_comp[i][12][2], pre->g_pre_comp[i][8][0],
                        pre->g_pre_comp[i][8][1], pre->g_pre_comp[i][8][2],
                        pre->g_pre_comp[i][4][0], pre->g_pre_comp[i][4][1],
                        pre->g_pre_comp[i][4][2]);
        /*
         * 2^64*G + 2^128*G + 2^192*G resp. 2^96*G + 2^160*G + 2^224*G
         */
        point_add_small(pre->g_pre_comp[i][14][0], pre->g_pre_comp[i][14][1],
                        pre->g_pre_comp[i][14][2], pre->g_pre_comp[i][12][0],
                        pre->g_pre_comp[i][12][1], pre->g_pre_comp[i][12][2],
                        pre->g_pre_comp[i][2][0], pre->g_pre_comp[i][2][1],
                        pre->g_pre_comp[i][2][2]);
        for (j = 1; j < 8; ++j) {
            /* odd multiples: add G resp. 2^32*G */
            point_add_small(pre->g_pre_comp[i][2 * j + 1][0],
                            pre->g_pre_comp[i][2 * j + 1][1],
                            pre->g_pre_comp[i][2 * j + 1][2],
                            pre->g_pre_comp[i][2 * j][0],
                            pre->g_pre_comp[i][2 * j][1],
                            pre->g_pre_comp[i][2 * j][2],
                            pre->g_pre_comp[i][1][0],
                            pre->g_pre_comp[i][1][1],
                            pre->g_pre_comp[i][1][2]);
        }
    }
    /* convert all 31 non-infinity table entries to affine form */
    make_points_affine(31, &(pre->g_pre_comp[0][1]), tmp_smallfelems);

 done:
    SETPRECOMP(group, nistp256, pre);
    pre = NULL;
    ret = 1;

 err:
    BN_CTX_end(ctx);
    EC_POINT_free(generator);
    BN_CTX_free(new_ctx);
    EC_nistp256_pre_comp_free(pre);
    return ret;
}
  2151. int ec_GFp_nistp256_have_precompute_mult(const EC_GROUP *group)
  2152. {
  2153. return HAVEPRECOMP(group, nistp256);
  2154. }
  2155. #endif