/*
 * Copyright 2001-2020 The OpenSSL Project Authors. All Rights Reserved.
 * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include <string.h>
#include <openssl/err.h>

#include "internal/cryptlib.h"
#include "crypto/bn.h"
#include "ec_local.h"
#include "internal/refcount.h"

/*
 * This file implements the wNAF-based interleaving multi-exponentiation method
 * Formerly at:
 *   http://www.informatik.tu-darmstadt.de/TI/Mitarbeiter/moeller.html#multiexp
 * You might now find it here:
 *   http://link.springer.com/chapter/10.1007%2F3-540-45537-X_13
 *   http://www.bmoeller.de/pdf/TI-01-08.multiexp.pdf
 * For multiplication with precomputation, we use wNAF splitting, formerly at:
 *   http://www.informatik.tu-darmstadt.de/TI/Mitarbeiter/moeller.html#fastexp
 */

/* structure for precomputed multiples of the generator */
struct ec_pre_comp_st {
    const EC_GROUP *group;      /* parent EC_GROUP object */
    size_t blocksize;           /* block size for wNAF splitting */
    size_t numblocks;           /* max. number of blocks for which we have
                                 * precomputation */
    size_t w;                   /* window size */
    EC_POINT **points;          /* array with pre-calculated multiples of
                                 * generator: 'num' pointers to EC_POINT
                                 * objects followed by a NULL */
    size_t num;                 /* numblocks * 2^(w-1) */
    CRYPTO_REF_COUNT references;
    CRYPTO_RWLOCK *lock;
};
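
/*-
 * Lifetime note: EC_PRE_COMP objects are reference counted.
 * EC_ec_pre_comp_dup() and EC_ec_pre_comp_free() below adjust 'references'
 * under 'lock', and the table hangs off the parent EC_GROUP (see SETPRECOMP
 * in ec_wNAF_precompute_mult()), so it is shared by every multiplication
 * that uses that group.
 */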

static EC_PRE_COMP *ec_pre_comp_new(const EC_GROUP *group)
{
    EC_PRE_COMP *ret = NULL;

    if (!group)
        return NULL;

    ret = OPENSSL_zalloc(sizeof(*ret));
    if (ret == NULL) {
        ECerr(EC_F_EC_PRE_COMP_NEW, ERR_R_MALLOC_FAILURE);
        return ret;
    }

    ret->group = group;
    ret->blocksize = 8;         /* default */
    ret->w = 4;                 /* default */
    ret->references = 1;

    ret->lock = CRYPTO_THREAD_lock_new();
    if (ret->lock == NULL) {
        ECerr(EC_F_EC_PRE_COMP_NEW, ERR_R_MALLOC_FAILURE);
        OPENSSL_free(ret);
        return NULL;
    }
    return ret;
}

EC_PRE_COMP *EC_ec_pre_comp_dup(EC_PRE_COMP *pre)
{
    int i;

    if (pre != NULL)
        CRYPTO_UP_REF(&pre->references, &i, pre->lock);
    return pre;
}

void EC_ec_pre_comp_free(EC_PRE_COMP *pre)
{
    int i;

    if (pre == NULL)
        return;

    CRYPTO_DOWN_REF(&pre->references, &i, pre->lock);
    REF_PRINT_COUNT("EC_ec", pre);
    if (i > 0)
        return;
    REF_ASSERT_ISNT(i < 0);

    if (pre->points != NULL) {
        EC_POINT **pts;

        for (pts = pre->points; *pts != NULL; pts++)
            EC_POINT_free(*pts);
        OPENSSL_free(pre->points);
    }
    CRYPTO_THREAD_lock_free(pre->lock);
    OPENSSL_free(pre);
}

#define EC_POINT_BN_set_flags(P, flags) do { \
    BN_set_flags((P)->X, (flags)); \
    BN_set_flags((P)->Y, (flags)); \
    BN_set_flags((P)->Z, (flags)); \
} while(0)
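
/*
 * The macro above propagates the given BIGNUM flag (BN_FLG_CONSTTIME in the
 * ladder below) to all three coordinates of a point, so the underlying BN
 * operations are asked to use their constant-time code paths for X, Y and Z
 * alike.
 */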

/*-
 * This function computes a single point multiplication over the EC group,
 * using, at a high level, a Montgomery ladder with conditional swaps, with
 * various timing attack defenses.
 *
 * It performs either a fixed point multiplication
 *          (scalar * generator)
 * when point is NULL, or a variable point multiplication
 *          (scalar * point)
 * when point is not NULL.
 *
 * `scalar` cannot be NULL and should be in the range [0,n) otherwise all
 * constant time bets are off (where n is the cardinality of the EC group).
 *
 * This function expects `group->order` and `group->cofactor` to be well
 * defined and non-zero: it fails with an error code otherwise.
 *
 * NB: This says nothing about the constant-timeness of the ladder step
 * implementation (i.e., the default implementation is based on EC_POINT_add and
 * EC_POINT_dbl, which of course are not constant time themselves) or the
 * underlying multiprecision arithmetic.
 *
 * The product is stored in `r`.
 *
 * This is an internal function: callers are in charge of ensuring that the
 * input parameters `group`, `r`, `scalar` and `ctx` are not NULL.
 *
 * Returns 1 on success, 0 otherwise.
 */
int ec_scalar_mul_ladder(const EC_GROUP *group, EC_POINT *r,
                         const BIGNUM *scalar, const EC_POINT *point,
                         BN_CTX *ctx)
{
    int i, cardinality_bits, group_top, kbit, pbit, Z_is_one;
    EC_POINT *p = NULL;
    EC_POINT *s = NULL;
    BIGNUM *k = NULL;
    BIGNUM *lambda = NULL;
    BIGNUM *cardinality = NULL;
    int ret = 0;

    /* early exit if the input point is the point at infinity */
    if (point != NULL && EC_POINT_is_at_infinity(group, point))
        return EC_POINT_set_to_infinity(group, r);

    if (BN_is_zero(group->order)) {
        ECerr(EC_F_EC_SCALAR_MUL_LADDER, EC_R_UNKNOWN_ORDER);
        return 0;
    }
    if (BN_is_zero(group->cofactor)) {
        ECerr(EC_F_EC_SCALAR_MUL_LADDER, EC_R_UNKNOWN_COFACTOR);
        return 0;
    }

    BN_CTX_start(ctx);

    if (((p = EC_POINT_new(group)) == NULL)
        || ((s = EC_POINT_new(group)) == NULL)) {
        ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_MALLOC_FAILURE);
        goto err;
    }

    if (point == NULL) {
        if (!EC_POINT_copy(p, group->generator)) {
            ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_EC_LIB);
            goto err;
        }
    } else {
        if (!EC_POINT_copy(p, point)) {
            ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_EC_LIB);
            goto err;
        }
    }

    EC_POINT_BN_set_flags(p, BN_FLG_CONSTTIME);
    EC_POINT_BN_set_flags(r, BN_FLG_CONSTTIME);
    EC_POINT_BN_set_flags(s, BN_FLG_CONSTTIME);

    cardinality = BN_CTX_get(ctx);
    lambda = BN_CTX_get(ctx);
    k = BN_CTX_get(ctx);
    if (k == NULL) {
        ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_MALLOC_FAILURE);
        goto err;
    }

    if (!BN_mul(cardinality, group->order, group->cofactor, ctx)) {
        ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_BN_LIB);
        goto err;
    }

    /*
     * Group cardinalities are often on a word boundary.
     * So when we pad the scalar, some timing diff might
     * pop if it needs to be expanded due to carries.
     * So expand ahead of time.
     */
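    /*
     * The two BN_add() calls below grow the scalar to at most
     * scalar + 2*cardinality, so reserving group_top + 2 words for k and
     * lambda is enough to absorb any carry without a reallocation.
     */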
    cardinality_bits = BN_num_bits(cardinality);
    group_top = bn_get_top(cardinality);
    if ((bn_wexpand(k, group_top + 2) == NULL)
        || (bn_wexpand(lambda, group_top + 2) == NULL)) {
        ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_BN_LIB);
        goto err;
    }

    if (!BN_copy(k, scalar)) {
        ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_BN_LIB);
        goto err;
    }

    BN_set_flags(k, BN_FLG_CONSTTIME);

    if ((BN_num_bits(k) > cardinality_bits) || (BN_is_negative(k))) {
        /*-
         * this is an unusual input, and we don't guarantee
         * constant-timeness
         */
        if (!BN_nnmod(k, k, cardinality, ctx)) {
            ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_BN_LIB);
            goto err;
        }
    }

    if (!BN_add(lambda, k, cardinality)) {
        ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_BN_LIB);
        goto err;
    }
    BN_set_flags(lambda, BN_FLG_CONSTTIME);
    if (!BN_add(k, lambda, cardinality)) {
        ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_BN_LIB);
        goto err;
    }
    /*
     * lambda := scalar + cardinality
     * k := scalar + 2*cardinality
     */
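    /*
     * Both lambda and k are congruent to the scalar modulo the cardinality.
     * The constant-time swap below leaves in k whichever of the two has its
     * most significant bit at the fixed position cardinality_bits, so the
     * padded scalar always has the same bit length and the ladder can start
     * from a known top bit (pbit = 1 below).
     */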
    kbit = BN_is_bit_set(lambda, cardinality_bits);
    BN_consttime_swap(kbit, k, lambda, group_top + 2);

    group_top = bn_get_top(group->field);
    if ((bn_wexpand(s->X, group_top) == NULL)
        || (bn_wexpand(s->Y, group_top) == NULL)
        || (bn_wexpand(s->Z, group_top) == NULL)
        || (bn_wexpand(r->X, group_top) == NULL)
        || (bn_wexpand(r->Y, group_top) == NULL)
        || (bn_wexpand(r->Z, group_top) == NULL)
        || (bn_wexpand(p->X, group_top) == NULL)
        || (bn_wexpand(p->Y, group_top) == NULL)
        || (bn_wexpand(p->Z, group_top) == NULL)) {
        ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_BN_LIB);
        goto err;
    }

    /* ensure input point is in affine coords for ladder step efficiency */
    if (!p->Z_is_one && !EC_POINT_make_affine(group, p, ctx)) {
        ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_EC_LIB);
        goto err;
    }

    /* Initialize the Montgomery ladder */
    if (!ec_point_ladder_pre(group, r, s, p, ctx)) {
        ECerr(EC_F_EC_SCALAR_MUL_LADDER, EC_R_LADDER_PRE_FAILURE);
        goto err;
    }

    /* top bit is a 1, in a fixed pos */
    pbit = 1;

#define EC_POINT_CSWAP(c, a, b, w, t) do {         \
        BN_consttime_swap(c, (a)->X, (b)->X, w);   \
        BN_consttime_swap(c, (a)->Y, (b)->Y, w);   \
        BN_consttime_swap(c, (a)->Z, (b)->Z, w);   \
        t = ((a)->Z_is_one ^ (b)->Z_is_one) & (c); \
        (a)->Z_is_one ^= (t);                      \
        (b)->Z_is_one ^= (t);                      \
} while(0)
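
    /*
     * The last three lines of EC_POINT_CSWAP swap the Z_is_one flags without
     * branching: t is non-zero only when the flags differ and the condition
     * bit c is set, in which case XOR-ing t into both flags exchanges them.
     */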

    /*-
     * The ladder step, with branches, is
     *
     * k[i] == 0: S = add(R, S), R = dbl(R)
     * k[i] == 1: R = add(S, R), S = dbl(S)
     *
     * Swapping R, S conditionally on k[i] leaves you with state
     *
     * k[i] == 0: T, U = R, S
     * k[i] == 1: T, U = S, R
     *
     * Then perform the ECC ops.
     *
     * U = add(T, U)
     * T = dbl(T)
     *
     * Which leaves you with state
     *
     * k[i] == 0: U = add(R, S), T = dbl(R)
     * k[i] == 1: U = add(S, R), T = dbl(S)
     *
     * Swapping T, U conditionally on k[i] leaves you with state
     *
     * k[i] == 0: R, S = T, U
     * k[i] == 1: R, S = U, T
     *
     * Which leaves you with state
     *
     * k[i] == 0: S = add(R, S), R = dbl(R)
     * k[i] == 1: R = add(S, R), S = dbl(S)
     *
     * So we get the same logic, but instead of a branch it's a
     * conditional swap, followed by ECC ops, then another conditional swap.
     *
     * Optimization: The end of iteration i and start of i-1 looks like
     *
     * ...
     * CSWAP(k[i], R, S)
     * ECC
     * CSWAP(k[i], R, S)
     * (next iteration)
     * CSWAP(k[i-1], R, S)
     * ECC
     * CSWAP(k[i-1], R, S)
     * ...
     *
     * So instead of two contiguous swaps, you can merge the condition
     * bits and do a single swap.
     *
     * k[i]   k[i-1]   Outcome
     * 0      0        No Swap
     * 0      1        Swap
     * 1      0        Swap
     * 1      1        No Swap
     *
     * This is XOR. pbit tracks the previous bit of k.
     */
    for (i = cardinality_bits - 1; i >= 0; i--) {
        kbit = BN_is_bit_set(k, i) ^ pbit;
        EC_POINT_CSWAP(kbit, r, s, group_top, Z_is_one);

        /* Perform a single step of the Montgomery ladder */
        if (!ec_point_ladder_step(group, r, s, p, ctx)) {
            ECerr(EC_F_EC_SCALAR_MUL_LADDER, EC_R_LADDER_STEP_FAILURE);
            goto err;
        }
        /*
         * pbit logic merges this cswap with that of the
         * next iteration
         */
        pbit ^= kbit;
    }
    /* one final cswap to move the right value into r */
    EC_POINT_CSWAP(pbit, r, s, group_top, Z_is_one);
#undef EC_POINT_CSWAP

    /* Finalize ladder (and recover full point coordinates) */
    if (!ec_point_ladder_post(group, r, s, p, ctx)) {
        ECerr(EC_F_EC_SCALAR_MUL_LADDER, EC_R_LADDER_POST_FAILURE);
        goto err;
    }

    ret = 1;

 err:
    EC_POINT_free(p);
    EC_POINT_clear_free(s);
    BN_CTX_end(ctx);
    return ret;
}
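
#if 0
/*-
 * Minimal usage sketch (not compiled; the helper name is hypothetical): how a
 * caller such as ec_wNAF_mul() below drives the ladder for the two
 * secret-scalar cases.  The NULL-point convention for the fixed-point
 * (generator) case follows the contract documented above.
 */
static int ladder_usage_sketch(const EC_GROUP *group, EC_POINT *r,
                               const BIGNUM *secret, const EC_POINT *peer,
                               BN_CTX *ctx)
{
    /* fixed-point multiplication: r := secret * generator */
    if (peer == NULL)
        return ec_scalar_mul_ladder(group, r, secret, NULL, ctx);
    /* variable-point multiplication: r := secret * peer */
    return ec_scalar_mul_ladder(group, r, secret, peer, ctx);
}
#endif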

#undef EC_POINT_BN_set_flags

/*
 * TODO: table should be optimised for the wNAF-based implementation,
 * sometimes smaller windows will give better performance (thus the
 * boundaries should be increased)
 */
#define EC_window_bits_for_scalar_size(b) \
        ((size_t) \
         ((b) >= 2000 ? 6 : \
          (b) >=  800 ? 5 : \
          (b) >=  300 ? 4 : \
          (b) >=   70 ? 3 : \
          (b) >=   20 ? 2 : \
          1))
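
/*
 * For example, a 256-bit scalar falls in the [70, 300) band, so the window
 * size is 3 and 2^(3-1) = 4 odd multiples (1*P, 3*P, 5*P, 7*P) are
 * precomputed per point in ec_wNAF_mul() below.
 */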

/*-
 * Compute
 *      \sum scalars[i]*points[i],
 * also including
 *      scalar*generator
 * in the addition if scalar != NULL
 */
int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *scalar,
                size_t num, const EC_POINT *points[], const BIGNUM *scalars[],
                BN_CTX *ctx)
{
    const EC_POINT *generator = NULL;
    EC_POINT *tmp = NULL;
    size_t totalnum;
    size_t blocksize = 0, numblocks = 0; /* for wNAF splitting */
    size_t pre_points_per_block = 0;
    size_t i, j;
    int k;
    int r_is_inverted = 0;
    int r_is_at_infinity = 1;
    size_t *wsize = NULL;       /* individual window sizes */
    signed char **wNAF = NULL;  /* individual wNAFs */
    size_t *wNAF_len = NULL;
    size_t max_len = 0;
    size_t num_val;
    EC_POINT **val = NULL;      /* precomputation */
    EC_POINT **v;
    EC_POINT ***val_sub = NULL; /* pointers to sub-arrays of 'val' or
                                 * 'pre_comp->points' */
    const EC_PRE_COMP *pre_comp = NULL;
    int num_scalar = 0;         /* flag: will be set to 1 if 'scalar' must be
                                 * treated like other scalars, i.e.
                                 * precomputation is not available */
    int ret = 0;

    if (!BN_is_zero(group->order) && !BN_is_zero(group->cofactor)) {
        /*-
         * Handle the common cases where the scalar is secret, enforcing a
         * scalar multiplication implementation based on a Montgomery ladder,
         * with various timing attack defenses.
         */
        if ((scalar != group->order) && (scalar != NULL) && (num == 0)) {
            /*-
             * In this case we want to compute scalar * GeneratorPoint: this
             * codepath is reached most prominently by (ephemeral) key
             * generation of EC cryptosystems (i.e. ECDSA keygen and sign setup,
             * ECDH keygen/first half), where the scalar is always secret. This
             * is why we ignore if BN_FLG_CONSTTIME is actually set and we
             * always call the ladder version.
             */
            return ec_scalar_mul_ladder(group, r, scalar, NULL, ctx);
        }
        if ((scalar == NULL) && (num == 1) && (scalars[0] != group->order)) {
            /*-
             * In this case we want to compute scalar * VariablePoint: this
             * codepath is reached most prominently by the second half of ECDH,
             * where the secret scalar is multiplied by the peer's public point.
             * To protect the secret scalar, we ignore if BN_FLG_CONSTTIME is
             * actually set and we always call the ladder version.
             */
            return ec_scalar_mul_ladder(group, r, scalars[0], points[0], ctx);
        }
    }

    if (scalar != NULL) {
        generator = EC_GROUP_get0_generator(group);
        if (generator == NULL) {
            ECerr(EC_F_EC_WNAF_MUL, EC_R_UNDEFINED_GENERATOR);
            goto err;
        }

        /* look if we can use precomputed multiples of generator */
        pre_comp = group->pre_comp.ec;
        if (pre_comp && pre_comp->numblocks
            && (EC_POINT_cmp(group, generator, pre_comp->points[0], ctx) ==
                0)) {
            blocksize = pre_comp->blocksize;

            /*
             * determine maximum number of blocks that wNAF splitting may
             * yield (NB: maximum wNAF length is bit length plus one)
             */
            numblocks = (BN_num_bits(scalar) / blocksize) + 1;

            /*
             * we cannot use more blocks than we have precomputation for
             */
            if (numblocks > pre_comp->numblocks)
                numblocks = pre_comp->numblocks;

            pre_points_per_block = (size_t)1 << (pre_comp->w - 1);

            /* check that pre_comp looks sane */
            if (pre_comp->num != (pre_comp->numblocks * pre_points_per_block)) {
                ECerr(EC_F_EC_WNAF_MUL, ERR_R_INTERNAL_ERROR);
                goto err;
            }
        } else {
            /* can't use precomputation */
            pre_comp = NULL;
            numblocks = 1;
            num_scalar = 1;     /* treat 'scalar' like 'num'-th element of
                                 * 'scalars' */
        }
    }

    totalnum = num + numblocks;

    wsize = OPENSSL_malloc(totalnum * sizeof(wsize[0]));
    wNAF_len = OPENSSL_malloc(totalnum * sizeof(wNAF_len[0]));
    /* include space for pivot */
    wNAF = OPENSSL_malloc((totalnum + 1) * sizeof(wNAF[0]));
    val_sub = OPENSSL_malloc(totalnum * sizeof(val_sub[0]));

    /* Ensure wNAF is initialised in case we end up going to err */
    if (wNAF != NULL)
        wNAF[0] = NULL;         /* preliminary pivot */

    if (wsize == NULL || wNAF_len == NULL || wNAF == NULL || val_sub == NULL) {
        ECerr(EC_F_EC_WNAF_MUL, ERR_R_MALLOC_FAILURE);
        goto err;
    }

    /*
     * num_val will be the total number of temporarily precomputed points
     */
    num_val = 0;

    for (i = 0; i < num + num_scalar; i++) {
        size_t bits;

        bits = i < num ? BN_num_bits(scalars[i]) : BN_num_bits(scalar);
        wsize[i] = EC_window_bits_for_scalar_size(bits);
        num_val += (size_t)1 << (wsize[i] - 1);
        wNAF[i + 1] = NULL;     /* make sure we always have a pivot */
        wNAF[i] =
            bn_compute_wNAF((i < num ? scalars[i] : scalar), wsize[i],
                            &wNAF_len[i]);
        if (wNAF[i] == NULL)
            goto err;
        if (wNAF_len[i] > max_len)
            max_len = wNAF_len[i];
    }

    if (numblocks) {
        /* we go here iff scalar != NULL */

        if (pre_comp == NULL) {
            if (num_scalar != 1) {
                ECerr(EC_F_EC_WNAF_MUL, ERR_R_INTERNAL_ERROR);
                goto err;
            }
            /* we have already generated a wNAF for 'scalar' */
        } else {
            signed char *tmp_wNAF = NULL;
            size_t tmp_len = 0;

            if (num_scalar != 0) {
                ECerr(EC_F_EC_WNAF_MUL, ERR_R_INTERNAL_ERROR);
                goto err;
            }

            /*
             * use the window size for which we have precomputation
             */
            wsize[num] = pre_comp->w;
            tmp_wNAF = bn_compute_wNAF(scalar, wsize[num], &tmp_len);
            if (!tmp_wNAF)
                goto err;

            if (tmp_len <= max_len) {
                /*
                 * One of the other wNAFs is at least as long as the wNAF
                 * belonging to the generator, so wNAF splitting will not buy
                 * us anything.
                 */
                numblocks = 1;
                totalnum = num + 1; /* don't use wNAF splitting */
                wNAF[num] = tmp_wNAF;
                wNAF[num + 1] = NULL;
                wNAF_len[num] = tmp_len;
                /*
                 * pre_comp->points starts with the points that we need here:
                 */
                val_sub[num] = pre_comp->points;
            } else {
                /*
                 * don't include tmp_wNAF directly into wNAF array - use wNAF
                 * splitting and include the blocks
                 */

                signed char *pp;
                EC_POINT **tmp_points;

                if (tmp_len < numblocks * blocksize) {
                    /*
                     * possibly we can do with fewer blocks than estimated
                     */
                    numblocks = (tmp_len + blocksize - 1) / blocksize;
                    if (numblocks > pre_comp->numblocks) {
                        ECerr(EC_F_EC_WNAF_MUL, ERR_R_INTERNAL_ERROR);
                        OPENSSL_free(tmp_wNAF);
                        goto err;
                    }
                    totalnum = num + numblocks;
                }

                /* split wNAF in 'numblocks' parts */
                pp = tmp_wNAF;
                tmp_points = pre_comp->points;

                for (i = num; i < totalnum; i++) {
                    if (i < totalnum - 1) {
                        wNAF_len[i] = blocksize;
                        if (tmp_len < blocksize) {
                            ECerr(EC_F_EC_WNAF_MUL, ERR_R_INTERNAL_ERROR);
                            OPENSSL_free(tmp_wNAF);
                            goto err;
                        }
                        tmp_len -= blocksize;
                    } else
                        /*
                         * last block gets whatever is left (this could be
                         * more or less than 'blocksize'!)
                         */
                        wNAF_len[i] = tmp_len;

                    wNAF[i + 1] = NULL;
                    wNAF[i] = OPENSSL_malloc(wNAF_len[i]);
                    if (wNAF[i] == NULL) {
                        ECerr(EC_F_EC_WNAF_MUL, ERR_R_MALLOC_FAILURE);
                        OPENSSL_free(tmp_wNAF);
                        goto err;
                    }
                    memcpy(wNAF[i], pp, wNAF_len[i]);
                    if (wNAF_len[i] > max_len)
                        max_len = wNAF_len[i];

                    if (*tmp_points == NULL) {
                        ECerr(EC_F_EC_WNAF_MUL, ERR_R_INTERNAL_ERROR);
                        OPENSSL_free(tmp_wNAF);
                        goto err;
                    }
                    val_sub[i] = tmp_points;
                    tmp_points += pre_points_per_block;
                    pp += blocksize;
                }
                OPENSSL_free(tmp_wNAF);
            }
        }
    }

    /*
     * All points we precompute now go into a single array 'val'.
     * 'val_sub[i]' is a pointer to the subarray for the i-th point, or to a
     * subarray of 'pre_comp->points' if we already have precomputation.
     */
    val = OPENSSL_malloc((num_val + 1) * sizeof(val[0]));
    if (val == NULL) {
        ECerr(EC_F_EC_WNAF_MUL, ERR_R_MALLOC_FAILURE);
        goto err;
    }
    val[num_val] = NULL;        /* pivot element */

    /* allocate points for precomputation */
    v = val;
    for (i = 0; i < num + num_scalar; i++) {
        val_sub[i] = v;
        for (j = 0; j < ((size_t)1 << (wsize[i] - 1)); j++) {
            *v = EC_POINT_new(group);
            if (*v == NULL)
                goto err;
            v++;
        }
    }
    if (!(v == val + num_val)) {
        ECerr(EC_F_EC_WNAF_MUL, ERR_R_INTERNAL_ERROR);
        goto err;
    }

    if ((tmp = EC_POINT_new(group)) == NULL)
        goto err;

    /*-
     * prepare precomputed values:
     *    val_sub[i][0] :=     points[i]
     *    val_sub[i][1] := 3 * points[i]
     *    val_sub[i][2] := 5 * points[i]
     *    ...
     */
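    /*
     * In the loop below, tmp holds twice the first table entry (points[i] or
     * the generator); each EC_POINT_add() then advances the previous entry by
     * that amount, producing exactly the odd multiples listed above.
     */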
    for (i = 0; i < num + num_scalar; i++) {
        if (i < num) {
            if (!EC_POINT_copy(val_sub[i][0], points[i]))
                goto err;
        } else {
            if (!EC_POINT_copy(val_sub[i][0], generator))
                goto err;
        }

        if (wsize[i] > 1) {
            if (!EC_POINT_dbl(group, tmp, val_sub[i][0], ctx))
                goto err;
            for (j = 1; j < ((size_t)1 << (wsize[i] - 1)); j++) {
                if (!EC_POINT_add
                    (group, val_sub[i][j], val_sub[i][j - 1], tmp, ctx))
                    goto err;
            }
        }
    }

    if (!EC_POINTs_make_affine(group, num_val, val, ctx))
        goto err;

    r_is_at_infinity = 1;

    for (k = max_len - 1; k >= 0; k--) {
        if (!r_is_at_infinity) {
            if (!EC_POINT_dbl(group, r, r, ctx))
                goto err;
        }

        for (i = 0; i < totalnum; i++) {
            if (wNAF_len[i] > (size_t)k) {
                int digit = wNAF[i][k];
                int is_neg;

                if (digit) {
                    is_neg = digit < 0;

                    if (is_neg)
                        digit = -digit;

                    if (is_neg != r_is_inverted) {
                        if (!r_is_at_infinity) {
                            if (!EC_POINT_invert(group, r, ctx))
                                goto err;
                        }
                        r_is_inverted = !r_is_inverted;
                    }

                    /* digit > 0 */

                    if (r_is_at_infinity) {
                        if (!EC_POINT_copy(r, val_sub[i][digit >> 1]))
                            goto err;

                        /*-
                         * Apply coordinate blinding for EC_POINT.
                         *
                         * The underlying EC_METHOD can optionally implement this function:
                         * ec_point_blind_coordinates() returns 0 in case of errors or 1 on
                         * success or if coordinate blinding is not implemented for this
                         * group.
                         */
                        if (!ec_point_blind_coordinates(group, r, ctx)) {
                            ECerr(EC_F_EC_WNAF_MUL, EC_R_POINT_COORDINATES_BLIND_FAILURE);
                            goto err;
                        }

                        r_is_at_infinity = 0;
                    } else {
                        if (!EC_POINT_add
                            (group, r, r, val_sub[i][digit >> 1], ctx))
                            goto err;
                    }
                }
            }
        }
    }

    if (r_is_at_infinity) {
        if (!EC_POINT_set_to_infinity(group, r))
            goto err;
    } else {
        if (r_is_inverted)
            if (!EC_POINT_invert(group, r, ctx))
                goto err;
    }

    ret = 1;

 err:
    EC_POINT_free(tmp);
    OPENSSL_free(wsize);
    OPENSSL_free(wNAF_len);
    if (wNAF != NULL) {
        signed char **w;

        for (w = wNAF; *w != NULL; w++)
            OPENSSL_free(*w);

        OPENSSL_free(wNAF);
    }
    if (val != NULL) {
        for (v = val; *v != NULL; v++)
            EC_POINT_clear_free(*v);

        OPENSSL_free(val);
    }
    OPENSSL_free(val_sub);
    return ret;
}
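
#if 0
/*-
 * Minimal usage sketch (not compiled; the helper name is hypothetical):
 * computing r := scalar*G + s0*p0 + s1*p1 with the interleaved wNAF routine
 * above.  The public EC_POINTs_mul()/EC_POINT_mul() entry points normally
 * dispatch here for the default EC_METHODs.
 */
static int wnaf_mul_usage_sketch(const EC_GROUP *group, EC_POINT *r,
                                 const BIGNUM *scalar,
                                 const EC_POINT *p0, const BIGNUM *s0,
                                 const EC_POINT *p1, const BIGNUM *s1,
                                 BN_CTX *ctx)
{
    const EC_POINT *points[2];
    const BIGNUM *scalars[2];

    points[0] = p0;
    scalars[0] = s0;
    points[1] = p1;
    scalars[1] = s1;
    /* scalar may be NULL if no generator term is wanted */
    return ec_wNAF_mul(group, r, scalar, 2, points, scalars, ctx);
}
#endif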

/*-
 * ec_wNAF_precompute_mult()
 * creates an EC_PRE_COMP object with precomputed multiples of the generator
 * for use with wNAF splitting as implemented in ec_wNAF_mul().
 *
 * 'pre_comp->points' is an array of multiples of the generator
 * of the following form:
 * points[0] =     generator;
 * points[1] = 3 * generator;
 * ...
 * points[2^(w-1)-1]     = (2^(w-1)-1) * generator;
 * points[2^(w-1)]       = 2^blocksize * generator;
 * points[2^(w-1)+1]     = 3 * 2^blocksize * generator;
 * ...
 * points[2^(w-1)*(numblocks-1)-1] = (2^(w-1)) * 2^(blocksize*(numblocks-2)) * generator
 * points[2^(w-1)*(numblocks-1)]   = 2^(blocksize*(numblocks-1)) * generator
 * ...
 * points[2^(w-1)*numblocks-1]     = (2^(w-1)) * 2^(blocksize*(numblocks-1)) * generator
 * points[2^(w-1)*numblocks]       = NULL
 */
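/*
 * With the defaults chosen below (blocksize = 8, w = 4, adjusted upwards for
 * large groups), each block contributes 2^(w-1) = 8 entries, and block b
 * (b = 0, ..., numblocks-1) holds (2*j+1) * 2^(8*b) * generator for
 * j = 0, ..., 7, starting at index 8*b.
 */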
int ec_wNAF_precompute_mult(EC_GROUP *group, BN_CTX *ctx)
{
    const EC_POINT *generator;
    EC_POINT *tmp_point = NULL, *base = NULL, **var;
    BN_CTX *new_ctx = NULL;
    const BIGNUM *order;
    size_t i, bits, w, pre_points_per_block, blocksize, numblocks, num;
    EC_POINT **points = NULL;
    EC_PRE_COMP *pre_comp;
    int ret = 0;

    /* if there is an old EC_PRE_COMP object, throw it away */
    EC_pre_comp_free(group);
    if ((pre_comp = ec_pre_comp_new(group)) == NULL)
        return 0;

    generator = EC_GROUP_get0_generator(group);
    if (generator == NULL) {
        ECerr(EC_F_EC_WNAF_PRECOMPUTE_MULT, EC_R_UNDEFINED_GENERATOR);
        goto err;
    }

    if (ctx == NULL) {
        ctx = new_ctx = BN_CTX_new();
        if (ctx == NULL)
            goto err;
    }

    BN_CTX_start(ctx);

    order = EC_GROUP_get0_order(group);
    if (order == NULL)
        goto err;
    if (BN_is_zero(order)) {
        ECerr(EC_F_EC_WNAF_PRECOMPUTE_MULT, EC_R_UNKNOWN_ORDER);
        goto err;
    }

    bits = BN_num_bits(order);
    /*
     * The following parameters mean we precompute (approximately) one point
     * per bit. TBD: The combination 8, 4 is perfect for 160 bits; for other
     * bit lengths, other parameter combinations might provide better
     * efficiency.
     */
    blocksize = 8;
    w = 4;
    if (EC_window_bits_for_scalar_size(bits) > w) {
        /* let's not make the window too small ... */
        w = EC_window_bits_for_scalar_size(bits);
    }

    numblocks = (bits + blocksize - 1) / blocksize; /* max. number of blocks
                                                     * to use for wNAF
                                                     * splitting */

    pre_points_per_block = (size_t)1 << (w - 1);
    num = pre_points_per_block * numblocks; /* number of points to compute
                                             * and store */

    points = OPENSSL_malloc(sizeof(*points) * (num + 1));
    if (points == NULL) {
        ECerr(EC_F_EC_WNAF_PRECOMPUTE_MULT, ERR_R_MALLOC_FAILURE);
        goto err;
    }

    var = points;
    var[num] = NULL;            /* pivot */
    for (i = 0; i < num; i++) {
        if ((var[i] = EC_POINT_new(group)) == NULL) {
            ECerr(EC_F_EC_WNAF_PRECOMPUTE_MULT, ERR_R_MALLOC_FAILURE);
            goto err;
        }
    }

    if ((tmp_point = EC_POINT_new(group)) == NULL
        || (base = EC_POINT_new(group)) == NULL) {
        ECerr(EC_F_EC_WNAF_PRECOMPUTE_MULT, ERR_R_MALLOC_FAILURE);
        goto err;
    }

    if (!EC_POINT_copy(base, generator))
        goto err;

    /* do the precomputation */
    for (i = 0; i < numblocks; i++) {
        size_t j;

        if (!EC_POINT_dbl(group, tmp_point, base, ctx))
            goto err;

        if (!EC_POINT_copy(*var++, base))
            goto err;

        for (j = 1; j < pre_points_per_block; j++, var++) {
            /*
             * calculate odd multiples of the current base point
             */
            if (!EC_POINT_add(group, *var, tmp_point, *(var - 1), ctx))
                goto err;
        }

        if (i < numblocks - 1) {
            /*
             * get the next base (multiply current one by 2^blocksize)
             */
            size_t k;

            if (blocksize <= 2) {
                ECerr(EC_F_EC_WNAF_PRECOMPUTE_MULT, ERR_R_INTERNAL_ERROR);
                goto err;
            }

            if (!EC_POINT_dbl(group, base, tmp_point, ctx))
                goto err;
            for (k = 2; k < blocksize; k++) {
                if (!EC_POINT_dbl(group, base, base, ctx))
                    goto err;
            }
        }
    }

    if (!EC_POINTs_make_affine(group, num, points, ctx))
        goto err;

    pre_comp->group = group;
    pre_comp->blocksize = blocksize;
    pre_comp->numblocks = numblocks;
    pre_comp->w = w;
    pre_comp->points = points;
    points = NULL;
    pre_comp->num = num;
    SETPRECOMP(group, ec, pre_comp);
    pre_comp = NULL;
    ret = 1;

 err:
    BN_CTX_end(ctx);
    BN_CTX_free(new_ctx);
    EC_ec_pre_comp_free(pre_comp);
    if (points) {
        EC_POINT **p;

        for (p = points; *p != NULL; p++)
            EC_POINT_free(*p);
        OPENSSL_free(points);
    }
    EC_POINT_free(tmp_point);
    EC_POINT_free(base);
    return ret;
}
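
/*-
 * Usage sketch: applications normally trigger this precomputation through the
 * public API rather than by calling ec_wNAF_precompute_mult() directly, and
 * whether it lands here depends on the group's EC_METHOD, e.g.:
 *
 *     if (!EC_GROUP_have_precompute_mult(group))
 *         EC_GROUP_precompute_mult(group, NULL);
 *
 * After that, generator multiplications in ec_wNAF_mul() can use the stored
 * multiples via wNAF splitting.
 */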

int ec_wNAF_have_precompute_mult(const EC_GROUP *group)
{
    return HAVEPRECOMP(group, ec);
}