lbn64.c 115 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
737783779378037813782378337843785378637873788378937903791379237933794379537963797379837993800380138023803380438053806380738083809381038113812381338143815381638173818381938203821382238233824382538263827382838293830383138323833383438353836383738383839384038413842384338443845384638473848384938503851385238533854385538563857385838593860386138623863386438653866386738683869387038713872387338743875387638773878387938803881388238833884388538863887388838893890389138923893389438953896389738983899390039013902390339043905390639073908390939103911391239133914391539163917391839193920392139223923392439253926392739283929393039313932393339343935393639373938393939403941394239433944394539463947394839493950395139523953395439553956395739583959396039613962396339643965396639673968396939703971397239733974397539763977397839793980398139823983398439853986398739883989399039913992399339943995399639973998399940004001400240034004400540064007400840094010401140124013401440154016401740184019402040214022402340244025402640274028402940304031403240334034403540364037403840394040404140424043404440454046404740484049405040514052405340544055405640574058405940604061406240634064406540664067
  1. /*
  2. * Copyright (c) 1995 Colin Plumb. All rights reserved.
  3. * For licensing and other legal details, see the file legal.c.
  4. *
  5. * lbn64.c - Low-level bignum routines, 64-bit version.
  6. *
  7. * NOTE: the magic constants "64" and "128" appear in many places in this
  8. * file, including inside identifiers. Because it is not possible to
  9. * ask "#ifdef" of a macro expansion, it is not possible to use the
  10. * preprocessor to conditionalize these properly. Thus, this file is
  11. * intended to be edited with textual search and replace to produce
  12. * alternate word size versions. Any reference to the number of bits
  13. * in a word must be the string "64", and that string must not appear
  14. * otherwise. Any reference to twice this number must appear as "128",
  15. * which likewise must not appear otherwise. Is that clear?
  16. *
  17. * Remember, when doubling the bit size replace the larger number (128)
  18. * first, then the smaller (64). When halving the bit size, do the
  19. * opposite. Otherwise, things will get weird. Also, be sure to replace
  20. * every instance that appears. (:%s/foo/bar/g in vi)
  21. *
  22. * These routines work with a pointer to the least-significant end of
  23. * an array of WORD64s. The BIG(x), LITTLE(y) and BIGLTTLE(x,y) macros
  24. * defined in lbn.h (which expand to x on a big-endian machine and y on a
  25. * little-endian machine) are used to conditionalize the code to work
  26. * either way. If you have no assembly primitives, it doesn't matter.
  27. * Note that on a big-endian machine, the least-significant-end pointer
  28. * is ONE PAST THE END. The bytes are ptr[-1] through ptr[-len].
  29. * On little-endian, they are ptr[0] through ptr[len-1]. This makes
  30. * perfect sense if you consider pointers to point *between* bytes rather
  31. * than at them.
  32. *
  33. * Because the array index values are unsigned integers, ptr[-i]
  34. * may not work properly, since the index -i is evaluated as an unsigned,
  35. * and if pointers are wider, zero-extension will produce a positive
  36. * number rather than the needed negative. The expression used in this
  37. * code, *(ptr-i) will, however, work. (The array syntax is equivalent
  38. * to *(ptr+-i), which is a pretty subtle difference.)
  39. *
  40. * Many of these routines will get very unhappy if fed zero-length inputs.
  41. * They use assert() to enforce this. A higher layer of code must make
  42. * sure that these aren't called with zero-length inputs.
  43. *
  44. * Any of these routines can be replaced with more efficient versions
  45. * elsewhere, by just #defining their names. If one of the names
  46. * is #defined, the C code is not compiled in and no declaration is
  47. * made. Use the BNINCLUDE file to do that. Typically, you compile
  48. * asm subroutines with the same name and just, e.g.
  49. * #define lbnMulAdd1_64 lbnMulAdd1_64
  50. *
  51. * If you want to write asm routines, start with lbnMulAdd1_64().
  52. * This is the workhorse of modular exponentiation. lbnMulN1_64() is
  53. * also used a fair bit, although not as much and it's defined in terms
  54. * of lbnMulAdd1_64 if that has a custom version. lbnMulSub1_64 and
  55. * lbnDiv21_64 are used in the usual division and remainder finding.
  56. * (Not the Montgomery reduction used in modular exponentiation, though.)
  57. * Once you have lbnMulAdd1_64 defined, writing the other two should
  58. * be pretty easy. (Just make sure you get the sign of the subtraction
  59. * in lbnMulSub1_64 right - it's dest = dest - source * k.)
  60. *
  61. * The only definitions that absolutely need a double-word (BNWORD128)
  62. * type are lbnMulAdd1_64 and lbnMulSub1_64; if those are provided,
  63. * the rest follows. lbnDiv21_64, however, is a lot slower unless you
  64. * have them, and lbnModQ_64 takes after it. That one is used quite a
  65. * bit for prime sieving.
  66. */
  67. #ifndef HAVE_CONFIG_H
  68. #define HAVE_CONFIG_H 0
  69. #endif
  70. #if HAVE_CONFIG_H
  71. #include "bnconfig.h"
  72. #endif
  73. /*
  74. * Some compilers complain about #if FOO if FOO isn't defined,
  75. * so do the ANSI-mandated thing explicitly...
  76. */
  77. #ifndef NO_ASSERT_H
  78. #define NO_ASSERT_H 0
  79. #endif
  80. #ifndef NO_STRING_H
  81. #define NO_STRING_H 0
  82. #endif
  83. #ifndef HAVE_STRINGS_H
  84. #define HAVE_STRINGS_H 0
  85. #endif
  86. #if !NO_ASSERT_H
  87. #include <assert.h>
  88. #else
  89. #define assert(x) (void)0
  90. #endif
  91. #if !NO_STRING_H
  92. #include <string.h> /* For memcpy */
  93. #elif HAVE_STRINGS_H
  94. #include <strings.h>
  95. #endif
  96. #include "lbn.h"
  97. #include "lbn64.h"
  98. #include "lbnmem.h"
  99. #include "kludge.h"
  100. #ifndef BNWORD64
  101. #error 64-bit bignum library requires a 64-bit data type
  102. #endif
  103. /* If this is defined, include bnYield() calls */
  104. #if BNYIELD
  105. extern int (*bnYield)(void); /* From bn.c */
  106. #endif
  107. /*
  108. * Most of the multiply (and Montgomery reduce) routines use an outer
  109. * loop that iterates over one of the operands - a so-called operand
  110. * scanning approach. One big advantage of this is that the assembly
  111. * support routines are simpler. The loops can be rearranged to have
  112. * an outer loop that iterates over the product, a so-called product
  113. * scanning approach. This has the advantage of writing less data
  114. * and doing fewer adds to memory, so is supposedly faster. Some
  115. * code has been written using a product-scanning approach, but
  116. * it appears to be slower, so it is turned off by default. Some
  117. * experimentation would be appreciated.
  118. *
  119. * (The code is also annoying to get right and not very well commented,
  120. * one of my pet peeves about math libraries. I'm sorry.)
  121. */
  122. #ifndef PRODUCT_SCAN
  123. #define PRODUCT_SCAN 0
  124. #endif
  125. /*
  126. * Copy an array of words. <Marvin mode on> Thrilling, isn't it? </Marvin>
  127. * This is a good example of how the byte offsets and BIGLITTLE() macros work.
  128. * Another alternative would have been
  129. * memcpy(dest BIG(-len), src BIG(-len), len*sizeof(BNWORD64)), but I find that
  130. * putting operators into conditional macros is confusing.
  131. */
  132. #ifndef lbnCopy_64
  133. void
  134. lbnCopy_64(BNWORD64 *dest, BNWORD64 const *src, unsigned len)
  135. {
  136. memcpy(BIGLITTLE(dest-len,dest), BIGLITTLE(src-len,src),
  137. len * sizeof(*src));
  138. }
  139. #endif /* !lbnCopy_64 */
  140. /*
  141. * Fill n words with zero. This does it manually rather than calling
  142. * memset because it can assume alignment to make things faster while
  143. * memset can't. Note how big-endian numbers are naturally addressed
  144. * using predecrement, while little-endian is postincrement.
  145. */
  146. #ifndef lbnZero_64
  147. void
  148. lbnZero_64(BNWORD64 *num, unsigned len)
  149. {
  150. while (len--)
  151. BIGLITTLE(*--num,*num++) = 0;
  152. }
  153. #endif /* !lbnZero_64 */
  154. /*
  155. * Negate an array of words.
  156. * Negation is subtraction from zero. Negating low-order words
  157. * entails doing nothing until a non-zero word is hit. Once that
  158. * is negated, a borrow is generated and never dies until the end
  159. * of the number is hit. Negation with borrow, -x-1, is the same as ~x.
  160. * Repeat that until the end of the number.
  161. *
  162. * Doesn't return borrow out because that's pretty useless - it's
  163. * always set unless the input is 0, which is easy to notice in
  164. * normalized form.
  165. */
  166. #ifndef lbnNeg_64
/*
 * Negate the number in place: skip low-order zero words, negate the
 * first non-zero word, then complement (~) every higher-order word.
 * (Negation with borrow, -x-1, is the same as ~x; see the block
 * comment above.)  No borrow-out is returned.
 */
void
lbnNeg_64(BNWORD64 *num, unsigned len)
{
	assert(len);

	/*
	 * Skip low-order zero words.  Note the asymmetry: on big-endian
	 * the pointer is pre-decremented inside the test itself, while on
	 * little-endian it is advanced in the loop body only after the
	 * word is known to be zero.  Either way, when this loop exits,
	 * num points AT the first non-zero word.
	 */
	while (BIGLITTLE(*--num,*num) == 0) {
		if (!--len)
			return;	/* Input was all zero - nothing to do */
		LITTLE(num++;)
	}
	/* Negate the lowest-order non-zero word */
	*num = -*num;
	/* Complement all the higher-order words */
	while (--len) {
		BIGLITTLE(--num,++num);
		*num = ~*num;
	}
}
  185. #endif /* !lbnNeg_64 */
  186. /*
  187. * lbnAdd1_64: add the single-word "carry" to the given number.
  188. * Used for minor increments and propagating the carry after
  189. * adding in a shorter bignum.
  190. *
  191. * Technique: If we have a double-width word, presumably the compiler
  192. * can add using its carry in inline code, so we just use a larger
  193. * accumulator to compute the carry from the first addition.
  194. * If not, it's more complex. After adding the first carry, which may
  195. * be > 1, compare the sum and the carry. If the sum wraps (causing a
  196. * carry out from the addition), the result will be less than each of the
  197. * inputs, since the wrap subtracts a number (2^64) which is larger than
  198. * the other input can possibly be. If the sum is >= the carry input,
  199. * return success immediately.
  200. * In either case, if there is a carry, enter a loop incrementing words
  201. * until one does not wrap. Since we are adding 1 each time, the wrap
  202. * will be to 0 and we can test for equality.
  203. */
  204. #ifndef lbnAdd1_64 /* If defined, it's provided as an asm subroutine */
  205. #ifdef BNWORD128
  206. BNWORD64
  207. lbnAdd1_64(BNWORD64 *num, unsigned len, BNWORD64 carry)
  208. {
  209. BNWORD128 t;
  210. assert(len > 0); /* Alternative: if (!len) return carry */
  211. t = (BNWORD128)BIGLITTLE(*--num,*num) + carry;
  212. BIGLITTLE(*num,*num++) = (BNWORD64)t;
  213. if ((t >> 64) == 0)
  214. return 0;
  215. while (--len) {
  216. if (++BIGLITTLE(*--num,*num++) != 0)
  217. return 0;
  218. }
  219. return 1;
  220. }
  221. #else /* no BNWORD128 */
  222. BNWORD64
  223. lbnAdd1_64(BNWORD64 *num, unsigned len, BNWORD64 carry)
  224. {
  225. assert(len > 0); /* Alternative: if (!len) return carry */
  226. if ((BIGLITTLE(*--num,*num++) += carry) >= carry)
  227. return 0;
  228. while (--len) {
  229. if (++BIGLITTLE(*--num,*num++) != 0)
  230. return 0;
  231. }
  232. return 1;
  233. }
  234. #endif
  235. #endif/* !lbnAdd1_64 */
  236. /*
  237. * lbnSub1_64: subtract the single-word "borrow" from the given number.
  238. * Used for minor decrements and propagating the borrow after
  239. * subtracting a shorter bignum.
  240. *
  241. * Technique: Similar to the add, above. If there is a double-length type,
  242. * use that to generate the first borrow.
  243. * If not, after subtracting the first borrow, which may be > 1, compare
  244. * the difference and the *negative* of the carry. If the subtract wraps
  245. * (causing a borrow out from the subtraction), the result will be at least
  246. * as large as -borrow. If the result < -borrow, then no borrow out has
  247. * appeared and we may return immediately, except when borrow == 0. To
  248. * deal with that case, use the identity that -x = ~x+1, and instead of
  249. * comparing < -borrow, compare for <= ~borrow.
  250. * Either way, if there is a borrow out, enter a loop decrementing words
  251. * until a non-zero word is reached.
  252. *
  253. * Note the cast of ~borrow to (BNWORD64). If the size of an int is larger
  254. * than BNWORD64, C rules say the number is expanded for the arithmetic, so
  255. * the inversion will be done on an int and the value won't be quite what
  256. * is expected.
  257. */
  258. #ifndef lbnSub1_64 /* If defined, it's provided as an asm subroutine */
  259. #ifdef BNWORD128
/*
 * Subtract the single word "borrow" from num, propagating the borrow
 * upward.  Returns the borrow out of the top word (0 or 1).
 * Double-width (BNWORD128) version.
 */
BNWORD64
lbnSub1_64(BNWORD64 *num, unsigned len, BNWORD64 borrow)
{
	BNWORD128 t;

	assert(len > 0);	/* Alternative: if (!len) return borrow */

	/*
	 * Wide subtract of the low word.  If it wrapped, the (unsigned)
	 * high half of t is all ones, i.e. non-zero.
	 */
	t = (BNWORD128)BIGLITTLE(*--num,*num) - borrow;
	BIGLITTLE(*num,*num++) = (BNWORD64)t;
	if ((t >> 64) == 0)
		return 0;	/* No borrow out - done */
	/*
	 * Propagate the borrow: decrement words until one is found that
	 * was non-zero BEFORE the decrement (hence the postfix --); that
	 * word does not wrap and the borrow dies there.
	 */
	while (--len) {
		if ((BIGLITTLE(*--num,*num++))-- != 0)
			return 0;
	}
	return 1;	/* Borrow out of the most-significant word */
}
  275. #else /* no BNWORD128 */
  276. BNWORD64
  277. lbnSub1_64(BNWORD64 *num, unsigned len, BNWORD64 borrow)
  278. {
  279. assert(len > 0); /* Alternative: if (!len) return borrow */
  280. if ((BIGLITTLE(*--num,*num++) -= borrow) <= (BNWORD64)~borrow)
  281. return 0;
  282. while (--len) {
  283. if ((BIGLITTLE(*--num,*num++))-- != 0)
  284. return 0;
  285. }
  286. return 1;
  287. }
  288. #endif
  289. #endif /* !lbnSub1_64 */
  290. /*
  291. * lbnAddN_64: add two bignums of the same length, returning the carry (0 or 1).
  292. * One of the building blocks, along with lbnAdd1, of adding two bignums of
  293. * differing lengths.
  294. *
  295. * Technique: Maintain a word of carry. If there is no double-width type,
  296. * use the same technique as in lbnAdd1, above, to maintain the carry by
  297. * comparing the inputs. Adding the carry sources is used as an OR operator;
  298. * at most one of the two comparisons can possibly be true. The first can
  299. * only be true if carry == 1 and x, the result, is 0. In that case the
  300. * second can't possibly be true.
  301. */
  302. #ifndef lbnAddN_64
  303. #ifdef BNWORD128
  304. BNWORD64
  305. lbnAddN_64(BNWORD64 *num1, BNWORD64 const *num2, unsigned len)
  306. {
  307. BNWORD128 t;
  308. assert(len > 0);
  309. t = (BNWORD128)BIGLITTLE(*--num1,*num1) + BIGLITTLE(*--num2,*num2++);
  310. BIGLITTLE(*num1,*num1++) = (BNWORD64)t;
  311. while (--len) {
  312. t = (BNWORD128)BIGLITTLE(*--num1,*num1) +
  313. (BNWORD128)BIGLITTLE(*--num2,*num2++) + (t >> 64);
  314. BIGLITTLE(*num1,*num1++) = (BNWORD64)t;
  315. }
  316. return (BNWORD64)(t>>64);
  317. }
  318. #else /* no BNWORD128 */
  319. BNWORD64
  320. lbnAddN_64(BNWORD64 *num1, BNWORD64 const *num2, unsigned len)
  321. {
  322. BNWORD64 x, carry = 0;
  323. assert(len > 0); /* Alternative: change loop to test at start */
  324. do {
  325. x = BIGLITTLE(*--num2,*num2++);
  326. carry = (x += carry) < carry;
  327. carry += (BIGLITTLE(*--num1,*num1++) += x) < x;
  328. } while (--len);
  329. return carry;
  330. }
  331. #endif
  332. #endif /* !lbnAddN_64 */
  333. /*
  334. * lbnSubN_64: subtract two bignums of the same length, returning the borrow (0 or 1).
  335. * One of the building blocks, along with subn1, of subtracting two bignums of
  336. * differing lengths.
  337. *
  338. * Technique: If no double-width type is available, maintain a word of borrow.
  339. * First, add the borrow to the subtrahend (did you have to learn all those
  340. * awful words in elementary school, too?), and if it overflows, set the
  341. * borrow again. Then subtract the modified subtrahend from the next word
  342. * of input, using the same technique as in subn1, above.
  343. * Adding the borrows is used as an OR operator; at most one of the two
  344. * comparisons can possibly be true. The first can only be true if
  345. * borrow == 1 and x, the result, is 0. In that case the second can't
  346. * possibly be true.
  347. *
  348. * In the double-word case, (BNWORD64)-(t>>64) is subtracted, rather than
  349. * adding t>>64, because the shift would need to sign-extend and that's
  350. * not guaranteed to happen in ANSI C, even with signed types.
  351. */
  352. #ifndef lbnSubN_64
  353. #ifdef BNWORD128
/*
 * num1 -= num2, both of length len; returns the borrow out (0 or 1).
 * Double-width (BNWORD128) version.
 */
BNWORD64
lbnSubN_64(BNWORD64 *num1, BNWORD64 const *num2, unsigned len)
{
	BNWORD128 t;

	assert(len > 0);

	t = (BNWORD128)BIGLITTLE(*--num1,*num1) - BIGLITTLE(*--num2,*num2++);
	BIGLITTLE(*num1,*num1++) = (BNWORD64)t;

	/*
	 * The borrow from the previous word is the high half of t: 0 if
	 * none, all ones if there was one.  It is folded in as
	 * "- (BNWORD64)-(t >> 64)" (subtracting 0 or 1) because an
	 * unsigned shift of t cannot sign-extend, as the block comment
	 * above explains.
	 */
	while (--len) {
		t = (BNWORD128)BIGLITTLE(*--num1,*num1) -
			(BNWORD128)BIGLITTLE(*--num2,*num2++) - (BNWORD64)-(t >> 64);
		BIGLITTLE(*num1,*num1++) = (BNWORD64)t;
	}

	/* High half of the final t is 0 or all ones; negate to get 0 or 1. */
	return -(BNWORD64)(t>>64);
}
  368. #else
  369. BNWORD64
  370. lbnSubN_64(BNWORD64 *num1, BNWORD64 const *num2, unsigned len)
  371. {
  372. BNWORD64 x, borrow = 0;
  373. assert(len > 0); /* Alternative: change loop to test at start */
  374. do {
  375. x = BIGLITTLE(*--num2,*num2++);
  376. borrow = (x += borrow) < borrow;
  377. borrow += (BIGLITTLE(*--num1,*num1++) -= x) > (BNWORD64)~x;
  378. } while (--len);
  379. return borrow;
  380. }
  381. #endif
  382. #endif /* !lbnSubN_64 */
  383. #ifndef lbnCmp_64
  384. /*
  385. * lbnCmp_64: compare two bignums of equal length, returning the sign of
  386. * num1 - num2. (-1, 0 or +1).
  387. *
  388. * Technique: Change the little-endian pointers to big-endian pointers
  389. * and compare from the most-significant end until a difference is found.
  390. * When it is, figure out the sign of the difference and return it.
  391. */
  392. int
  393. lbnCmp_64(BNWORD64 const *num1, BNWORD64 const *num2, unsigned len)
  394. {
  395. BIGLITTLE(num1 -= len, num1 += len);
  396. BIGLITTLE(num2 -= len, num2 += len);
  397. while (len--) {
  398. if (BIGLITTLE(*num1++ != *num2++, *--num1 != *--num2)) {
  399. if (BIGLITTLE(num1[-1] < num2[-1], *num1 < *num2))
  400. return -1;
  401. else
  402. return 1;
  403. }
  404. }
  405. return 0;
  406. }
  407. #endif /* !lbnCmp_64 */
/*
 * mul64_ppmmaa(ph,pl,x,y,a,b) is an optional routine that
 * computes (ph,pl) = x * y + a + b. mul64_ppmma and mul64_ppmm
 * are simpler versions. If you want to be lazy, all of these
 * can be defined in terms of the others, so here we create any
 * that have not been defined in terms of the ones that have been.
 */

/* Define ones with fewer a's in terms of ones with more a's */
#if !defined(mul64_ppmma) && defined(mul64_ppmmaa)
#define mul64_ppmma(ph,pl,x,y,a) mul64_ppmmaa(ph,pl,x,y,a,0)
#endif

#if !defined(mul64_ppmm) && defined(mul64_ppmma)
#define mul64_ppmm(ph,pl,x,y) mul64_ppmma(ph,pl,x,y,0)
#endif

/*
 * Use this definition to test the mul64_ppmm-based operations on machines
 * that do not provide mul64_ppmm. Change the final "0" to a "1" to
 * enable it.  (Uses a GCC statement expression.)
 */
#if !defined(mul64_ppmm) && defined(BNWORD128) && 0 /* Debugging */
#define mul64_ppmm(ph,pl,x,y) \
	({BNWORD128 _ = (BNWORD128)(x)*(y); (pl) = _; (ph) = _>>64;})
#endif

/*
 * Define ones with more a's in terms of ones with fewer a's.
 * The carry out of (pl += a) is detected by the wraparound test (pl < a).
 */
#if defined(mul64_ppmm) && !defined(mul64_ppmma)
#define mul64_ppmma(ph,pl,x,y,a) \
	(mul64_ppmm(ph,pl,x,y), (ph) += ((pl) += (a)) < (a))
#endif

#if defined(mul64_ppmma) && !defined(mul64_ppmmaa)
#define mul64_ppmmaa(ph,pl,x,y,a,b) \
	(mul64_ppmma(ph,pl,x,y,a), (ph) += ((pl) += (b)) < (b))
#endif
/*
 * lbnMulN1_64: Multiply an n-word input by a 1-word input and store the
 * n+1-word product. This uses either the mul64_ppmm and mul64_ppmma
 * macros, or C multiplication with the BNWORD128 type. This uses mul64_ppmma
 * if available, assuming you won't bother defining it unless you can do
 * better than the normal multiplication.
 */
#ifndef lbnMulN1_64
#ifdef lbnMulAdd1_64 /* If we have this asm primitive, use it. */
void
lbnMulN1_64(BNWORD64 *out, BNWORD64 const *in, unsigned len, BNWORD64 k)
{
	/* Multiply-accumulate into a zeroed buffer; store the top word. */
	lbnZero_64(out, len);
	BIGLITTLE(*(out-len-1),*(out+len)) = lbnMulAdd1_64(out, in, len, k);
}
#elif defined(mul64_ppmm)
/* mul64_ppmm version: carry from each word's product feeds the next. */
void
lbnMulN1_64(BNWORD64 *out, BNWORD64 const *in, unsigned len, BNWORD64 k)
{
	BNWORD64 carry, carryin;

	assert(len > 0);

	/* First word: plain multiply, no carry in. */
	BIG(--out;--in;);
	mul64_ppmm(carry, *out, *in, k);
	LITTLE(out++;in++;)

	while (--len) {
		BIG(--out;--in;)
		carryin = carry;
		mul64_ppmma(carry, *out, *in, k, carryin);
		LITTLE(out++;in++;)
	}
	/* The n+1st word of the product is the final carry. */
	BIGLITTLE(*--out,*out) = carry;
}
#elif defined(BNWORD128)
/* Double-word version: the carry rides in the high half of p. */
void
lbnMulN1_64(BNWORD64 *out, BNWORD64 const *in, unsigned len, BNWORD64 k)
{
	BNWORD128 p;

	assert(len > 0);

	p = (BNWORD128)BIGLITTLE(*--in,*in++) * k;
	BIGLITTLE(*--out,*out++) = (BNWORD64)p;

	while (--len) {
		p = (BNWORD128)BIGLITTLE(*--in,*in++) * k + (BNWORD64)(p >> 64);
		BIGLITTLE(*--out,*out++) = (BNWORD64)p;
	}
	/* Top word of the product. */
	BIGLITTLE(*--out,*out) = (BNWORD64)(p >> 64);
}
#else
#error No 64x64 -> 128 multiply available for 64-bit bignum package
#endif
#endif /* lbnMulN1_64 */
/*
 * lbnMulAdd1_64: Multiply an n-word input by a 1-word input and add the
 * low n words of the product to the destination. *Returns the n+1st word
 * of the product.* (That turns out to be more convenient than adding
 * it into the destination and dealing with a possible unit carry out
 * of *that*.) This uses either the mul64_ppmma and mul64_ppmmaa macros,
 * or C multiplication with the BNWORD128 type.
 *
 * If you're going to write assembly primitives, this is the one to
 * start with. It is by far the most commonly called function.
 */
#ifndef lbnMulAdd1_64
#if defined(mul64_ppmm)
BNWORD64
lbnMulAdd1_64(BNWORD64 *out, BNWORD64 const *in, unsigned len, BNWORD64 k)
{
	BNWORD64 prod, carry, carryin;

	assert(len > 0);

	/* First word: fold the existing *out in as the addend. */
	BIG(--out;--in;);
	carryin = *out;
	mul64_ppmma(carry, *out, *in, k, carryin);
	LITTLE(out++;in++;)

	while (--len) {
		BIG(--out;--in;);
		/* Two addends per step: previous carry and the old *out. */
		carryin = carry;
		mul64_ppmmaa(carry, prod, *in, k, carryin, *out);
		*out = prod;
		LITTLE(out++;in++;)
	}

	return carry;
}
#elif defined(BNWORD128)
/* Double-word version: carry travels in the high half of p. */
BNWORD64
lbnMulAdd1_64(BNWORD64 *out, BNWORD64 const *in, unsigned len, BNWORD64 k)
{
	BNWORD128 p;

	assert(len > 0);

	p = (BNWORD128)BIGLITTLE(*--in,*in++) * k + BIGLITTLE(*--out,*out);
	BIGLITTLE(*out,*out++) = (BNWORD64)p;

	while (--len) {
		p = (BNWORD128)BIGLITTLE(*--in,*in++) * k +
			(BNWORD64)(p >> 64) + BIGLITTLE(*--out,*out);
		BIGLITTLE(*out,*out++) = (BNWORD64)p;
	}

	/* The n+1st word of the product, for the caller to place. */
	return (BNWORD64)(p >> 64);
}
#else
#error No 64x64 -> 128 multiply available for 64-bit bignum package
#endif
#endif /* lbnMulAdd1_64 */
/*
 * lbnMulSub1_64: Multiply an n-word input by a 1-word input and subtract the
 * n-word product from the destination. Returns the n+1st word of the product.
 * This uses either the mul64_ppmm and mul64_ppmma macros, or
 * C multiplication with the BNWORD128 type.
 *
 * This is rather uglier than adding, but fortunately it's only used in
 * division which is not used too heavily.
 */
#ifndef lbnMulSub1_64
#if defined(mul64_ppmm)
BNWORD64
lbnMulSub1_64(BNWORD64 *out, BNWORD64 const *in, unsigned len, BNWORD64 k)
{
	BNWORD64 prod, carry, carryin;

	assert(len > 0);

	BIG(--in;)
	mul64_ppmm(carry, prod, *in, k);
	LITTLE(in++;)
	/*
	 * Subtract the low product word; the wraparound test
	 * (result > ~prod) detects the borrow, which is folded into
	 * carry, the running high word of the product.
	 */
	carry += (BIGLITTLE(*--out,*out++) -= prod) > (BNWORD64)~prod;

	while (--len) {
		BIG(--in;);
		carryin = carry;
		mul64_ppmma(carry, prod, *in, k, carryin);
		LITTLE(in++;)
		carry += (BIGLITTLE(*--out,*out++) -= prod) > (BNWORD64)~prod;
	}

	return carry;
}
#elif defined(BNWORD128)
/* Double-word version: p's high half plus the borrow form the carry. */
BNWORD64
lbnMulSub1_64(BNWORD64 *out, BNWORD64 const *in, unsigned len, BNWORD64 k)
{
	BNWORD128 p;
	BNWORD64 carry, t;

	assert(len > 0);

	p = (BNWORD128)BIGLITTLE(*--in,*in++) * k;
	t = BIGLITTLE(*--out,*out);
	/* (t - low(p)) > t exactly when the subtraction borrowed. */
	carry = (BNWORD64)(p>>64) + ((BIGLITTLE(*out,*out++)=t-(BNWORD64)p) > t);

	while (--len) {
		p = (BNWORD128)BIGLITTLE(*--in,*in++) * k + carry;
		t = BIGLITTLE(*--out,*out);
		carry = (BNWORD64)(p>>64) +
			( (BIGLITTLE(*out,*out++)=t-(BNWORD64)p) > t );
	}

	return carry;
}
#else
#error No 64x64 -> 128 multiply available for 64-bit bignum package
#endif
#endif /* !lbnMulSub1_64 */
  590. /*
  591. * Shift n words left "shift" bits. 0 < shift < 64. Returns the
  592. * carry, any bits shifted off the left-hand side (0 <= carry < 2^shift).
  593. */
  594. #ifndef lbnLshift_64
  595. BNWORD64
  596. lbnLshift_64(BNWORD64 *num, unsigned len, unsigned shift)
  597. {
  598. BNWORD64 x, carry;
  599. assert(shift > 0);
  600. assert(shift < 64);
  601. carry = 0;
  602. while (len--) {
  603. BIG(--num;)
  604. x = *num;
  605. *num = (x<<shift) | carry;
  606. LITTLE(num++;)
  607. carry = x >> (64-shift);
  608. }
  609. return carry;
  610. }
  611. #endif /* !lbnLshift_64 */
  612. /*
  613. * An optimized version of the above, for shifts of 1.
  614. * Some machines can use add-with-carry tricks for this.
  615. */
  616. #ifndef lbnDouble_64
  617. BNWORD64
  618. lbnDouble_64(BNWORD64 *num, unsigned len)
  619. {
  620. BNWORD64 x, carry;
  621. carry = 0;
  622. while (len--) {
  623. BIG(--num;)
  624. x = *num;
  625. *num = (x<<1) | carry;
  626. LITTLE(num++;)
  627. carry = x >> (64-1);
  628. }
  629. return carry;
  630. }
  631. #endif /* !lbnDouble_64 */
  632. /*
  633. * Shift n words right "shift" bits. 0 < shift < 64. Returns the
  634. * carry, any bits shifted off the right-hand side (0 <= carry < 2^shift).
  635. */
  636. #ifndef lbnRshift_64
  637. BNWORD64
  638. lbnRshift_64(BNWORD64 *num, unsigned len, unsigned shift)
  639. {
  640. BNWORD64 x, carry = 0;
  641. assert(shift > 0);
  642. assert(shift < 64);
  643. BIGLITTLE(num -= len, num += len);
  644. while (len--) {
  645. LITTLE(--num;)
  646. x = *num;
  647. *num = (x>>shift) | carry;
  648. BIG(num++;)
  649. carry = x << (64-shift);
  650. }
  651. return carry >> (64-shift);
  652. }
  653. #endif /* !lbnRshift_64 */
/*
 * Multiply two numbers of the given lengths. prod and num2 may overlap,
 * provided that the low len1 bits of prod are free. (This corresponds
 * nicely to the place the result is returned from lbnMontReduce_64.)
 *
 * TODO: Use Karatsuba multiply. The overlap constraints may have
 * to get rewhacked.
 */
#ifndef lbnMul_64
void
lbnMul_64(BNWORD64 *prod, BNWORD64 const *num1, unsigned len1,
	BNWORD64 const *num2, unsigned len2)
{
	/* Special case of zero */
	if (!len1 || !len2) {
		lbnZero_64(prod, len1+len2);
		return;
	}

	/* Multiply first word */
	lbnMulN1_64(prod, num1, len1, BIGLITTLE(*--num2,*num2++));

	/*
	 * Add in subsequent words, storing the most significant word,
	 * which is new each time.
	 */
	while (--len2) {
		/* prod advances one word per pass of num2. */
		BIGLITTLE(--prod,prod++);
		/*
		 * lbnMulAdd1_64 returns the n+1st product word; store it
		 * just above the len1 words it accumulated into.
		 */
		BIGLITTLE(*(prod-len1-1),*(prod+len1)) =
			lbnMulAdd1_64(prod, num1, len1, BIGLITTLE(*--num2,*num2++));
	}
}
#endif /* !lbnMul_64 */
/*
 * lbnMulX_64 is a square multiply - both inputs are the same length.
 * It's normally just a macro wrapper around the general multiply,
 * but might be implementable in assembly more efficiently (such as
 * when product scanning).
 */
#ifndef lbnMulX_64
#if defined(BNWORD128) && PRODUCT_SCAN
/*
 * Test code to see whether product scanning is any faster. It seems
 * to make the C code slower, so PRODUCT_SCAN is not defined.
 *
 * The outer loops walk the output words; each inner loop sums one
 * anti-diagonal of partial products into the double-word accumulator x,
 * with "carry" catching overflow out of x's high half.
 */
static void
lbnMulX_64(BNWORD64 *prod, BNWORD64 const *num1, BNWORD64 const *num2,
	unsigned len)
{
	BNWORD128 x, y;
	BNWORD64 const *p1, *p2;
	unsigned carry;
	unsigned i, j;

	/* Special case of zero */
	if (!len)
		return;

	/* Word 0: a single partial product. */
	x = (BNWORD128)BIGLITTLE(num1[-1] * num2[-1], num1[0] * num2[0]);
	BIGLITTLE(*--prod, *prod++) = (BNWORD64)x;
	x >>= 64;

	/* Low half of the product: diagonals of i+1 terms. */
	for (i = 1; i < len; i++) {
		carry = 0;
		p1 = num1;
		p2 = BIGLITTLE(num2-i-1,num2+i+1);
		for (j = 0; j <= i; j++) {
			BIG(y = (BNWORD128)*--p1 * *p2++;)
			LITTLE(y = (BNWORD128)*p1++ * *--p2;)
			x += y;
			carry += (x < y);
		}
		BIGLITTLE(*--prod,*prod++) = (BNWORD64)x;
		/* Shift accumulator down one word, restoring saved carries. */
		x = (x >> 64) | (BNWORD128)carry << 64;
	}
	/* High half of the product: diagonals of len-i terms. */
	for (i = 1; i < len; i++) {
		carry = 0;
		p1 = BIGLITTLE(num1-i,num1+i);
		p2 = BIGLITTLE(num2-len,num2+len);
		for (j = i; j < len; j++) {
			BIG(y = (BNWORD128)*--p1 * *p2++;)
			LITTLE(y = (BNWORD128)*p1++ * *--p2;)
			x += y;
			carry += (x < y);
		}
		BIGLITTLE(*--prod,*prod++) = (BNWORD64)x;
		x = (x >> 64) | (BNWORD128)carry << 64;
	}

	/* Topmost word of the product. */
	BIGLITTLE(*--prod,*prod) = (BNWORD64)x;
}
#else /* !defined(BNWORD128) || !PRODUCT_SCAN */
/* Default trivial macro definition */
#define lbnMulX_64(prod, num1, num2, len) lbnMul_64(prod, num1, len, num2, len)
#endif /* !defined(BNWORD128) || !PRODUCT_SCAN */
#endif /* !lbnMulX_64 */
#if !defined(lbnMontMul_64) && defined(BNWORD128) && PRODUCT_SCAN
/*
 * Test code for product-scanning multiply. This seems to slow the C
 * code down rather than speed it up.
 * This does a multiply and Montgomery reduction together, using the
 * same loops. The outer loop scans across the product, twice.
 * The first pass computes the low half of the product and the
 * Montgomery multipliers. These are stored in the product array,
 * which contains no data as of yet. x and carry add up the columns
 * and propagate carries forward.
 *
 * The second half multiplies the upper half, adding in the modulus
 * times the Montgomery multipliers. The results of this multiply
 * are stored.
 */
static void
lbnMontMul_64(BNWORD64 *prod, BNWORD64 const *num1, BNWORD64 const *num2,
	BNWORD64 const *mod, unsigned len, BNWORD64 inv)
{
	BNWORD128 x, y;
	BNWORD64 const *p1, *p2, *pm;
	BNWORD64 *pp;
	BNWORD64 t;
	unsigned carry;
	unsigned i, j;

	/* Special case of zero */
	if (!len)
		return;

	/*
	 * This computes directly into the high half of prod, so just
	 * shift the pointer and consider prod only "len" elements long
	 * for the rest of the code.
	 */
	BIGLITTLE(prod -= len, prod += len);

	/* Pass 1 - compute Montgomery multipliers */
	/* First iteration can have certain simplifications. */
	x = (BNWORD128)BIGLITTLE(num1[-1] * num2[-1], num1[0] * num2[0]);
	/* t is the multiplier that forces this column to zero mod 2^64. */
	BIGLITTLE(prod[-1], prod[0]) = t = inv * (BNWORD64)x;
	y = (BNWORD128)t * BIGLITTLE(mod[-1],mod[0]);
	x += y;
	/* Note: GCC 2.6.3 has a bug if you try to eliminate "carry" */
	carry = (x < y);
	assert((BNWORD64)x == 0);
	x = x >> 64 | (BNWORD128)carry << 64;

	for (i = 1; i < len; i++) {
		carry = 0;
		p1 = num1;
		p2 = BIGLITTLE(num2-i-1,num2+i+1);
		pp = prod;
		pm = BIGLITTLE(mod-i-1,mod+i+1);
		/* Sum this column: num1*num2 terms plus mod*multiplier terms. */
		for (j = 0; j < i; j++) {
			y = (BNWORD128)BIGLITTLE(*--p1 * *p2++, *p1++ * *--p2);
			x += y;
			carry += (x < y);
			y = (BNWORD128)BIGLITTLE(*--pp * *pm++, *pp++ * *--pm);
			x += y;
			carry += (x < y);
		}
		y = (BNWORD128)BIGLITTLE(p1[-1] * p2[0], p1[0] * p2[-1]);
		x += y;
		carry += (x < y);
		assert(BIGLITTLE(pp == prod-i, pp == prod+i));
		/* New Montgomery multiplier, chosen to zero the column. */
		BIGLITTLE(pp[-1], pp[0]) = t = inv * (BNWORD64)x;
		assert(BIGLITTLE(pm == mod-1, pm == mod+1));
		y = (BNWORD128)t * BIGLITTLE(pm[0],pm[-1]);
		x += y;
		carry += (x < y);
		assert((BNWORD64)x == 0);
		x = x >> 64 | (BNWORD128)carry << 64;
	}

	/* Pass 2 - compute reduced product and store */
	for (i = 1; i < len; i++) {
		carry = 0;
		p1 = BIGLITTLE(num1-i,num1+i);
		p2 = BIGLITTLE(num2-len,num2+len);
		pm = BIGLITTLE(mod-i,mod+i);
		pp = BIGLITTLE(prod-len,prod+len);
		for (j = i; j < len; j++) {
			y = (BNWORD128)BIGLITTLE(*--p1 * *p2++, *p1++ * *--p2);
			x += y;
			carry += (x < y);
			y = (BNWORD128)BIGLITTLE(*--pm * *pp++, *pm++ * *--pp);
			x += y;
			carry += (x < y);
		}
		assert(BIGLITTLE(pm == mod-len, pm == mod+len));
		assert(BIGLITTLE(pp == prod-i, pp == prod+i));
		BIGLITTLE(pp[0],pp[-1]) = (BNWORD64)x;
		x = (x >> 64) | (BNWORD128)carry << 64;
	}

	/* Last round of second half, simplified. */
	BIGLITTLE(*(prod-len),*(prod+len-1)) = (BNWORD64)x;
	carry = (x >> 64);

	/* Fold any leftover carry, then reduce below mod. */
	while (carry)
		carry -= lbnSubN_64(prod, mod, len);
	while (lbnCmp_64(prod, mod, len) >= 0)
		(void)lbnSubN_64(prod, mod, len);
}
/* Suppress later definition */
#define lbnMontMul_64 lbnMontMul_64
#endif
#if !defined(lbnSquare_64) && defined(BNWORD128) && PRODUCT_SCAN
/*
 * Trial code for product-scanning squaring. This seems to slow the C
 * code down rather than speed it up.
 *
 * Each output word's column contains symmetric pairs of partial
 * products; y sums half of each pair, is doubled, and the square term
 * on the diagonal (present when the column has odd length) is added.
 */
void
lbnSquare_64(BNWORD64 *prod, BNWORD64 const *num, unsigned len)
{
	BNWORD128 x, y, z;
	BNWORD64 const *p1, *p2;
	unsigned carry;
	unsigned i, j;

	/* Special case of zero */
	if (!len)
		return;

	/* Word 0 of product */
	x = (BNWORD128)BIGLITTLE(num[-1] * num[-1], num[0] * num[0]);
	BIGLITTLE(*--prod, *prod++) = (BNWORD64)x;
	x >>= 64;

	/* Words 1 through len-1 */
	for (i = 1; i < len; i++) {
		carry = 0;
		y = 0;
		p1 = num;
		p2 = BIGLITTLE(num-i-1,num+i+1);
		/* Sum one of each symmetric pair in this column. */
		for (j = 0; j < (i+1)/2; j++) {
			BIG(z = (BNWORD128)*--p1 * *p2++;)
			LITTLE(z = (BNWORD128)*p1++ * *--p2;)
			y += z;
			carry += (y < z);
		}
		/* Double the off-diagonal sum. */
		y += z = y;
		carry += carry + (y < z);
		if ((i & 1) == 0) {
			/* Even-index column: add the diagonal square term. */
			assert(BIGLITTLE(--p1 == p2, p1 == --p2));
			BIG(z = (BNWORD128)*p2 * *p2;)
			LITTLE(z = (BNWORD128)*p1 * *p1;)
			y += z;
			carry += (y < z);
		}
		x += y;
		carry += (x < y);
		BIGLITTLE(*--prod,*prod++) = (BNWORD64)x;
		x = (x >> 64) | (BNWORD128)carry << 64;
	}
	/* Words len through 2*len-2 */
	for (i = 1; i < len; i++) {
		carry = 0;
		y = 0;
		p1 = BIGLITTLE(num-i,num+i);
		p2 = BIGLITTLE(num-len,num+len);
		for (j = 0; j < (len-i)/2; j++) {
			BIG(z = (BNWORD128)*--p1 * *p2++;)
			LITTLE(z = (BNWORD128)*p1++ * *--p2;)
			y += z;
			carry += (y < z);
		}
		y += z = y;
		carry += carry + (y < z);
		if ((len-i) & 1) {
			/* Odd-length column: add the diagonal square term. */
			assert(BIGLITTLE(--p1 == p2, p1 == --p2));
			BIG(z = (BNWORD128)*p2 * *p2;)
			LITTLE(z = (BNWORD128)*p1 * *p1;)
			y += z;
			carry += (y < z);
		}
		x += y;
		carry += (x < y);
		BIGLITTLE(*--prod,*prod++) = (BNWORD64)x;
		x = (x >> 64) | (BNWORD128)carry << 64;
	}

	/* Word 2*len-1 */
	BIGLITTLE(*--prod,*prod) = (BNWORD64)x;
}
/* Suppress later definition */
#define lbnSquare_64 lbnSquare_64
#endif
/*
 * Square a number, using optimized squaring to reduce the number of
 * primitive multiples that are executed. There may not be any
 * overlap of the input and output.
 *
 * Technique: Consider the partial products in the multiplication
 * of "abcde" by itself:
 *
 *               a  b  c  d  e
 *            *  a  b  c  d  e
 *          ==================
 *              ae be ce de ee
 *           ad bd cd dd de
 *        ac bc cc cd ce
 *     ab bb bc bd be
 *  aa ab ac ad ae
 *
 * Note that everything above the main diagonal:
 *              ae be ce de = (abcd) * e
 *           ad bd cd       = (abc) * d
 *        ac bc             = (ab) * c
 *     ab                   = (a) * b
 *
 * is a copy of everything below the main diagonal:
 *                       de
 *                 cd ce
 *           bc bd be
 *     ab ac ad ae
 *
 * Thus, the sum is 2 * (off the diagonal) + diagonal.
 *
 * This is accumulated beginning with the diagonal (which
 * consist of the squares of the digits of the input), which is then
 * divided by two, the off-diagonal added, and multiplied by two
 * again. The low bit is simply a copy of the low bit of the
 * input, so it doesn't need special care.
 *
 * TODO: Merge the shift by 1 with the squaring loop.
 * TODO: Use Karatsuba. (a*W+b)^2 = a^2 * (W^2+W) + b^2 * (W+1) - (a-b)^2 * W.
 */
#ifndef lbnSquare_64
void
lbnSquare_64(BNWORD64 *prod, BNWORD64 const *num, unsigned len)
{
	BNWORD64 t;
	BNWORD64 *prodx = prod;		/* Working copy of the argument */
	BNWORD64 const *numx = num;	/* Working copy of the argument */
	unsigned lenx = len;		/* Working copy of the argument */

	if (!len)
		return;

	/* First, store all the squares (two product words per input word). */
	while (lenx--) {
#ifdef mul64_ppmm
		BNWORD64 ph, pl;
		t = BIGLITTLE(*--numx,*numx++);
		mul64_ppmm(ph,pl,t,t);
		BIGLITTLE(*--prodx,*prodx++) = pl;
		BIGLITTLE(*--prodx,*prodx++) = ph;
#elif defined(BNWORD128) /* use BNWORD128 */
		BNWORD128 p;
		t = BIGLITTLE(*--numx,*numx++);
		p = (BNWORD128)t * t;
		BIGLITTLE(*--prodx,*prodx++) = (BNWORD64)p;
		BIGLITTLE(*--prodx,*prodx++) = (BNWORD64)(p>>64);
#else	/* Use lbnMulN1_64 */
		t = BIGLITTLE(numx[-1],*numx);
		lbnMulN1_64(prodx, numx, 1, t);
		BIGLITTLE(--numx,numx++);
		BIGLITTLE(prodx -= 2, prodx += 2);
#endif
	}
	/* Then, shift right 1 bit (divide the diagonal by two). */
	(void)lbnRshift_64(prod, 2*len, 1);

	/* Then, add in the off-diagonal sums */
	lenx = len;
	numx = num;
	prodx = prod;
	while (--lenx) {
		/* Add word * (rest of input) one word up the product. */
		t = BIGLITTLE(*--numx,*numx++);
		BIGLITTLE(--prodx,prodx++);
		t = lbnMulAdd1_64(prodx, numx, lenx, t);
		/* Propagate the returned top word upward. */
		lbnAdd1_64(BIGLITTLE(prodx-lenx,prodx+lenx), lenx+1, t);
		BIGLITTLE(--prodx,prodx++);
	}

	/* Shift it back up */
	lbnDouble_64(prod, 2*len);

	/* And set the low bit appropriately */
	BIGLITTLE(prod[-1],prod[0]) |= BIGLITTLE(num[-1],num[0]) & 1;
}
#endif /* !lbnSquare_64 */
  1012. /*
  1013. * lbnNorm_64 - given a number, return a modified length such that the
  1014. * most significant digit is non-zero. Zero-length input is okay.
  1015. */
  1016. #ifndef lbnNorm_64
  1017. unsigned
  1018. lbnNorm_64(BNWORD64 const *num, unsigned len)
  1019. {
  1020. BIGLITTLE(num -= len,num += len);
  1021. while (len && BIGLITTLE(*num++,*--num) == 0)
  1022. --len;
  1023. return len;
  1024. }
  1025. #endif /* lbnNorm_64 */
  1026. /*
  1027. * lbnBits_64 - return the number of significant bits in the array.
  1028. * It starts by normalizing the array. Zero-length input is okay.
  1029. * Then assuming there's anything to it, it fetches the high word,
  1030. * generates a bit length by multiplying the word length by 64, and
  1031. * subtracts off 64/2, 64/4, 64/8, ... bits if the high bits are clear.
  1032. */
  1033. #ifndef lbnBits_64
  1034. unsigned
  1035. lbnBits_64(BNWORD64 const *num, unsigned len)
  1036. {
  1037. BNWORD64 t;
  1038. unsigned i;
  1039. len = lbnNorm_64(num, len);
  1040. if (len) {
  1041. t = BIGLITTLE(*(num-len),*(num+(len-1)));
  1042. assert(t);
  1043. len *= 64;
  1044. i = 64/2;
  1045. do {
  1046. if (t >> i)
  1047. t >>= i;
  1048. else
  1049. len -= i;
  1050. } while ((i /= 2) != 0);
  1051. }
  1052. return len;
  1053. }
  1054. #endif /* lbnBits_64 */
/*
 * If defined, use hand-rolled divide rather than compiler's native.
 * If the machine doesn't do it in line, the manual code is probably
 * faster, since it can assume normalization and the fact that the
 * quotient will fit into 64 bits, which a general 128-bit divide
 * in a compiler's run-time library can't do.
 */
#ifndef BN_SLOW_DIVIDE_128
/*
 * Assume that divisors of more than thirty-two bits are slow.
 * NOTE(review): 0x20 is presumably hex so the word-size rewriting of
 * this file (see the 64/2 note below) leaves it alone — confirm.
 */
#define BN_SLOW_DIVIDE_128 (128 > 0x20)
#endif
/*
 * Return (nh<<64|nl) % d, and place the quotient digit into *q.
 * It is guaranteed that nh < d, and that d is normalized (with its high
 * bit set). If we have a double-width type, it's easy. If not, ooh,
 * yuk!
 */
#ifndef lbnDiv21_64
#if defined(BNWORD128) && !BN_SLOW_DIVIDE_128
BNWORD64
lbnDiv21_64(BNWORD64 *q, BNWORD64 nh, BNWORD64 nl, BNWORD64 d)
{
	BNWORD128 n = (BNWORD128)nh << 64 | nl;

	/* Divisor must be normalized */
	assert(d >> (64-1) == 1);

	*q = n / d;
	return n % d;
}
#else
/*
 * This is where it gets ugly.
 *
 * Do the division in two halves, using Algorithm D from section 4.3.1
 * of Knuth. Note Theorem B from that section, that the quotient estimate
 * is never more than the true quotient, and is never more than two
 * too low.
 *
 * The mapping onto conventional long division is (everything a half word):
 *        _____________qh___ql_
 * dh dl ) nh.h nh.l nl.h nl.l
 *             - (qh * d)
 *            -----------
 *              rrrr rrrr nl.l
 *                  - (ql * d)
 *                 -----------
 *                   rrrr rrrr
 *
 * The implicit 3/2-digit d*qh and d*ql subtractors are computed this way:
 * First, estimate a q digit so that nh/dh works. Subtracting qh*dh from
 * the (nh.h nh.l) list leaves a 1/2-word remainder r. Then compute the
 * low part of the subtractor, qh * dl. This also needs to be subtracted
 * from (nh.h nh.l nl.h) to get the final remainder. So we take the
 * remainder, which is (nh.h nh.l) - qh*dl, shift it and add in nl.h, and
 * try to subtract qh * dl from that. Since the remainder is 1/2-word
 * long, shifting and adding nl.h results in a single word r.
 * It is possible that the remainder we're working with, r, is less than
 * the product qh * dl, if we estimated qh too high. The estimation
 * technique can produce a qh that is too large (never too small), leading
 * to r which is too small. In that case, decrement the digit qh, add
 * shifted dh to r (to correct for that error), and subtract dl from the
 * product we're comparing r with. That's the "correct" way to do it, but
 * just adding dl to r instead of subtracting it from the product is
 * equivalent and a lot simpler. You just have to watch out for overflow.
 *
 * The process is repeated with (rrrr rrrr nl.l) for the low digit of the
 * quotient ql.
 *
 * The various uses of 64/2 for shifts are because of the note about
 * automatic editing of this file at the very top of the file.
 */
#define highhalf(x) ( (x) >> 64/2 )
#define lowhalf(x) ( (x) & (((BNWORD64)1 << 64/2)-1) )
BNWORD64
lbnDiv21_64(BNWORD64 *q, BNWORD64 nh, BNWORD64 nl, BNWORD64 d)
{
	BNWORD64 dh = highhalf(d), dl = lowhalf(d);
	BNWORD64 qh, ql, prod, r;

	/* Divisor must be normalized */
	assert((d >> (64-1)) == 1);

	/* Do first half-word of division */
	qh = nh / dh;
	r = nh % dh;
	prod = qh * dl;

	/*
	 * Add next half-word of numerator to remainder and correct.
	 * qh may be up to two too large.
	 */
	r = (r << (64/2)) | highhalf(nl);
	if (r < prod) {
		/* qh was overestimated: step it down (at most twice). */
		--qh; r += d;
		if (r >= d && r < prod) {
			--qh; r += d;
		}
	}
	r -= prod;

	/* Do second half-word of division */
	ql = r / dh;
	r = r % dh;
	prod = ql * dl;

	r = (r << (64/2)) | lowhalf(nl);
	if (r < prod) {
		--ql; r += d;
		if (r >= d && r < prod) {
			--ql; r += d;
		}
	}
	r -= prod;

	*q = (qh << (64/2)) | ql;

	return r;
}
#endif
#endif /* lbnDiv21_64 */
/*
 * In the division functions, the dividend and divisor are referred to
 * as "n" and "d", which stand for "numerator" and "denominator".
 *
 * The quotient is (nlen-dlen+1) digits long. It may be overlapped with
 * the high (nlen-dlen) words of the dividend, but one extra word is needed
 * on top to hold the top word.
 */

/*
 * Divide an n-word number by a 1-word number, storing the remainder
 * and n-1 words of the n-word quotient. The high word is returned.
 * It IS legal for rem to point to the same address as n, and for
 * q to point one word higher.
 *
 * TODO: If BN_SLOW_DIVIDE_128, add a divnhalf_64 which uses 64-bit
 * dividends if the divisor is half that long.
 * TODO: Shift the dividend on the fly to avoid the last division and
 * instead have a remainder that needs shifting.
 * TODO: Use reciprocals rather than dividing.
 */
#ifndef lbnDiv1_64
BNWORD64
lbnDiv1_64(BNWORD64 *q, BNWORD64 *rem, BNWORD64 const *n, unsigned len,
	BNWORD64 d)
{
	unsigned shift;
	unsigned xlen;
	BNWORD64 r;
	BNWORD64 qhigh;

	assert(len > 0);
	assert(d);

	if (len == 1) {
		r = *n;
		*rem = r%d;
		return r/d;
	}

	/*
	 * Binary search for the count of leading zero bits of d, so that
	 * d can be normalized for lbnDiv21_64.
	 */
	shift = 0;
	r = d;
	xlen = 64/2;
	do {
		if (r >> xlen)
			r >>= xlen;
		else
			shift += xlen;
	} while ((xlen /= 2) != 0);
	assert((d >> (64-1-shift)) == 1);
	d <<= shift;

	BIGLITTLE(q -= len-1,q += len-1);
	BIGLITTLE(n -= len,n += len);

	r = BIGLITTLE(*n++,*--n);
	if (r < d) {
		qhigh = 0;
	} else {
		qhigh = r/d;
		r %= d;
	}

	/* Divide word by word against the shifted divisor. */
	xlen = len;
	while (--xlen)
		r = lbnDiv21_64(BIGLITTLE(q++,--q), r, BIGLITTLE(*n++,*--n), d);

	/*
	 * Final correction for shift - shift the quotient up "shift"
	 * bits, and merge in the extra bits of quotient. Then reduce
	 * the final remainder mod the real d.
	 */
	if (shift) {
		d >>= shift;
		qhigh = (qhigh << shift) | lbnLshift_64(q, len-1, shift);
		BIGLITTLE(q[-1],*q) |= r/d;
		r %= d;
	}
	*rem = r;

	return qhigh;
}
#endif
  1241. /*
  1242. * This function performs a "quick" modulus of a number with a divisor
  1243. * d which is guaranteed to be at most sixteen bits, i.e. less than 65536.
  1244. * This applies regardless of the word size the library is compiled with.
  1245. *
  1246. * This function is important to prime generation, for sieving.
  1247. */
  1248. #ifndef lbnModQ_64
  1249. /* If there's a custom lbnMod21_64, no normalization needed */
  1250. #ifdef lbnMod21_64
  1251. unsigned
  1252. lbnModQ_64(BNWORD64 const *n, unsigned len, unsigned d)
  1253. {
  1254. unsigned i, shift;
  1255. BNWORD64 r;
  1256. assert(len > 0);
  1257. BIGLITTLE(n -= len,n += len);
  1258. /* Try using a compare to avoid the first divide */
  1259. r = BIGLITTLE(*n++,*--n);
  1260. if (r >= d)
  1261. r %= d;
  1262. while (--len)
  1263. r = lbnMod21_64(r, BIGLITTLE(*n++,*--n), d);
  1264. return r;
  1265. }
  1266. #elif defined(BNWORD128) && !BN_SLOW_DIVIDE_128
  1267. unsigned
  1268. lbnModQ_64(BNWORD64 const *n, unsigned len, unsigned d)
  1269. {
  1270. BNWORD64 r;
  1271. if (!--len)
  1272. return BIGLITTLE(n[-1],n[0]) % d;
  1273. BIGLITTLE(n -= len,n += len);
  1274. r = BIGLITTLE(n[-1],n[0]);
  1275. do {
  1276. r = (BNWORD64)((((BNWORD128)r<<64) | BIGLITTLE(*n++,*--n)) % d);
  1277. } while (--len);
  1278. return r;
  1279. }
  1280. #elif 64 >= 0x20
  1281. /*
  1282. * If the single word size can hold 65535*65536, then this function
  1283. * is avilable.
  1284. */
  1285. #ifndef highhalf
  1286. #define highhalf(x) ( (x) >> 64/2 )
  1287. #define lowhalf(x) ( (x) & ((1 << 64/2)-1) )
  1288. #endif
  1289. unsigned
  1290. lbnModQ_64(BNWORD64 const *n, unsigned len, unsigned d)
  1291. {
  1292. BNWORD64 r, x;
  1293. BIGLITTLE(n -= len,n += len);
  1294. r = BIGLITTLE(*n++,*--n);
  1295. while (--len) {
  1296. x = BIGLITTLE(*n++,*--n);
  1297. r = (r%d << 64/2) | highhalf(x);
  1298. r = (r%d << 64/2) | lowhalf(x);
  1299. }
  1300. return r%d;
  1301. }
  1302. #else
  1303. /* Default case - use lbnDiv21_64 */
  1304. unsigned
  1305. lbnModQ_64(BNWORD64 const *n, unsigned len, unsigned d)
  1306. {
  1307. unsigned i, shift;
  1308. BNWORD64 r;
  1309. BNWORD64 q;
  1310. assert(len > 0);
  1311. shift = 0;
  1312. r = d;
  1313. i = 64;
  1314. while (i /= 2) {
  1315. if (r >> i)
  1316. r >>= i;
  1317. else
  1318. shift += i;
  1319. }
  1320. assert(d >> (64-1-shift) == 1);
  1321. d <<= shift;
  1322. BIGLITTLE(n -= len,n += len);
  1323. r = BIGLITTLE(*n++,*--n);
  1324. if (r >= d)
  1325. r %= d;
  1326. while (--len)
  1327. r = lbnDiv21_64(&q, r, BIGLITTLE(*n++,*--n), d);
  1328. /*
  1329. * Final correction for shift - shift the quotient up "shift"
  1330. * bits, and merge in the extra bits of quotient. Then reduce
  1331. * the final remainder mod the real d.
  1332. */
  1333. if (shift)
  1334. r %= d >> shift;
  1335. return r;
  1336. }
  1337. #endif
  1338. #endif /* lbnModQ_64 */
  1339. /*
  1340. * Reduce n mod d and return the quotient. That is, find:
  1341. * q = n / d;
  1342. * n = n % d;
  1343. * d is altered during the execution of this subroutine by normalizing it.
  1344. * It must already have its most significant word non-zero; it is shifted
  1345. * so its most significant bit is non-zero.
  1346. *
  1347. * The quotient q is nlen-dlen+1 words long. To make it possible to
* overlap the quotient with the input (you can store it in the high dlen
  1349. * words), the high word of the quotient is *not* stored, but is returned.
  1350. * (If all you want is the remainder, you don't care about it, anyway.)
  1351. *
  1352. * This uses algorithm D from Knuth (4.3.1), except that we do binary
  1353. * (shift) normalization of the divisor. WARNING: This is hairy!
  1354. *
  1355. * This function is used for some modular reduction, but it is not used in
  1356. * the modular exponentiation loops; they use Montgomery form and the
  1357. * corresponding, more efficient, Montgomery reduction. This code
  1358. * is needed for the conversion to Montgomery form, however, so it
  1359. * has to be here and it might as well be reasonably efficient.
  1360. *
  1361. * The overall operation is as follows ("top" and "up" refer to the
  1362. * most significant end of the number; "bottom" and "down", the least):
  1363. *
  1364. * - Shift the divisor up until the most significant bit is set.
  1365. * - Shift the dividend up the same amount. This will produce the
  1366. * correct quotient, and the remainder can be recovered by shifting
  1367. * it back down the same number of bits. This may produce an overflow
  1368. * word, but the word is always strictly less than the most significant
  1369. * divisor word.
  1370. * - Estimate the first quotient digit qhat:
  1371. * - First take the top two words (one of which is the overflow) of the
  1372. * dividend and divide by the top word of the divisor:
  1373. * qhat = (nh,nm)/dh. This qhat is >= the correct quotient digit
  1374. * and, since dh is normalized, it is at most two over.
  1375. * - Second, correct by comparing the top three words. If
  1376. * (dh,dl) * qhat > (nh,nm,ml), decrease qhat and try again.
  1377. * The second iteration can be simpler because there can't be a third.
  1378. * The computation can be simplified by subtracting dh*qhat from
  1379. * both sides, suitably shifted. This reduces the left side to
  1380. * dl*qhat. On the right, (nh,nm)-dh*qhat is simply the
  1381. * remainder r from (nh,nm)%dh, so the right is (r,nl).
  1382. * This produces qhat that is almost always correct and at
  1383. * most (prob ~ 2/2^64) one too high.
  1384. * - Subtract qhat times the divisor (suitably shifted) from the dividend.
  1385. * If there is a borrow, qhat was wrong, so decrement it
  1386. * and add the divisor back in (once).
  1387. * - Store the final quotient digit qhat in the quotient array q.
  1388. *
  1389. * Repeat the quotient digit computation for successive digits of the
  1390. * quotient until the whole quotient has been computed. Then shift the
  1391. * divisor and the remainder down to correct for the normalization.
  1392. *
  1393. * TODO: Special case 2-word divisors.
  1394. * TODO: Use reciprocals rather than dividing.
  1395. */
  1396. #ifndef divn_64
  1397. BNWORD64
  1398. lbnDiv_64(BNWORD64 *q, BNWORD64 *n, unsigned nlen, BNWORD64 *d, unsigned dlen)
  1399. {
  1400. BNWORD64 nh,nm,nl; /* Top three words of the dividend */
  1401. BNWORD64 dh,dl; /* Top two words of the divisor */
  1402. BNWORD64 qhat; /* Extimate of quotient word */
  1403. BNWORD64 r; /* Remainder from quotient estimate division */
  1404. BNWORD64 qhigh; /* High word of quotient */
  1405. unsigned i; /* Temp */
  1406. unsigned shift; /* Bits shifted by normalization */
  1407. unsigned qlen = nlen-dlen; /* Size of quotient (less 1) */
  1408. #ifdef mul64_ppmm
  1409. BNWORD64 t64;
  1410. #elif defined(BNWORD128)
  1411. BNWORD128 t128;
  1412. #else /* use lbnMulN1_64 */
  1413. BNWORD64 t2[2];
  1414. #define t2high BIGLITTLE(t2[0],t2[1])
  1415. #define t2low BIGLITTLE(t2[1],t2[0])
  1416. #endif
  1417. assert(dlen);
  1418. assert(nlen >= dlen);
  1419. /*
  1420. * Special cases for short divisors. The general case uses the
  1421. * top top 2 digits of the divisor (d) to estimate a quotient digit,
  1422. * so it breaks if there are fewer digits available. Thus, we need
  1423. * special cases for a divisor of length 1. A divisor of length
  1424. * 2 can have a *lot* of administrivia overhead removed removed,
  1425. * so it's probably worth special-casing that case, too.
  1426. */
  1427. if (dlen == 1)
  1428. return lbnDiv1_64(q, BIGLITTLE(n-1,n), n, nlen,
  1429. BIGLITTLE(d[-1],d[0]));
  1430. #if 0
  1431. /*
  1432. * @@@ This is not yet written... The general loop will do,
  1433. * albeit less efficiently
  1434. */
  1435. if (dlen == 2) {
  1436. /*
  1437. * divisor two digits long:
  1438. * use the 3/2 technique from Knuth, but we know
  1439. * it's exact.
  1440. */
  1441. dh = BIGLITTLE(d[-1],d[0]);
  1442. dl = BIGLITTLE(d[-2],d[1]);
  1443. shift = 0;
  1444. if ((sh & ((BNWORD64)1 << 64-1-shift)) == 0) {
  1445. do {
  1446. shift++;
  1447. } while (dh & (BNWORD64)1<<64-1-shift) == 0);
  1448. dh = dh << shift | dl >> (64-shift);
  1449. dl <<= shift;
  1450. }
  1451. for (shift = 0; (dh & (BNWORD64)1 << 64-1-shift)) == 0; shift++)
  1452. ;
  1453. if (shift) {
  1454. }
  1455. dh = dh << shift | dl >> (64-shift);
  1456. shift = 0;
  1457. while (dh
  1458. }
  1459. #endif
  1460. dh = BIGLITTLE(*(d-dlen),*(d+(dlen-1)));
  1461. assert(dh);
  1462. /* Normalize the divisor */
  1463. shift = 0;
  1464. r = dh;
  1465. i = 64/2;
  1466. do {
  1467. if (r >> i)
  1468. r >>= i;
  1469. else
  1470. shift += i;
  1471. } while ((i /= 2) != 0);
  1472. nh = 0;
  1473. if (shift) {
  1474. lbnLshift_64(d, dlen, shift);
  1475. dh = BIGLITTLE(*(d-dlen),*(d+(dlen-1)));
  1476. nh = lbnLshift_64(n, nlen, shift);
  1477. }
  1478. /* Assert that dh is now normalized */
  1479. assert(dh >> (64-1));
  1480. /* Also get the second-most significant word of the divisor */
  1481. dl = BIGLITTLE(*(d-(dlen-1)),*(d+(dlen-2)));
  1482. /*
  1483. * Adjust pointers: n to point to least significant end of first
  1484. * first subtract, and q to one the most-significant end of the
  1485. * quotient array.
  1486. */
  1487. BIGLITTLE(n -= qlen,n += qlen);
  1488. BIGLITTLE(q -= qlen,q += qlen);
  1489. /* Fetch the most significant stored word of the dividend */
  1490. nm = BIGLITTLE(*(n-dlen),*(n+(dlen-1)));
  1491. /*
  1492. * Compute the first digit of the quotient, based on the
  1493. * first two words of the dividend (the most significant of which
  1494. * is the overflow word h).
  1495. */
  1496. if (nh) {
  1497. assert(nh < dh);
  1498. r = lbnDiv21_64(&qhat, nh, nm, dh);
  1499. } else if (nm >= dh) {
  1500. qhat = nm/dh;
  1501. r = nm % dh;
  1502. } else { /* Quotient is zero */
  1503. qhigh = 0;
  1504. goto divloop;
  1505. }
  1506. /* Now get the third most significant word of the dividend */
  1507. nl = BIGLITTLE(*(n-(dlen-1)),*(n+(dlen-2)));
  1508. /*
  1509. * Correct qhat, the estimate of quotient digit.
  1510. * qhat can only be high, and at most two words high,
  1511. * so the loop can be unrolled and abbreviated.
  1512. */
  1513. #ifdef mul64_ppmm
  1514. mul64_ppmm(nm, t64, qhat, dl);
  1515. if (nm > r || (nm == r && t64 > nl)) {
  1516. /* Decrement qhat and adjust comparison parameters */
  1517. qhat--;
  1518. if ((r += dh) >= dh) {
  1519. nm -= (t64 < dl);
  1520. t64 -= dl;
  1521. if (nm > r || (nm == r && t64 > nl))
  1522. qhat--;
  1523. }
  1524. }
  1525. #elif defined(BNWORD128)
  1526. t128 = (BNWORD128)qhat * dl;
  1527. if (t128 > ((BNWORD128)r << 64) + nl) {
  1528. /* Decrement qhat and adjust comparison parameters */
  1529. qhat--;
  1530. if ((r += dh) > dh) {
  1531. t128 -= dl;
  1532. if (t128 > ((BNWORD128)r << 64) + nl)
  1533. qhat--;
  1534. }
  1535. }
  1536. #else /* Use lbnMulN1_64 */
  1537. lbnMulN1_64(BIGLITTLE(t2+2,t2), &dl, 1, qhat);
  1538. if (t2high > r || (t2high == r && t2low > nl)) {
  1539. /* Decrement qhat and adjust comparison parameters */
  1540. qhat--;
  1541. if ((r += dh) >= dh) {
  1542. t2high -= (t2low < dl);
  1543. t2low -= dl;
  1544. if (t2high > r || (t2high == r && t2low > nl))
  1545. qhat--;
  1546. }
  1547. }
  1548. #endif
  1549. /* Do the multiply and subtract */
  1550. r = lbnMulSub1_64(n, d, dlen, qhat);
  1551. /* If there was a borrow, add back once. */
  1552. if (r > nh) { /* Borrow? */
  1553. (void)lbnAddN_64(n, d, dlen);
  1554. qhat--;
  1555. }
  1556. /* Remember the first quotient digit. */
  1557. qhigh = qhat;
  1558. /* Now, the main division loop: */
  1559. divloop:
  1560. while (qlen--) {
  1561. /* Advance n */
  1562. nh = BIGLITTLE(*(n-dlen),*(n+(dlen-1)));
  1563. BIGLITTLE(++n,--n);
  1564. nm = BIGLITTLE(*(n-dlen),*(n+(dlen-1)));
  1565. if (nh == dh) {
  1566. qhat = ~(BNWORD64)0;
  1567. /* Optimized computation of r = (nh,nm) - qhat * dh */
  1568. r = nh + nm;
  1569. if (r < nh)
  1570. goto subtract;
  1571. } else {
  1572. assert(nh < dh);
  1573. r = lbnDiv21_64(&qhat, nh, nm, dh);
  1574. }
  1575. nl = BIGLITTLE(*(n-(dlen-1)),*(n+(dlen-2)));
  1576. #ifdef mul64_ppmm
  1577. mul64_ppmm(nm, t64, qhat, dl);
  1578. if (nm > r || (nm == r && t64 > nl)) {
  1579. /* Decrement qhat and adjust comparison parameters */
  1580. qhat--;
  1581. if ((r += dh) >= dh) {
  1582. nm -= (t64 < dl);
  1583. t64 -= dl;
  1584. if (nm > r || (nm == r && t64 > nl))
  1585. qhat--;
  1586. }
  1587. }
  1588. #elif defined(BNWORD128)
  1589. t128 = (BNWORD128)qhat * dl;
  1590. if (t128 > ((BNWORD128)r<<64) + nl) {
  1591. /* Decrement qhat and adjust comparison parameters */
  1592. qhat--;
  1593. if ((r += dh) >= dh) {
  1594. t128 -= dl;
  1595. if (t128 > ((BNWORD128)r << 64) + nl)
  1596. qhat--;
  1597. }
  1598. }
  1599. #else /* Use lbnMulN1_64 */
  1600. lbnMulN1_64(BIGLITTLE(t2+2,t2), &dl, 1, qhat);
  1601. if (t2high > r || (t2high == r && t2low > nl)) {
  1602. /* Decrement qhat and adjust comparison parameters */
  1603. qhat--;
  1604. if ((r += dh) >= dh) {
  1605. t2high -= (t2low < dl);
  1606. t2low -= dl;
  1607. if (t2high > r || (t2high == r && t2low > nl))
  1608. qhat--;
  1609. }
  1610. }
  1611. #endif
  1612. /*
  1613. * As a point of interest, note that it is not worth checking
  1614. * for qhat of 0 or 1 and installing special-case code. These
  1615. * occur with probability 2^-64, so spending 1 cycle to check
  1616. * for them is only worth it if we save more than 2^15 cycles,
  1617. * and a multiply-and-subtract for numbers in the 1024-bit
  1618. * range just doesn't take that long.
  1619. */
  1620. subtract:
  1621. /*
  1622. * n points to the least significant end of the substring
  1623. * of n to be subtracted from. qhat is either exact or
  1624. * one too large. If the subtract gets a borrow, it was
  1625. * one too large and the divisor is added back in. It's
  1626. * a dlen+1 word add which is guaranteed to produce a
  1627. * carry out, so it can be done very simply.
  1628. */
  1629. r = lbnMulSub1_64(n, d, dlen, qhat);
  1630. if (r > nh) { /* Borrow? */
  1631. (void)lbnAddN_64(n, d, dlen);
  1632. qhat--;
  1633. }
  1634. /* Store the quotient digit */
  1635. BIGLITTLE(*q++,*--q) = qhat;
  1636. }
  1637. /* Tah dah! */
  1638. if (shift) {
  1639. lbnRshift_64(d, dlen, shift);
  1640. lbnRshift_64(n, dlen, shift);
  1641. }
  1642. return qhigh;
  1643. }
  1644. #endif
  1645. /*
  1646. * Find the negative multiplicative inverse of x (x must be odd!) modulo 2^64.
  1647. *
  1648. * This just performs Newton's iteration until it gets the
  1649. * inverse. The initial estimate is always correct to 3 bits, and
  1650. * sometimes 4. The number of valid bits doubles each iteration.
  1651. * (To prove it, assume x * y == 1 (mod 2^n), and introduce a variable
  1652. * for the error mod 2^2n. x * y == 1 + k*2^n (mod 2^2n) and follow
  1653. * the iteration through.)
  1654. */
  1655. #ifndef lbnMontInv1_64
  1656. BNWORD64
  1657. lbnMontInv1_64(BNWORD64 const x)
  1658. {
  1659. BNWORD64 y = x, z;
  1660. assert(x & 1);
  1661. while ((z = x*y) != 1)
  1662. y *= 2 - z;
  1663. return -y;
  1664. }
  1665. #endif /* !lbnMontInv1_64 */
#if defined(BNWORD128) && PRODUCT_SCAN
/*
 * Test code for product-scanning Montgomery reduction.
 * This seems to slow the C code down rather than speed it up.
 *
 * The first loop computes the Montgomery multipliers, storing them over
 * the low half of the number n.
 *
 * The second half multiplies the upper half, adding in the modulus
 * times the Montgomery multipliers.  The results of this multiply
 * are stored.
 *
 * Throughout, x is a double-word accumulator and "carry" counts
 * overflows out of its top; together they form a triple-word column sum.
 */
void
lbnMontReduce_64(BNWORD64 *n, BNWORD64 const *mod, unsigned mlen, BNWORD64 inv)
{
	BNWORD128 x, y;		/* Column accumulator and current product */
	BNWORD64 const *pm;	/* Walks the modulus */
	BNWORD64 *pn;		/* Walks the number being reduced */
	BNWORD64 t;
	unsigned carry;		/* Overflows out of the top of x */
	unsigned i, j;

	/* Special case of zero */
	if (!mlen)
		return;

	/* Pass 1 - compute Montgomery multipliers */
	/* First iteration can have certain simplifications. */
	t = BIGLITTLE(n[-1],n[0]);
	x = t;
	t *= inv;
	BIGLITTLE(n[-1], n[0]) = t;	/* Store multiplier over low word */
	x += (BNWORD128)t * BIGLITTLE(mod[-1],mod[0]); /* Can't overflow */
	assert((BNWORD64)x == 0);	/* Low word is cancelled by design */
	x = x >> 64;

	for (i = 1; i < mlen; i++) {
		carry = 0;
		pn = n;
		pm = BIGLITTLE(mod-i-1,mod+i+1);
		/* Accumulate the column-i products of prior multipliers */
		for (j = 0; j < i; j++) {
			y = (BNWORD128)BIGLITTLE(*--pn * *pm++, *pn++ * *--pm);
			x += y;
			carry += (x < y);
		}
		assert(BIGLITTLE(pn == n-i, pn == n+i));
		y = t = BIGLITTLE(pn[-1], pn[0]);
		x += y;
		carry += (x < y);
		/* New multiplier: chosen so this column sums to zero mod 2^64 */
		BIGLITTLE(pn[-1], pn[0]) = t = inv * (BNWORD64)x;
		assert(BIGLITTLE(pm == mod-1, pm == mod+1));
		y = (BNWORD128)t * BIGLITTLE(pm[0],pm[-1]);
		x += y;
		carry += (x < y);
		assert((BNWORD64)x == 0);
		x = x >> 64 | (BNWORD128)carry << 64;
	}

	BIGLITTLE(n -= mlen, n += mlen);

	/* Pass 2 - compute upper words and add to n */
	for (i = 1; i < mlen; i++) {
		carry = 0;
		pm = BIGLITTLE(mod-i,mod+i);
		pn = n;
		for (j = i; j < mlen; j++) {
			y = (BNWORD128)BIGLITTLE(*--pm * *pn++, *pm++ * *--pn);
			x += y;
			carry += (x < y);
		}
		assert(BIGLITTLE(pm == mod-mlen, pm == mod+mlen));
		assert(BIGLITTLE(pn == n+mlen-i, pn == n-mlen+i));
		y = t = BIGLITTLE(*(n-i),*(n+i-1));
		x += y;
		carry += (x < y);
		BIGLITTLE(*(n-i),*(n+i-1)) = (BNWORD64)x;
		x = (x >> 64) | (BNWORD128)carry << 64;
	}

	/* Last round of second half, simplified. */
	t = BIGLITTLE(*(n-mlen),*(n+mlen-1));
	x += t;
	BIGLITTLE(*(n-mlen),*(n+mlen-1)) = (BNWORD64)x;
	carry = (unsigned)(x >> 64);

	/* Final corrections: subtract mod until the result is in range */
	while (carry)
		carry -= lbnSubN_64(n, mod, mlen);
	while (lbnCmp_64(n, mod, mlen) >= 0)
		(void)lbnSubN_64(n, mod, mlen);
}
#define lbnMontReduce_64 lbnMontReduce_64
#endif
  1751. /*
  1752. * Montgomery reduce n, modulo mod. This reduces modulo mod and divides by
  1753. * 2^(64*mlen). Returns the result in the *top* mlen words of the argument n.
  1754. * This is ready for another multiplication using lbnMul_64.
  1755. *
  1756. * Montgomery representation is a very useful way to encode numbers when
  1757. * you're doing lots of modular reduction. What you do is pick a multiplier
  1758. * R which is relatively prime to the modulus and very easy to divide by.
* Since the modulus is odd, R is chosen as a power of 2, so the division
  1760. * is a shift. In fact, it's a shift of an integral number of words,
  1761. * so the shift can be implicit - just drop the low-order words.
  1762. *
  1763. * Now, choose R *larger* than the modulus m, 2^(64*mlen). Then convert
  1764. * all numbers a, b, etc. to Montgomery form M(a), M(b), etc using the
  1765. * relationship M(a) = a*R mod m, M(b) = b*R mod m, etc. Note that:
  1766. * - The Montgomery form of a number depends on the modulus m.
  1767. * A fixed modulus m is assumed throughout this discussion.
* - Since R is relatively prime to m, multiplication by R is invertible;
  1769. * no information about the numbers is lost, they're just scrambled.
  1770. * - Adding (and subtracting) numbers in this form works just as usual.
  1771. * M(a+b) = (a+b)*R mod m = (a*R + b*R) mod m = (M(a) + M(b)) mod m
  1772. * - Multiplying numbers in this form produces a*b*R*R. The problem
  1773. * is to divide out the excess factor of R, modulo m as well as to
  1774. * reduce to the given length mlen. It turns out that this can be
  1775. * done *faster* than a normal divide, which is where the speedup
  1776. * in Montgomery division comes from.
  1777. *
  1778. * Normal reduction chooses a most-significant quotient digit q and then
  1779. * subtracts q*m from the number to be reduced. Choosing q is tricky
  1780. * and involved (just look at lbnDiv_64 to see!) and is usually
  1781. * imperfect, requiring a check for correction after the subtraction.
  1782. *
  1783. * Montgomery reduction *adds* a multiple of m to the *low-order* part
  1784. * of the number to be reduced. This multiple is chosen to make the
  1785. * low-order part of the number come out to zero. This can be done
  1786. * with no trickery or error using a precomputed inverse of the modulus.
  1787. * In this code, the "part" is one word, but any width can be used.
  1788. *
  1789. * Repeating this step sufficiently often results in a value which
  1790. * is a multiple of R (a power of two, remember) but is still (since
  1791. * the additions were to the low-order part and thus did not increase
* the value of the number being reduced very much) not much
  1793. * larger than m*R. Then implicitly divide by R and subtract off
  1794. * m until the result is in the correct range.
  1795. *
  1796. * Since the low-order part being cancelled is less than R, the
  1797. * multiple of m added must have a multiplier which is at most R-1.
  1798. * Assuming that the input is at most m*R-1, the final number is
  1799. * at most m*(2*R-1)-1 = 2*m*R - m - 1, so subtracting m once from
  1800. * the high-order part, equivalent to subtracting m*R from the
* whole number, produces a result which is at most m*R - m - 1,
  1802. * which divided by R is at most m-1.
  1803. *
  1804. * To convert *to* Montgomery form, you need a regular remainder
  1805. * routine, although you can just compute R*R (mod m) and do the
  1806. * conversion using Montgomery multiplication. To convert *from*
  1807. * Montgomery form, just Montgomery reduce the number to
  1808. * remove the extra factor of R.
  1809. *
  1810. * TODO: Change to a full inverse and use Karatsuba's multiplication
  1811. * rather than this word-at-a-time.
  1812. */
#ifndef lbnMontReduce_64
/*
 * Word-at-a-time Montgomery reduction.  Each pass cancels the current
 * least-significant word of n by adding a suitable multiple of mod,
 * chosen via inv, the negative inverse of mod's low word mod 2^64.
 * The reduced result is left in the *top* mlen words of n; the implicit
 * division by R = 2^(64*mlen) is the discarding of the zeroed low half.
 */
void
lbnMontReduce_64(BNWORD64 *n, BNWORD64 const *mod, unsigned const mlen,
	BNWORD64 inv)
{
	BNWORD64 t;		/* Carry word out of each multiply-accumulate row */
	BNWORD64 c = 0;		/* Accumulated carries past the high end */
	unsigned len = mlen;	/* Low-order words remaining to cancel */

	/* inv must be the negative inverse of mod's least significant word */
	assert((BNWORD64)(inv * BIGLITTLE(mod[-1],mod[0])) == (BNWORD64)-1);

	assert(len);

	do {
		/* Add (inv * low word) * mod: zeroes the current low word */
		t = lbnMulAdd1_64(n, mod, mlen, inv * BIGLITTLE(n[-1],n[0]));
		/* Propagate the row's carry into the high half of n */
		c += lbnAdd1_64(BIGLITTLE(n-mlen,n+mlen), len, t);
		BIGLITTLE(--n,++n);	/* Step to the next word to cancel */
	} while (--len);

	/*
	 * All that adding can cause an overflow past the modulus size,
	 * but it's unusual, and never by much, so a subtraction loop
	 * is the right way to deal with it.
	 * This subtraction happens infrequently - I've only ever seen it
	 * invoked once per reduction, and then just under 22.5% of the time.
	 */
	while (c)
		c -= lbnSubN_64(n, mod, mlen);
	while (lbnCmp_64(n, mod, mlen) >= 0)
		(void)lbnSubN_64(n, mod, mlen);
}
#endif /* !lbnMontReduce_64 */
  1842. /*
  1843. * A couple of helpers that you might want to implement atomically
  1844. * in asm sometime.
  1845. */
#ifndef lbnMontMul_64
/*
 * Multiply "num1" by "num2", modulo "mod", all of length "len", and
 * place the result in the high half of "prod".  "inv" is the inverse
 * of the least-significant word of the modulus, modulo 2^64.
 * This uses numbers in Montgomery form.  Reduce using "len" and "inv".
 *
 * This is implemented as a macro to win on compilers that don't do
 * inlining, since it's so trivial.
 *
 * NOTE: as a macro it evaluates "prod", "mod", "len" and "inv" more
 * than once; pass side-effect-free expressions.
 */
#define lbnMontMul_64(prod, n1, n2, mod, len, inv) \
	(lbnMulX_64(prod, n1, n2, len), lbnMontReduce_64(prod, mod, len, inv))
#endif /* !lbnMontMul_64 */
#ifndef lbnMontSquare_64
/*
 * Square "n", modulo "mod", both of length "len", and place the result
 * in the high half of "prod".  "inv" is the inverse of the least-significant
 * word of the modulus, modulo 2^64.
 * This uses numbers in Montgomery form.  Reduce using "len" and "inv".
 *
 * This is implemented as a macro to win on compilers that don't do
 * inlining, since it's so trivial.
 *
 * NOTE: as a macro it evaluates "prod", "mod", "len" and "inv" more
 * than once; pass side-effect-free expressions.
 */
#define lbnMontSquare_64(prod, n, mod, len, inv) \
	(lbnSquare_64(prod, n, len), lbnMontReduce_64(prod, mod, len, inv))
#endif /* !lbnMontSquare_64 */
/*
 * Convert a number to Montgomery form - requires mlen + nlen words
 * of memory in "n".
 *
 * Multiplies n by R = 2^(64*mlen) by shifting it up mlen whole words,
 * then reduces modulo mod; the remainder (the Montgomery form) is left
 * in the low words of n.
 */
void
lbnToMont_64(BNWORD64 *n, unsigned nlen, BNWORD64 *mod, unsigned mlen)
{
	/* Move n up "mlen" words */
	lbnCopy_64(BIGLITTLE(n-mlen,n+mlen), n, nlen);
	lbnZero_64(n, mlen);	/* Low words of n*R are zero */
	/* Do the division - dump the quotient in the high-order words */
	(void)lbnDiv_64(BIGLITTLE(n-mlen,n+mlen), n, mlen+nlen, mod, mlen);
}
/*
 * Convert from Montgomery form.  Montgomery reduction is all that is
 * needed: it divides by R while reducing modulo mod.
 */
void
lbnFromMont_64(BNWORD64 *n, BNWORD64 *mod, unsigned len)
{
	/* Zero the high words of n */
	lbnZero_64(BIGLITTLE(n-len,n+len), len);
	/* Reduce; lbnMontInv1_64 supplies the required word inverse */
	lbnMontReduce_64(n, mod, len, lbnMontInv1_64(mod[BIGLITTLE(-1,0)]));
	/* Move n down len words */
	lbnCopy_64(n, BIGLITTLE(n-len,n+len), len);
}
  1898. /*
  1899. * The windowed exponentiation algorithm, precomputes a table of odd
  1900. * powers of n up to 2^k. See the comment in bnExpMod_64 below for
* an explanation of how it actually works.
  1902. *
  1903. * It takes 2^(k-1)-1 multiplies to compute the table, and (e-1)/(k+1)
  1904. * multiplies (on average) to perform the exponentiation. To minimize
  1905. * the sum, k must vary with e. The optimal window sizes vary with the
  1906. * exponent length. Here are some selected values and the boundary cases.
  1907. * (An underscore _ has been inserted into some of the numbers to ensure
  1908. * that magic strings like 64 do not appear in this table. It should be
  1909. * ignored.)
  1910. *
  1911. * At e = 1 bits, k=1 (0.000000) is best
  1912. * At e = 2 bits, k=1 (0.500000) is best
  1913. * At e = 4 bits, k=1 (1.500000) is best
  1914. * At e = 8 bits, k=2 (3.333333) < k=1 (3.500000)
  1915. * At e = 1_6 bits, k=2 (6.000000) is best
  1916. * At e = 26 bits, k=3 (9.250000) < k=2 (9.333333)
  1917. * At e = 3_2 bits, k=3 (10.750000) is best
  1918. * At e = 6_4 bits, k=3 (18.750000) is best
  1919. * At e = 82 bits, k=4 (23.200000) < k=3 (23.250000)
  1920. * At e = 128 bits, k=4 (3_2.400000) is best
  1921. * At e = 242 bits, k=5 (55.1_66667) < k=4 (55.200000)
  1922. * At e = 256 bits, k=5 (57.500000) is best
  1923. * At e = 512 bits, k=5 (100.1_66667) is best
  1924. * At e = 674 bits, k=6 (127.142857) < k=5 (127.1_66667)
  1925. * At e = 1024 bits, k=6 (177.142857) is best
  1926. * At e = 1794 bits, k=7 (287.125000) < k=6 (287.142857)
  1927. * At e = 2048 bits, k=7 (318.875000) is best
  1928. * At e = 4096 bits, k=7 (574.875000) is best
  1929. *
  1930. * The numbers in parentheses are the expected number of multiplications
  1931. * needed to do the computation. The normal russian-peasant modular
  1932. * exponentiation technique always uses (e-1)/2. For exponents as
  1933. * small as 192 bits (below the range of current factoring algorithms),
  1934. * half of the multiplies are eliminated, 45.2 as opposed to the naive
  1935. * 95.5. Counting the 191 squarings as 3/4 a multiply each (squaring
  1936. * proper is just over half of multiplying, but the Montgomery
  1937. * reduction in each case is also a multiply), that's 143.25
  1938. * multiplies, for totals of 188.45 vs. 238.75 - a 21% savings.
  1939. * For larger exponents (like 512 bits), it's 483.92 vs. 639.25, a
  1940. * 24.3% savings. It asymptotically approaches 25%.
  1941. *
  1942. * Um, actually there's a slightly more accurate way to count, which
  1943. * really is the average number of multiplies required, averaged
  1944. * uniformly over all 2^(e-1) e-bit numbers, from 2^(e-1) to (2^e)-1.
  1945. * It's based on the recurrence that for the last b bits, b <= k, at
  1946. * most one multiply is needed (and none at all 1/2^b of the time),
  1947. * while when b > k, the odds are 1/2 each way that the bit will be
  1948. * 0 (meaning no multiplies to reduce it to the b-1-bit case) and
  1949. * 1/2 that the bit will be 1, starting a k-bit window and requiring
  1950. * 1 multiply beyond the b-k-bit case. Since the most significant
  1951. * bit is always 1, a k-bit window always starts there, and that
  1952. * multiply is by 1, so it isn't a multiply at all. Thus, the
  1953. * number of multiplies is simply that needed for the last e-k bits.
  1954. * This recurrence produces:
  1955. *
  1956. * At e = 1 bits, k=1 (0.000000) is best
  1957. * At e = 2 bits, k=1 (0.500000) is best
  1958. * At e = 4 bits, k=1 (1.500000) is best
  1959. * At e = 6 bits, k=2 (2.437500) < k=1 (2.500000)
  1960. * At e = 8 bits, k=2 (3.109375) is best
  1961. * At e = 1_6 bits, k=2 (5.777771) is best
  1962. * At e = 24 bits, k=3 (8.437629) < k=2 (8.444444)
  1963. * At e = 3_2 bits, k=3 (10.437492) is best
  1964. * At e = 6_4 bits, k=3 (18.437500) is best
  1965. * At e = 81 bits, k=4 (22.6_40000) < k=3 (22.687500)
  1966. * At e = 128 bits, k=4 (3_2.040000) is best
  1967. * At e = 241 bits, k=5 (54.611111) < k=4 (54.6_40000)
  1968. * At e = 256 bits, k=5 (57.111111) is best
  1969. * At e = 512 bits, k=5 (99.777778) is best
  1970. * At e = 673 bits, k=6 (126.591837) < k=5 (126.611111)
  1971. * At e = 1024 bits, k=6 (176.734694) is best
  1972. * At e = 1793 bits, k=7 (286.578125) < k=6 (286.591837)
  1973. * At e = 2048 bits, k=7 (318.453125) is best
  1974. * At e = 4096 bits, k=7 (574.453125) is best
  1975. *
  1976. * This has the rollover points at 6, 24, 81, 241, 673 and 1793 instead
  1977. * of 8, 26, 82, 242, 674, and 1794. Not a very big difference.
  1978. * (The numbers past that are k=8 at 4609 and k=9 at 11521,
  1979. * vs. one more in each case for the approximation.)
  1980. *
  1981. * Given that exponents for which k>7 are useful are uncommon,
  1982. * a fixed size table for k <= 7 is used for simplicity.
  1983. *
  1984. * The basic number of squarings needed is e-1, although a k-bit
  1985. * window (for k > 1) can save, on average, k-2 of those, too.
  1986. * That savings currently isn't counted here. It would drive the
  1987. * crossover points slightly lower.
  1988. * (Actually, this win is also reduced in the DoubleExpMod case,
  1989. * meaning we'd have to split the tables. Except for that, the
  1990. * multiplies by powers of the two bases are independent, so
  1991. * the same logic applies to each as the single case.)
  1992. *
  1993. * Table entry i is the largest number of bits in an exponent to
  1994. * process with a window size of i+1. Entry 6 is the largest
  1995. * possible unsigned number, so the window will never be more
  1996. * than 7 bits, requiring 2^6 = 0x40 slots.
  1997. */
#define BNEXPMOD_MAX_WINDOW 7

/*
 * Entry i holds the largest exponent bit-length for which a window of
 * i+1 bits is used; the final entry is a sentinel (see comment above).
 */
static unsigned const bnExpModThreshTable[BNEXPMOD_MAX_WINDOW] = {
	5, 23, 80, 240, 672, 1792, (unsigned)-1
/*	7, 25, 81, 241, 673, 1793, (unsigned)-1	### The old approximations */
};
  2003. /*
  2004. * Perform modular exponentiation, as fast as possible! This uses
  2005. * Montgomery reduction, optimized squaring, and windowed exponentiation.
  2006. * The modulus "mod" MUST be odd!
  2007. *
  2008. * This returns 0 on success, -1 on out of memory.
  2009. *
  2010. * The window algorithm:
  2011. * The idea is to keep a running product of b1 = n^(high-order bits of exp),
  2012. * and then keep appending exponent bits to it. The following patterns
  2013. * apply to a 3-bit window (k = 3):
  2014. * To append 0: square
  2015. * To append 1: square, multiply by n^1
  2016. * To append 10: square, multiply by n^1, square
  2017. * To append 11: square, square, multiply by n^3
  2018. * To append 100: square, multiply by n^1, square, square
  2019. * To append 101: square, square, square, multiply by n^5
  2020. * To append 110: square, square, multiply by n^3, square
  2021. * To append 111: square, square, square, multiply by n^7
  2022. *
  2023. * Since each pattern involves only one multiply, the longer the pattern
  2024. * the better, except that a 0 (no multiplies) can be appended directly.
  2025. * We precompute a table of odd powers of n, up to 2^k, and can then
  2026. * multiply k bits of exponent at a time. Actually, assuming random
  2027. * exponents, there is on average one zero bit between needs to
  2028. * multiply (1/2 of the time there's none, 1/4 of the time there's 1,
  2029. * 1/8 of the time, there's 2, 1/64 of the time, there's 3, etc.), so
  2030. * you have to do one multiply per k+1 bits of exponent.
  2031. *
  2032. * The loop walks down the exponent, squaring the result buffer as
  2033. * it goes. There is a wbits+1 bit lookahead buffer, buf, that is
  2034. * filled with the upcoming exponent bits. (What is read after the
  2035. * end of the exponent is unimportant, but it is filled with zero here.)
  2036. * When the most-significant bit of this buffer becomes set, i.e.
  2037. * (buf & tblmask) != 0, we have to decide what pattern to multiply
  2038. * by, and when to do it. We decide, remember to do it in future
  2039. * after a suitable number of squarings have passed (e.g. a pattern
  2040. * of "100" in the buffer requires that we multiply by n^1 immediately;
  2041. * a pattern of "110" calls for multiplying by n^3 after one more
  2042. * squaring), clear the buffer, and continue.
  2043. *
  2044. * When we start, there is one more optimization: the result buffer
  2045. * is implicitly one, so squaring it or multiplying by it can be
  2046. * optimized away. Further, if we start with a pattern like "100"
  2047. * in the lookahead window, rather than placing n into the buffer
  2048. * and then starting to square it, we have already computed n^2
  2049. * to compute the odd-powers table, so we can place that into
  2050. * the buffer and save a squaring.
  2051. *
  2052. * This means that if you have a k-bit window, to compute n^z,
  2053. * where z is the high k bits of the exponent, 1/2 of the time
  2054. * it requires no squarings. 1/4 of the time, it requires 1
  2055. * squaring, ... 1/2^(k-1) of the time, it requires k-2 squarings.
  2056. * And the remaining 1/2^(k-1) of the time, the top k bits are a
  2057. * 1 followed by k-1 0 bits, so it again only requires k-2
  2058. * squarings, not k-1. The average of these is 1. Add that
  2059. * to the one squaring we have to do to compute the table,
  2060. * and you'll see that a k-bit window saves k-2 squarings
  2061. * as well as reducing the multiplies. (It actually doesn't
  2062. * hurt in the case k = 1, either.)
  2063. *
  2064. * n must have mlen words allocated. Although fewer may be in use
  2065. * when n is passed in, all are in use on exit.
  2066. */
  2067. int
  2068. lbnExpMod_64(BNWORD64 *result, BNWORD64 const *n, unsigned nlen,
  2069. BNWORD64 const *e, unsigned elen, BNWORD64 *mod, unsigned mlen)
  2070. {
  2071. BNWORD64 *table[1 << (BNEXPMOD_MAX_WINDOW-1)];
  2072. /* Table of odd powers of n */
  2073. unsigned ebits; /* Exponent bits */
  2074. unsigned wbits; /* Window size */
  2075. unsigned tblmask; /* Mask of exponentiation window */
  2076. BNWORD64 bitpos; /* Mask of current look-ahead bit */
  2077. unsigned buf; /* Buffer of exponent bits */
  2078. unsigned multpos; /* Where to do pending multiply */
  2079. BNWORD64 const *mult; /* What to multiply by */
  2080. unsigned i; /* Loop counter */
  2081. int isone; /* Flag: accum. is implicitly one */
  2082. BNWORD64 *a, *b; /* Working buffers/accumulators */
  2083. BNWORD64 *t; /* Pointer into the working buffers */
  2084. BNWORD64 inv; /* mod^-1 modulo 2^64 */
  2085. int y; /* bnYield() result */
  2086. assert(mlen);
  2087. assert(nlen <= mlen);
  2088. /* First, a couple of trivial cases. */
  2089. elen = lbnNorm_64(e, elen);
  2090. if (!elen) {
  2091. /* x ^ 0 == 1 */
  2092. lbnZero_64(result, mlen);
  2093. BIGLITTLE(result[-1],result[0]) = 1;
  2094. return 0;
  2095. }
  2096. ebits = lbnBits_64(e, elen);
  2097. if (ebits == 1) {
  2098. /* x ^ 1 == x */
  2099. if (n != result)
  2100. lbnCopy_64(result, n, nlen);
  2101. if (mlen > nlen)
  2102. lbnZero_64(BIGLITTLE(result-nlen,result+nlen),
  2103. mlen-nlen);
  2104. return 0;
  2105. }
  2106. /* Okay, now move the exponent pointer to the most-significant word */
  2107. e = BIGLITTLE(e-elen, e+elen-1);
  2108. /* Look up appropriate k-1 for the exponent - tblmask = 1<<(k-1) */
  2109. wbits = 0;
  2110. while (ebits > bnExpModThreshTable[wbits])
  2111. wbits++;
  2112. /* Allocate working storage: two product buffers and the tables. */
  2113. LBNALLOC(a, BNWORD64, 2*mlen);
  2114. if (!a)
  2115. return -1;
  2116. LBNALLOC(b, BNWORD64, 2*mlen);
  2117. if (!b) {
  2118. LBNFREE(a, 2*mlen);
  2119. return -1;
  2120. }
  2121. /* Convert to the appropriate table size: tblmask = 1<<(k-1) */
  2122. tblmask = 1u << wbits;
  2123. /* We have the result buffer available, so use it. */
  2124. table[0] = result;
  2125. /*
  2126. * Okay, we now have a minimal-sized table - expand it.
  2127. * This is allowed to fail! If so, scale back the table size
  2128. * and proceed.
  2129. */
  2130. for (i = 1; i < tblmask; i++) {
  2131. LBNALLOC(t, BNWORD64, mlen);
  2132. if (!t) /* Out of memory! Quit the loop. */
  2133. break;
  2134. table[i] = t;
  2135. }
  2136. /* If we stopped, with i < tblmask, shrink the tables appropriately */
  2137. while (tblmask > i) {
  2138. wbits--;
  2139. tblmask >>= 1;
  2140. }
  2141. /* Free up our overallocations */
  2142. while (--i > tblmask)
  2143. LBNFREE(table[i], mlen);
  2144. /* Okay, fill in the table */
  2145. /* Compute the necessary modular inverse */
  2146. inv = lbnMontInv1_64(mod[BIGLITTLE(-1,0)]); /* LSW of modulus */
  2147. /* Convert n to Montgomery form */
  2148. /* Move n up "mlen" words into a */
  2149. t = BIGLITTLE(a-mlen, a+mlen);
  2150. lbnCopy_64(t, n, nlen);
  2151. lbnZero_64(a, mlen);
  2152. /* Do the division - lose the quotient into the high-order words */
  2153. (void)lbnDiv_64(t, a, mlen+nlen, mod, mlen);
  2154. /* Copy into first table entry */
  2155. lbnCopy_64(table[0], a, mlen);
  2156. /* Square a into b */
  2157. lbnMontSquare_64(b, a, mod, mlen, inv);
  2158. /* Use high half of b to initialize the table */
  2159. t = BIGLITTLE(b-mlen, b+mlen);
  2160. for (i = 1; i < tblmask; i++) {
  2161. lbnMontMul_64(a, t, table[i-1], mod, mlen, inv);
  2162. lbnCopy_64(table[i], BIGLITTLE(a-mlen, a+mlen), mlen);
  2163. #if BNYIELD
  2164. if (bnYield && (y = bnYield()) < 0)
  2165. goto yield;
  2166. #endif
  2167. }
  2168. /* We might use b = n^2 later... */
  2169. /* Initialze the fetch pointer */
  2170. bitpos = (BNWORD64)1 << ((ebits-1) & (64-1)); /* Initialize mask */
  2171. /* This should point to the msbit of e */
  2172. assert((*e & bitpos) != 0);
  2173. /*
  2174. * Pre-load the window. Becuase the window size is
  2175. * never larger than the exponent size, there is no need to
  2176. * detect running off the end of e in here.
  2177. *
  2178. * The read-ahead is controlled by elen and the bitpos mask.
  2179. * Note that this is *ahead* of ebits, which tracks the
  2180. * most significant end of the window. The purpose of this
  2181. * initialization is to get the two wbits+1 bits apart,
  2182. * like they should be.
  2183. *
  2184. * Note that bitpos and e1len together keep track of the
  2185. * lookahead read pointer in the exponent that is used here.
  2186. */
  2187. buf = 0;
  2188. for (i = 0; i <= wbits; i++) {
  2189. buf = (buf << 1) | ((*e & bitpos) != 0);
  2190. bitpos >>= 1;
  2191. if (!bitpos) {
  2192. BIGLITTLE(e++,e--);
  2193. bitpos = (BNWORD64)1 << (64-1);
  2194. elen--;
  2195. }
  2196. }
  2197. assert(buf & tblmask);
  2198. /*
  2199. * Set the pending multiply positions to a location that will
  2200. * never be encountered, thus ensuring that nothing will happen
  2201. * until the need for a multiply appears and one is scheduled.
  2202. */
  2203. multpos = ebits; /* A NULL value */
  2204. mult = 0; /* Force a crash if we use these */
  2205. /*
  2206. * Okay, now begins the real work. The first step is
  2207. * slightly magic, so it's done outside the main loop,
  2208. * but it's very similar to what's inside.
  2209. */
  2210. ebits--; /* Start processing the first bit... */
  2211. isone = 1;
  2212. /*
  2213. * This is just like the multiply in the loop, except that
  2214. * - We know the msbit of buf is set, and
  2215. * - We have the extra value n^2 floating around.
  2216. * So, do the usual computation, and if the result is that
  2217. * the buffer should be multiplied by n^1 immediately
  2218. * (which we'd normally then square), we multiply it
  2219. * (which reduces to a copy, which reduces to setting a flag)
  2220. * by n^2 and skip the squaring. Thus, we do the
  2221. * multiply and the squaring in one step.
  2222. */
  2223. assert(buf & tblmask);
  2224. multpos = ebits - wbits;
  2225. while ((buf & 1) == 0) {
  2226. buf >>= 1;
  2227. multpos++;
  2228. }
  2229. /* Intermediates can wrap, but final must NOT */
  2230. assert(multpos <= ebits);
  2231. mult = table[buf>>1];
  2232. buf = 0;
  2233. /* Special case: use already-computed value sitting in buffer */
  2234. if (multpos == ebits)
  2235. isone = 0;
  2236. /*
  2237. * At this point, the buffer (which is the high half of b) holds
  2238. * either 1 (implicitly, as the "isone" flag is set), or n^2.
  2239. */
  2240. /*
  2241. * The main loop. The procedure is:
  2242. * - Advance the window
  2243. * - If the most-significant bit of the window is set,
  2244. * schedule a multiply for the appropriate time in the
  2245. * future (may be immediately)
  2246. * - Perform any pending multiples
  2247. * - Check for termination
  2248. * - Square the buffer
  2249. *
  2250. * At any given time, the acumulated product is held in
  2251. * the high half of b.
  2252. */
  2253. for (;;) {
  2254. ebits--;
  2255. /* Advance the window */
  2256. assert(buf < tblmask);
  2257. buf <<= 1;
  2258. /*
  2259. * This reads ahead of the current exponent position
  2260. * (controlled by ebits), so we have to be able to read
  2261. * past the lsb of the exponents without error.
  2262. */
  2263. if (elen) {
  2264. buf |= ((*e & bitpos) != 0);
  2265. bitpos >>= 1;
  2266. if (!bitpos) {
  2267. BIGLITTLE(e++,e--);
  2268. bitpos = (BNWORD64)1 << (64-1);
  2269. elen--;
  2270. }
  2271. }
  2272. /* Examine the window for pending multiplies */
  2273. if (buf & tblmask) {
  2274. multpos = ebits - wbits;
  2275. while ((buf & 1) == 0) {
  2276. buf >>= 1;
  2277. multpos++;
  2278. }
  2279. /* Intermediates can wrap, but final must NOT */
  2280. assert(multpos <= ebits);
  2281. mult = table[buf>>1];
  2282. buf = 0;
  2283. }
  2284. /* If we have a pending multiply, do it */
  2285. if (ebits == multpos) {
  2286. /* Multiply by the table entry remembered previously */
  2287. t = BIGLITTLE(b-mlen, b+mlen);
  2288. if (isone) {
  2289. /* Multiply by 1 is a trivial case */
  2290. lbnCopy_64(t, mult, mlen);
  2291. isone = 0;
  2292. } else {
  2293. lbnMontMul_64(a, t, mult, mod, mlen, inv);
  2294. /* Swap a and b */
  2295. t = a; a = b; b = t;
  2296. }
  2297. }
  2298. /* Are we done? */
  2299. if (!ebits)
  2300. break;
  2301. /* Square the input */
  2302. if (!isone) {
  2303. t = BIGLITTLE(b-mlen, b+mlen);
  2304. lbnMontSquare_64(a, t, mod, mlen, inv);
  2305. /* Swap a and b */
  2306. t = a; a = b; b = t;
  2307. }
  2308. #if BNYIELD
  2309. if (bnYield && (y = bnYield()) < 0)
  2310. goto yield;
  2311. #endif
  2312. } /* for (;;) */
  2313. assert(!isone);
  2314. assert(!buf);
  2315. /* DONE! */
  2316. /* Convert result out of Montgomery form */
  2317. t = BIGLITTLE(b-mlen, b+mlen);
  2318. lbnCopy_64(b, t, mlen);
  2319. lbnZero_64(t, mlen);
  2320. lbnMontReduce_64(b, mod, mlen, inv);
  2321. lbnCopy_64(result, t, mlen);
  2322. /*
  2323. * Clean up - free intermediate storage.
  2324. * Do NOT free table[0], which is the result
  2325. * buffer.
  2326. */
  2327. y = 0;
  2328. #if BNYIELD
  2329. yield:
  2330. #endif
  2331. while (--tblmask)
  2332. LBNFREE(table[tblmask], mlen);
  2333. LBNFREE(b, 2*mlen);
  2334. LBNFREE(a, 2*mlen);
  2335. return y; /* Success */
  2336. }
  2337. /*
  2338. * Compute and return n1^e1 * n2^e2 mod "mod".
  2339. * result may be either input buffer, or something separate.
  2340. * It must be "mlen" words long.
  2341. *
  2342. * There is a current position in the exponents, which is kept in e1bits.
  2343. * (The exponents are swapped if necessary so e1 is the longer of the two.)
  2344. * At any given time, the value in the accumulator is
  2345. * n1^(e1>>e1bits) * n2^(e2>>e1bits) mod "mod".
  2346. * As e1bits is counted down, this is updated, by squaring it and doing
  2347. * any necessary multiplies.
  2348. * To decide on the necessary multiplies, two windows, each w1bits+1 bits
  2349. * wide, are maintained in buf1 and buf2, which read *ahead* of the
  2350. * e1bits position (with appropriate handling of the case when e1bits
  2351. * drops below w1bits+1). When the most-significant bit of either window
  2352. * becomes set, indicating that something needs to be multiplied by
  2353. * the accumulator or it will get out of sync, the window is examined
  2354. * to see which power of n1 or n2 to multiply by, and when (possibly
  2355. * later, if the power is greater than 1) the multiply should take
  2356. * place. Then the multiply and its location are remembered and the
  2357. * window is cleared.
  2358. *
  2359. * If we had every power of n1 in the table, the multiply would always
  2360. * be w1bits steps in the future. But we only keep the odd powers,
  2361. * so instead of waiting w1bits squarings and then multiplying
  2362. * by n1^k, we wait w1bits-k squarings and multiply by n1.
  2363. *
  2364. * Actually, w2bits can be less than w1bits, but the window is the same
  2365. * size, to make it easier to keep track of where we're reading. The
  2366. * appropriate number of low-order bits of the window are just ignored.
  2367. */
  2368. int
  2369. lbnDoubleExpMod_64(BNWORD64 *result,
  2370. BNWORD64 const *n1, unsigned n1len,
  2371. BNWORD64 const *e1, unsigned e1len,
  2372. BNWORD64 const *n2, unsigned n2len,
  2373. BNWORD64 const *e2, unsigned e2len,
  2374. BNWORD64 *mod, unsigned mlen)
  2375. {
  2376. BNWORD64 *table1[1 << (BNEXPMOD_MAX_WINDOW-1)];
  2377. /* Table of odd powers of n1 */
  2378. BNWORD64 *table2[1 << (BNEXPMOD_MAX_WINDOW-1)];
  2379. /* Table of odd powers of n2 */
  2380. unsigned e1bits, e2bits; /* Exponent bits */
  2381. unsigned w1bits, w2bits; /* Window sizes */
  2382. unsigned tblmask; /* Mask of exponentiation window */
  2383. BNWORD64 bitpos; /* Mask of current look-ahead bit */
  2384. unsigned buf1, buf2; /* Buffer of exponent bits */
  2385. unsigned mult1pos, mult2pos; /* Where to do pending multiply */
  2386. BNWORD64 const *mult1, *mult2; /* What to multiply by */
  2387. unsigned i; /* Loop counter */
  2388. int isone; /* Flag: accum. is implicitly one */
  2389. BNWORD64 *a, *b; /* Working buffers/accumulators */
  2390. BNWORD64 *t; /* Pointer into the working buffers */
  2391. BNWORD64 inv; /* mod^-1 modulo 2^64 */
  2392. int y; /* bnYield() result */
  2393. assert(mlen);
  2394. assert(n1len <= mlen);
  2395. assert(n2len <= mlen);
  2396. /* First, a couple of trivial cases. */
  2397. e1len = lbnNorm_64(e1, e1len);
  2398. e2len = lbnNorm_64(e2, e2len);
  2399. /* Ensure that the first exponent is the longer */
  2400. e1bits = lbnBits_64(e1, e1len);
  2401. e2bits = lbnBits_64(e2, e2len);
  2402. if (e1bits < e2bits) {
  2403. i = e1len; e1len = e2len; e2len = i;
  2404. i = e1bits; e1bits = e2bits; e2bits = i;
  2405. t = (BNWORD64 *)n1; n1 = n2; n2 = t;
  2406. t = (BNWORD64 *)e1; e1 = e2; e2 = t;
  2407. }
  2408. assert(e1bits >= e2bits);
  2409. /* Handle a trivial case */
  2410. if (!e2len)
  2411. return lbnExpMod_64(result, n1, n1len, e1, e1len, mod, mlen);
  2412. assert(e2bits);
  2413. /* The code below fucks up if the exponents aren't at least 2 bits */
  2414. if (e1bits == 1) {
  2415. assert(e2bits == 1);
  2416. LBNALLOC(a, BNWORD64, n1len+n2len);
  2417. if (!a)
  2418. return -1;
  2419. lbnMul_64(a, n1, n1len, n2, n2len);
  2420. /* Do a direct modular reduction */
  2421. if (n1len + n2len >= mlen)
  2422. (void)lbnDiv_64(a+mlen, a, n1len+n2len, mod, mlen);
  2423. lbnCopy_64(result, a, mlen);
  2424. LBNFREE(a, n1len+n2len);
  2425. return 0;
  2426. }
  2427. /* Okay, now move the exponent pointers to the most-significant word */
  2428. e1 = BIGLITTLE(e1-e1len, e1+e1len-1);
  2429. e2 = BIGLITTLE(e2-e2len, e2+e2len-1);
  2430. /* Look up appropriate k-1 for the exponent - tblmask = 1<<(k-1) */
  2431. w1bits = 0;
  2432. while (e1bits > bnExpModThreshTable[w1bits])
  2433. w1bits++;
  2434. w2bits = 0;
  2435. while (e2bits > bnExpModThreshTable[w2bits])
  2436. w2bits++;
  2437. assert(w1bits >= w2bits);
  2438. /* Allocate working storage: two product buffers and the tables. */
  2439. LBNALLOC(a, BNWORD64, 2*mlen);
  2440. if (!a)
  2441. return -1;
  2442. LBNALLOC(b, BNWORD64, 2*mlen);
  2443. if (!b) {
  2444. LBNFREE(a, 2*mlen);
  2445. return -1;
  2446. }
  2447. /* Convert to the appropriate table size: tblmask = 1<<(k-1) */
  2448. tblmask = 1u << w1bits;
  2449. /* Use buf2 for its size, temporarily */
  2450. buf2 = 1u << w2bits;
  2451. LBNALLOC(t, BNWORD64, mlen);
  2452. if (!t) {
  2453. LBNFREE(b, 2*mlen);
  2454. LBNFREE(a, 2*mlen);
  2455. return -1;
  2456. }
  2457. table1[0] = t;
  2458. table2[0] = result;
  2459. /*
  2460. * Okay, we now have some minimal-sized tables - expand them.
  2461. * This is allowed to fail! If so, scale back the table sizes
  2462. * and proceed. We allocate both tables at the same time
  2463. * so if it fails partway through, they'll both be a reasonable
  2464. * size rather than one huge and one tiny.
  2465. * When i passes buf2 (the number of entries in the e2 window,
  2466. * which may be less than the number of entries in the e1 window),
  2467. * stop allocating e2 space.
  2468. */
  2469. for (i = 1; i < tblmask; i++) {
  2470. LBNALLOC(t, BNWORD64, mlen);
  2471. if (!t) /* Out of memory! Quit the loop. */
  2472. break;
  2473. table1[i] = t;
  2474. if (i < buf2) {
  2475. LBNALLOC(t, BNWORD64, mlen);
  2476. if (!t) {
  2477. LBNFREE(table1[i], mlen);
  2478. break;
  2479. }
  2480. table2[i] = t;
  2481. }
  2482. }
  2483. /* If we stopped, with i < tblmask, shrink the tables appropriately */
  2484. while (tblmask > i) {
  2485. w1bits--;
  2486. tblmask >>= 1;
  2487. }
  2488. /* Free up our overallocations */
  2489. while (--i > tblmask) {
  2490. if (i < buf2)
  2491. LBNFREE(table2[i], mlen);
  2492. LBNFREE(table1[i], mlen);
  2493. }
  2494. /* And shrink the second window too, if needed */
  2495. if (w2bits > w1bits) {
  2496. w2bits = w1bits;
  2497. buf2 = tblmask;
  2498. }
  2499. /*
  2500. * From now on, use the w2bits variable for the difference
  2501. * between w1bits and w2bits.
  2502. */
  2503. w2bits = w1bits-w2bits;
  2504. /* Okay, fill in the tables */
  2505. /* Compute the necessary modular inverse */
  2506. inv = lbnMontInv1_64(mod[BIGLITTLE(-1,0)]); /* LSW of modulus */
  2507. /* Convert n1 to Montgomery form */
  2508. /* Move n1 up "mlen" words into a */
  2509. t = BIGLITTLE(a-mlen, a+mlen);
  2510. lbnCopy_64(t, n1, n1len);
  2511. lbnZero_64(a, mlen);
  2512. /* Do the division - lose the quotient into the high-order words */
  2513. (void)lbnDiv_64(t, a, mlen+n1len, mod, mlen);
  2514. /* Copy into first table entry */
  2515. lbnCopy_64(table1[0], a, mlen);
  2516. /* Square a into b */
  2517. lbnMontSquare_64(b, a, mod, mlen, inv);
  2518. /* Use high half of b to initialize the first table */
  2519. t = BIGLITTLE(b-mlen, b+mlen);
  2520. for (i = 1; i < tblmask; i++) {
  2521. lbnMontMul_64(a, t, table1[i-1], mod, mlen, inv);
  2522. lbnCopy_64(table1[i], BIGLITTLE(a-mlen, a+mlen), mlen);
  2523. #if BNYIELD
  2524. if (bnYield && (y = bnYield()) < 0)
  2525. goto yield;
  2526. #endif
  2527. }
  2528. /* Convert n2 to Montgomery form */
  2529. t = BIGLITTLE(a-mlen, a+mlen);
  2530. /* Move n2 up "mlen" words into a */
  2531. lbnCopy_64(t, n2, n2len);
  2532. lbnZero_64(a, mlen);
  2533. /* Do the division - lose the quotient into the high-order words */
  2534. (void)lbnDiv_64(t, a, mlen+n2len, mod, mlen);
  2535. /* Copy into first table entry */
  2536. lbnCopy_64(table2[0], a, mlen);
  2537. /* Square it into a */
  2538. lbnMontSquare_64(a, table2[0], mod, mlen, inv);
  2539. /* Copy to b, low half */
  2540. lbnCopy_64(b, t, mlen);
  2541. /* Use b to initialize the second table */
  2542. for (i = 1; i < buf2; i++) {
  2543. lbnMontMul_64(a, b, table2[i-1], mod, mlen, inv);
  2544. lbnCopy_64(table2[i], t, mlen);
  2545. #if BNYIELD
  2546. if (bnYield && (y = bnYield()) < 0)
  2547. goto yield;
  2548. #endif
  2549. }
  2550. /*
  2551. * Okay, a recap: at this point, the low part of b holds
  2552. * n2^2, the high part holds n1^2, and the tables are
  2553. * initialized with the odd powers of n1 and n2 from 1
  2554. * through 2*tblmask-1 and 2*buf2-1.
  2555. *
  2556. * We might use those squares in b later, or we might not.
  2557. */
  2558. /* Initialze the fetch pointer */
  2559. bitpos = (BNWORD64)1 << ((e1bits-1) & (64-1)); /* Initialize mask */
  2560. /* This should point to the msbit of e1 */
  2561. assert((*e1 & bitpos) != 0);
  2562. /*
  2563. * Pre-load the windows. Becuase the window size is
  2564. * never larger than the exponent size, there is no need to
  2565. * detect running off the end of e1 in here.
  2566. *
  2567. * The read-ahead is controlled by e1len and the bitpos mask.
  2568. * Note that this is *ahead* of e1bits, which tracks the
  2569. * most significant end of the window. The purpose of this
  2570. * initialization is to get the two w1bits+1 bits apart,
  2571. * like they should be.
  2572. *
  2573. * Note that bitpos and e1len together keep track of the
  2574. * lookahead read pointer in the exponent that is used here.
  2575. * e2len is not decremented, it is only ever compared with
  2576. * e1len as *that* is decremented.
  2577. */
  2578. buf1 = buf2 = 0;
  2579. for (i = 0; i <= w1bits; i++) {
  2580. buf1 = (buf1 << 1) | ((*e1 & bitpos) != 0);
  2581. if (e1len <= e2len)
  2582. buf2 = (buf2 << 1) | ((*e2 & bitpos) != 0);
  2583. bitpos >>= 1;
  2584. if (!bitpos) {
  2585. BIGLITTLE(e1++,e1--);
  2586. if (e1len <= e2len)
  2587. BIGLITTLE(e2++,e2--);
  2588. bitpos = (BNWORD64)1 << (64-1);
  2589. e1len--;
  2590. }
  2591. }
  2592. assert(buf1 & tblmask);
  2593. /*
  2594. * Set the pending multiply positions to a location that will
  2595. * never be encountered, thus ensuring that nothing will happen
  2596. * until the need for a multiply appears and one is scheduled.
  2597. */
  2598. mult1pos = mult2pos = e1bits; /* A NULL value */
  2599. mult1 = mult2 = 0; /* Force a crash if we use these */
  2600. /*
  2601. * Okay, now begins the real work. The first step is
  2602. * slightly magic, so it's done outside the main loop,
  2603. * but it's very similar to what's inside.
  2604. */
  2605. isone = 1; /* Buffer is implicitly 1, so replace * by copy */
  2606. e1bits--; /* Start processing the first bit... */
  2607. /*
  2608. * This is just like the multiply in the loop, except that
  2609. * - We know the msbit of buf1 is set, and
  2610. * - We have the extra value n1^2 floating around.
  2611. * So, do the usual computation, and if the result is that
  2612. * the buffer should be multiplied by n1^1 immediately
  2613. * (which we'd normally then square), we multiply it
  2614. * (which reduces to a copy, which reduces to setting a flag)
  2615. * by n1^2 and skip the squaring. Thus, we do the
  2616. * multiply and the squaring in one step.
  2617. */
  2618. assert(buf1 & tblmask);
  2619. mult1pos = e1bits - w1bits;
  2620. while ((buf1 & 1) == 0) {
  2621. buf1 >>= 1;
  2622. mult1pos++;
  2623. }
  2624. /* Intermediates can wrap, but final must NOT */
  2625. assert(mult1pos <= e1bits);
  2626. mult1 = table1[buf1>>1];
  2627. buf1 = 0;
  2628. /* Special case: use already-computed value sitting in buffer */
  2629. if (mult1pos == e1bits)
  2630. isone = 0;
  2631. /*
  2632. * The first multiply by a power of n2. Similar, but
  2633. * we might not even want to schedule a multiply if e2 is
  2634. * shorter than e1, and the window might be shorter so
  2635. * we have to leave the low w2bits bits alone.
  2636. */
  2637. if (buf2 & tblmask) {
  2638. /* Remember low-order bits for later */
  2639. i = buf2 & ((1u << w2bits) - 1);
  2640. buf2 >>= w2bits;
  2641. mult2pos = e1bits - w1bits + w2bits;
  2642. while ((buf2 & 1) == 0) {
  2643. buf2 >>= 1;
  2644. mult2pos++;
  2645. }
  2646. assert(mult2pos <= e1bits);
  2647. mult2 = table2[buf2>>1];
  2648. buf2 = i;
  2649. if (mult2pos == e1bits) {
  2650. t = BIGLITTLE(b-mlen, b+mlen);
  2651. if (isone) {
  2652. lbnCopy_64(t, b, mlen); /* Copy low to high */
  2653. isone = 0;
  2654. } else {
  2655. lbnMontMul_64(a, t, b, mod, mlen, inv);
  2656. t = a; a = b; b = t;
  2657. }
  2658. }
  2659. }
  2660. /*
  2661. * At this point, the buffer (which is the high half of b)
  2662. * holds either 1 (implicitly, as the "isone" flag is set),
  2663. * n1^2, n2^2 or n1^2 * n2^2.
  2664. */
  2665. /*
  2666. * The main loop. The procedure is:
  2667. * - Advance the windows
  2668. * - If the most-significant bit of a window is set,
  2669. * schedule a multiply for the appropriate time in the
  2670. * future (may be immediately)
  2671. * - Perform any pending multiples
  2672. * - Check for termination
  2673. * - Square the buffers
  2674. *
  2675. * At any given time, the acumulated product is held in
  2676. * the high half of b.
  2677. */
  2678. for (;;) {
  2679. e1bits--;
  2680. /* Advance the windows */
  2681. assert(buf1 < tblmask);
  2682. buf1 <<= 1;
  2683. assert(buf2 < tblmask);
  2684. buf2 <<= 1;
  2685. /*
  2686. * This reads ahead of the current exponent position
  2687. * (controlled by e1bits), so we have to be able to read
  2688. * past the lsb of the exponents without error.
  2689. */
  2690. if (e1len) {
  2691. buf1 |= ((*e1 & bitpos) != 0);
  2692. if (e1len <= e2len)
  2693. buf2 |= ((*e2 & bitpos) != 0);
  2694. bitpos >>= 1;
  2695. if (!bitpos) {
  2696. BIGLITTLE(e1++,e1--);
  2697. if (e1len <= e2len)
  2698. BIGLITTLE(e2++,e2--);
  2699. bitpos = (BNWORD64)1 << (64-1);
  2700. e1len--;
  2701. }
  2702. }
  2703. /* Examine the first window for pending multiplies */
  2704. if (buf1 & tblmask) {
  2705. mult1pos = e1bits - w1bits;
  2706. while ((buf1 & 1) == 0) {
  2707. buf1 >>= 1;
  2708. mult1pos++;
  2709. }
  2710. /* Intermediates can wrap, but final must NOT */
  2711. assert(mult1pos <= e1bits);
  2712. mult1 = table1[buf1>>1];
  2713. buf1 = 0;
  2714. }
  2715. /*
  2716. * Examine the second window for pending multiplies.
  2717. * Window 2 can be smaller than window 1, but we
  2718. * keep the same number of bits in buf2, so we need
  2719. * to ignore any low-order bits in the buffer when
  2720. * computing what to multiply by, and recompute them
  2721. * later.
  2722. */
  2723. if (buf2 & tblmask) {
  2724. /* Remember low-order bits for later */
  2725. i = buf2 & ((1u << w2bits) - 1);
  2726. buf2 >>= w2bits;
  2727. mult2pos = e1bits - w1bits + w2bits;
  2728. while ((buf2 & 1) == 0) {
  2729. buf2 >>= 1;
  2730. mult2pos++;
  2731. }
  2732. assert(mult2pos <= e1bits);
  2733. mult2 = table2[buf2>>1];
  2734. buf2 = i;
  2735. }
  2736. /* If we have a pending multiply for e1, do it */
  2737. if (e1bits == mult1pos) {
  2738. /* Multiply by the table entry remembered previously */
  2739. t = BIGLITTLE(b-mlen, b+mlen);
  2740. if (isone) {
  2741. /* Multiply by 1 is a trivial case */
  2742. lbnCopy_64(t, mult1, mlen);
  2743. isone = 0;
  2744. } else {
  2745. lbnMontMul_64(a, t, mult1, mod, mlen, inv);
  2746. /* Swap a and b */
  2747. t = a; a = b; b = t;
  2748. }
  2749. }
  2750. /* If we have a pending multiply for e2, do it */
  2751. if (e1bits == mult2pos) {
  2752. /* Multiply by the table entry remembered previously */
  2753. t = BIGLITTLE(b-mlen, b+mlen);
  2754. if (isone) {
  2755. /* Multiply by 1 is a trivial case */
  2756. lbnCopy_64(t, mult2, mlen);
  2757. isone = 0;
  2758. } else {
  2759. lbnMontMul_64(a, t, mult2, mod, mlen, inv);
  2760. /* Swap a and b */
  2761. t = a; a = b; b = t;
  2762. }
  2763. }
  2764. /* Are we done? */
  2765. if (!e1bits)
  2766. break;
  2767. /* Square the buffer */
  2768. if (!isone) {
  2769. t = BIGLITTLE(b-mlen, b+mlen);
  2770. lbnMontSquare_64(a, t, mod, mlen, inv);
  2771. /* Swap a and b */
  2772. t = a; a = b; b = t;
  2773. }
  2774. #if BNYIELD
  2775. if (bnYield && (y = bnYield()) < 0)
  2776. goto yield;
  2777. #endif
  2778. } /* for (;;) */
  2779. assert(!isone);
  2780. assert(!buf1);
  2781. assert(!buf2);
  2782. /* DONE! */
  2783. /* Convert result out of Montgomery form */
  2784. t = BIGLITTLE(b-mlen, b+mlen);
  2785. lbnCopy_64(b, t, mlen);
  2786. lbnZero_64(t, mlen);
  2787. lbnMontReduce_64(b, mod, mlen, inv);
  2788. lbnCopy_64(result, t, mlen);
  2789. /* Clean up - free intermediate storage */
  2790. y = 0;
  2791. #if BNYIELD
  2792. yield:
  2793. #endif
  2794. buf2 = tblmask >> w2bits;
  2795. while (--tblmask) {
  2796. if (tblmask < buf2)
  2797. LBNFREE(table2[tblmask], mlen);
  2798. LBNFREE(table1[tblmask], mlen);
  2799. }
  2800. t = table1[0];
  2801. LBNFREE(t, mlen);
  2802. LBNFREE(b, 2*mlen);
  2803. LBNFREE(a, 2*mlen);
  2804. return y; /* Success */
  2805. }
  2806. /*
  2807. * 2^exp (mod mod). This is an optimized version for use in Fermat
  2808. * tests. The input value of n is ignored; it is returned with
  2809. * "mlen" words valid.
  2810. */
  2811. int
  2812. lbnTwoExpMod_64(BNWORD64 *n, BNWORD64 const *exp, unsigned elen,
  2813. BNWORD64 *mod, unsigned mlen)
  2814. {
  2815. unsigned e; /* Copy of high words of the exponent */
  2816. unsigned bits; /* Assorted counter of bits */
  2817. BNWORD64 const *bitptr;
  2818. BNWORD64 bitword, bitpos;
  2819. BNWORD64 *a, *b, *a1;
  2820. BNWORD64 inv;
  2821. int y; /* Result of bnYield() */
  2822. assert(mlen);
  2823. bitptr = BIGLITTLE(exp-elen, exp+elen-1);
  2824. bitword = *bitptr;
  2825. assert(bitword);
  2826. /* Clear n for future use. */
  2827. lbnZero_64(n, mlen);
  2828. bits = lbnBits_64(exp, elen);
  2829. /* First, a couple of trivial cases. */
  2830. if (bits <= 1) {
  2831. /* 2 ^ 0 == 1, 2 ^ 1 == 2 */
  2832. BIGLITTLE(n[-1],n[0]) = (BNWORD64)1<<elen;
  2833. return 0;
  2834. }
  2835. /* Set bitpos to the most significant bit */
  2836. bitpos = (BNWORD64)1 << ((bits-1) & (64-1));
  2837. /* Now, count the bits in the modulus. */
  2838. bits = lbnBits_64(mod, mlen);
  2839. assert(bits > 1); /* a 1-bit modulus is just stupid... */
  2840. /*
  2841. * We start with 1<<e, where "e" is as many high bits of the
  2842. * exponent as we can manage without going over the modulus.
  2843. * This first loop finds "e".
  2844. */
  2845. e = 1;
  2846. while (elen) {
  2847. /* Consume the first bit */
  2848. bitpos >>= 1;
  2849. if (!bitpos) {
  2850. if (!--elen)
  2851. break;
  2852. bitword = BIGLITTLE(*++bitptr,*--bitptr);
  2853. bitpos = (BNWORD64)1<<(64-1);
  2854. }
  2855. e = (e << 1) | ((bitpos & bitword) != 0);
  2856. if (e >= bits) { /* Overflow! Back out. */
  2857. e >>= 1;
  2858. break;
  2859. }
  2860. }
  2861. /*
  2862. * The bit in "bitpos" being examined by the bit buffer has NOT
  2863. * been consumed yet. This may be past the end of the exponent,
  2864. * in which case elen == 1.
  2865. */
  2866. /* Okay, now, set bit "e" in n. n is already zero. */
  2867. inv = (BNWORD64)1 << (e & (64-1));
  2868. e /= 64;
  2869. BIGLITTLE(n[-e-1],n[e]) = inv;
  2870. /*
  2871. * The effective length of n in words is now "e+1".
  2872. * This is used a little bit later.
  2873. */
  2874. if (!elen)
  2875. return 0; /* That was easy! */
  2876. /*
  2877. * We have now processed the first few bits. The next step
  2878. * is to convert this to Montgomery form for further squaring.
  2879. */
  2880. /* Allocate working storage: two product buffers */
  2881. LBNALLOC(a, BNWORD64, 2*mlen);
  2882. if (!a)
  2883. return -1;
  2884. LBNALLOC(b, BNWORD64, 2*mlen);
  2885. if (!b) {
  2886. LBNFREE(a, 2*mlen);
  2887. return -1;
  2888. }
  2889. /* Convert n to Montgomery form */
  2890. inv = BIGLITTLE(mod[-1],mod[0]); /* LSW of modulus */
  2891. assert(inv & 1); /* Modulus must be odd */
  2892. inv = lbnMontInv1_64(inv);
  2893. /* Move n (length e+1, remember?) up "mlen" words into b */
  2894. /* Note that we lie about a1 for a bit - it's pointing to b */
  2895. a1 = BIGLITTLE(b-mlen,b+mlen);
  2896. lbnCopy_64(a1, n, e+1);
  2897. lbnZero_64(b, mlen);
  2898. /* Do the division - dump the quotient into the high-order words */
  2899. (void)lbnDiv_64(a1, b, mlen+e+1, mod, mlen);
  2900. /*
  2901. * Now do the first squaring and modular reduction to put
  2902. * the number up in a1 where it belongs.
  2903. */
  2904. lbnMontSquare_64(a, b, mod, mlen, inv);
  2905. /* Fix up a1 to point to where it should go. */
  2906. a1 = BIGLITTLE(a-mlen,a+mlen);
  2907. /*
  2908. * Okay, now, a1 holds the number being accumulated, and
  2909. * b is a scratch register. Start working:
  2910. */
  2911. for (;;) {
  2912. /*
  2913. * Is the bit set? If so, double a1 as well.
  2914. * A modular doubling like this is very cheap.
  2915. */
  2916. if (bitpos & bitword) {
  2917. /*
  2918. * Double the number. If there was a carry out OR
  2919. * the result is greater than the modulus, subract
  2920. * the modulus.
  2921. */
  2922. if (lbnDouble_64(a1, mlen) ||
  2923. lbnCmp_64(a1, mod, mlen) > 0)
  2924. (void)lbnSubN_64(a1, mod, mlen);
  2925. }
  2926. /* Advance to the next exponent bit */
  2927. bitpos >>= 1;
  2928. if (!bitpos) {
  2929. if (!--elen)
  2930. break; /* Done! */
  2931. bitword = BIGLITTLE(*++bitptr,*--bitptr);
  2932. bitpos = (BNWORD64)1<<(64-1);
  2933. }
  2934. /*
  2935. * The elen/bitword/bitpos bit buffer is known to be
  2936. * non-empty, i.e. there is at least one more unconsumed bit.
  2937. * Thus, it's safe to square the number.
  2938. */
  2939. lbnMontSquare_64(b, a1, mod, mlen, inv);
  2940. /* Rename result (in b) back to a (a1, really). */
  2941. a1 = b; b = a; a = a1;
  2942. a1 = BIGLITTLE(a-mlen,a+mlen);
  2943. #if BNYIELD
  2944. if (bnYield && (y = bnYield()) < 0)
  2945. goto yield;
  2946. #endif
  2947. }
  2948. /* DONE! Just a little bit of cleanup... */
  2949. /*
  2950. * Convert result out of Montgomery form... this is
  2951. * just a Montgomery reduction.
  2952. */
  2953. lbnCopy_64(a, a1, mlen);
  2954. lbnZero_64(a1, mlen);
  2955. lbnMontReduce_64(a, mod, mlen, inv);
  2956. lbnCopy_64(n, a1, mlen);
  2957. /* Clean up - free intermediate storage */
  2958. y = 0;
  2959. #if BNYIELD
  2960. yield:
  2961. #endif
  2962. LBNFREE(b, 2*mlen);
  2963. LBNFREE(a, 2*mlen);
  2964. return y; /* Success */
  2965. }
  2966. /*
  2967. * Returns a substring of the big-endian array of bytes representation
  2968. * of the bignum array based on two parameters, the least significant
  2969. * byte number (0 to start with the least significant byte) and the
  2970. * length. I.e. the number returned is a representation of
  2971. * (bn / 2^(8*lsbyte)) % 2 ^ (8*buflen).
  2972. *
  2973. * It is an error if the bignum is not at least buflen + lsbyte bytes
  2974. * long.
  2975. *
  2976. * This code assumes that the compiler has the minimal intelligence
  2977. * neded to optimize divides and modulo operations on an unsigned data
  2978. * type with a power of two.
  2979. */
  2980. void
  2981. lbnExtractBigBytes_64(BNWORD64 const *n, unsigned char *buf,
  2982. unsigned lsbyte, unsigned buflen)
  2983. {
  2984. BNWORD64 t = 0; /* Needed to shut up uninitialized var warnings */
  2985. unsigned shift;
  2986. lsbyte += buflen;
  2987. shift = (8 * lsbyte) % 64;
  2988. lsbyte /= (64/8); /* Convert to word offset */
  2989. BIGLITTLE(n -= lsbyte, n += lsbyte);
  2990. if (shift)
  2991. t = BIGLITTLE(n[-1],n[0]);
  2992. while (buflen--) {
  2993. if (!shift) {
  2994. t = BIGLITTLE(*n++,*--n);
  2995. shift = 64;
  2996. }
  2997. shift -= 8;
  2998. *buf++ = (unsigned char)(t>>shift);
  2999. }
  3000. }
/*
 * Merge a big-endian array of bytes into a bignum array.
 * The array had better be big enough. This is
 * equivalent to extracting the entire bignum into a
 * large byte array, copying the input buffer into the
 * middle of it, and converting back to a bignum.
 *
 * The buf is "buflen" bytes long, and its *last* byte is at
 * position "lsbyte" from the end of the bignum.
 *
 * Note that this is a pain to get right. Fortunately, it's hardly
 * critical for efficiency.
 */
void
lbnInsertBigBytes_64(BNWORD64 *n, unsigned char const *buf,
	unsigned lsbyte, unsigned buflen)
{
	BNWORD64 t = 0;	/* Shut up uninitialized variable warnings */

	/* Make lsbyte the byte offset just above the buffer's span */
	lsbyte += buflen;

	/* Advance n by that many whole words */
	BIGLITTLE(n -= lsbyte/(64/8), n += lsbyte/(64/8));

	/*
	 * Load up leading odd bytes: if the top of the span falls inside
	 * a word, fetch that word and shift it down so its bytes above
	 * the span sit in the low end of t and survive the merge below.
	 */
	if (lsbyte % (64/8)) {
		t = BIGLITTLE(*--n,*n++);
		t >>= (lsbyte * 8) % 64;
	}

	/* The main loop - merge into t, storing at each word boundary. */
	while (buflen--) {
		t = (t << 8) | *buf++;
		/* --lsbyte tracks the byte position; store on word alignment */
		if ((--lsbyte % (64/8)) == 0)
			BIGLITTLE(*n++,*--n) = t;
	}

	/* Merge odd bytes in t into last word */
	lsbyte = (lsbyte * 8) % 64;
	if (lsbyte) {
		t <<= lsbyte;
		/* Keep the low lsbyte bits already present in the bignum */
		t |= (((BNWORD64)1 << lsbyte) - 1) & BIGLITTLE(n[0],n[-1]);
		BIGLITTLE(n[0],n[-1]) = t;
	}
	return;
}
  3041. /*
  3042. * Returns a substring of the little-endian array of bytes representation
  3043. * of the bignum array based on two parameters, the least significant
  3044. * byte number (0 to start with the least significant byte) and the
  3045. * length. I.e. the number returned is a representation of
  3046. * (bn / 2^(8*lsbyte)) % 2 ^ (8*buflen).
  3047. *
  3048. * It is an error if the bignum is not at least buflen + lsbyte bytes
  3049. * long.
  3050. *
  3051. * This code assumes that the compiler has the minimal intelligence
  3052. * neded to optimize divides and modulo operations on an unsigned data
  3053. * type with a power of two.
  3054. */
  3055. void
  3056. lbnExtractLittleBytes_64(BNWORD64 const *n, unsigned char *buf,
  3057. unsigned lsbyte, unsigned buflen)
  3058. {
  3059. BNWORD64 t = 0; /* Needed to shut up uninitialized var warnings */
  3060. BIGLITTLE(n -= lsbyte/(64/8), n += lsbyte/(64/8));
  3061. if (lsbyte % (64/8)) {
  3062. t = BIGLITTLE(*--n,*n++);
  3063. t >>= (lsbyte % (64/8)) * 8 ;
  3064. }
  3065. while (buflen--) {
  3066. if ((lsbyte++ % (64/8)) == 0)
  3067. t = BIGLITTLE(*--n,*n++);
  3068. *buf++ = (unsigned char)t;
  3069. t >>= 8;
  3070. }
  3071. }
/*
 * Merge a little-endian array of bytes into a bignum array.
 * The array had better be big enough. This is
 * equivalent to extracting the entire bignum into a
 * large byte array, copying the input buffer into the
 * middle of it, and converting back to a bignum.
 *
 * The buf is "buflen" bytes long, and its first byte is at
 * position "lsbyte" from the end of the bignum.
 *
 * Note that this is a pain to get right. Fortunately, it's hardly
 * critical for efficiency.
 */
void
lbnInsertLittleBytes_64(BNWORD64 *n, unsigned char const *buf,
	unsigned lsbyte, unsigned buflen)
{
	BNWORD64 t = 0;	/* Shut up uninitialized variable warnings */

	/* Move to most-significant end */
	lsbyte += buflen;
	buf += buflen;

	/* Advance n past the whole words below that end */
	BIGLITTLE(n -= lsbyte/(64/8), n += lsbyte/(64/8));

	/*
	 * Load up leading odd bytes: if the top of the span falls inside
	 * a word, fetch that word and shift it down so its bytes above
	 * the span sit in the low end of t and survive the merge below.
	 */
	if (lsbyte % (64/8)) {
		t = BIGLITTLE(*--n,*n++);
		t >>= (lsbyte * 8) % 64;
	}

	/* The main loop - merge into t, storing at each word boundary.
	 * buf is walked backwards since its last byte is most significant
	 * in bignum order. */
	while (buflen--) {
		t = (t << 8) | *--buf;
		if ((--lsbyte % (64/8)) == 0)
			BIGLITTLE(*n++,*--n) = t;
	}

	/* Merge odd bytes in t into last word */
	lsbyte = (lsbyte * 8) % 64;
	if (lsbyte) {
		t <<= lsbyte;
		/* Keep the low lsbyte bits already present in the bignum */
		t |= (((BNWORD64)1 << lsbyte) - 1) & BIGLITTLE(n[0],n[-1]);
		BIGLITTLE(n[0],n[-1]) = t;
	}
	return;
}
  3114. #ifdef DEADCODE /* This was a precursor to the more flexible lbnExtractBytes */
/*
 * Convert a big-endian array of bytes to a bignum.
 * Returns the number of words in the bignum.
 * Note the expression "64/8" for the number of bytes per word.
 * This is so the word-size adjustment will work.
 */
unsigned
lbnFromBytes_64(BNWORD64 *a, unsigned char const *b, unsigned blen)
{
	BNWORD64 t;
	unsigned alen = (blen + (64/8-1))/(64/8);	/* Words needed, rounded up */

	/* Start at the most-significant end of a; words are stored downward */
	BIGLITTLE(a -= alen, a += alen);

	while (blen) {
		t = 0;
		do {
			/* Accumulate bytes into t, most-significant first */
			t = t << 8 | *b++;
			/*
			 * Continue until blen is a multiple of the word size;
			 * the first (possibly partial) group thus absorbs any
			 * leading odd bytes.
			 */
		} while (--blen & (64/8-1));
		BIGLITTLE(*a++,*--a) = t;
	}
	return alen;
}
  3136. #endif
  3137. /*
  3138. * Computes the GCD of a and b. Modifies both arguments; when it returns,
  3139. * one of them is the GCD and the other is trash. The return value
  3140. * indicates which: 0 for a, and 1 for b. The length of the retult is
  3141. * returned in rlen. Both inputs must have one extra word of precision.
  3142. * alen must be >= blen.
  3143. *
  3144. * TODO: use the binary algorithm (Knuth section 4.5.2, algorithm B).
  3145. * This is based on taking out common powers of 2, then repeatedly:
  3146. * gcd(2*u,v) = gcd(u,2*v) = gcd(u,v) - isolated powers of 2 can be deleted.
  3147. * gcd(u,v) = gcd(u-v,v) - the numbers can be easily reduced.
  3148. * It gets less reduction per step, but the steps are much faster than
  3149. * the division case.
  3150. */
  3151. int
  3152. lbnGcd_64(BNWORD64 *a, unsigned alen, BNWORD64 *b, unsigned blen,
  3153. unsigned *rlen)
  3154. {
  3155. #if BNYIELD
  3156. int y;
  3157. #endif
  3158. assert(alen >= blen);
  3159. while (blen != 0) {
  3160. (void)lbnDiv_64(BIGLITTLE(a-blen,a+blen), a, alen, b, blen);
  3161. alen = lbnNorm_64(a, blen);
  3162. if (alen == 0) {
  3163. *rlen = blen;
  3164. return 1;
  3165. }
  3166. (void)lbnDiv_64(BIGLITTLE(b-alen,b+alen), b, blen, a, alen);
  3167. blen = lbnNorm_64(b, alen);
  3168. #if BNYIELD
  3169. if (bnYield && (y = bnYield()) < 0)
  3170. return y;
  3171. #endif
  3172. }
  3173. *rlen = alen;
  3174. return 0;
  3175. }
  3176. /*
  3177. * Invert "a" modulo "mod" using the extended Euclidean algorithm.
  3178. * Note that this only computes one of the cosequences, and uses the
  3179. * theorem that the signs flip every step and the absolute value of
  3180. * the cosequence values are always bounded by the modulus to avoid
  3181. * having to work with negative numbers.
  3182. * gcd(a,mod) had better equal 1. Returns 1 if the GCD is NOT 1.
  3183. * a must be one word longer than "mod". It is overwritten with the
  3184. * result.
  3185. * TODO: Use Richard Schroeppel's *much* faster algorithm.
  3186. */
  3187. int
  3188. lbnInv_64(BNWORD64 *a, unsigned alen, BNWORD64 const *mod, unsigned mlen)
  3189. {
  3190. BNWORD64 *b; /* Hold a copy of mod during GCD reduction */
  3191. BNWORD64 *p; /* Temporary for products added to t0 and t1 */
  3192. BNWORD64 *t0, *t1; /* Inverse accumulators */
  3193. BNWORD64 cy;
  3194. unsigned blen, t0len, t1len, plen;
  3195. int y;
  3196. alen = lbnNorm_64(a, alen);
  3197. if (!alen)
  3198. return 1; /* No inverse */
  3199. mlen = lbnNorm_64(mod, mlen);
  3200. assert (alen <= mlen);
  3201. /* Inverse of 1 is 1 */
  3202. if (alen == 1 && BIGLITTLE(a[-1],a[0]) == 1) {
  3203. lbnZero_64(BIGLITTLE(a-alen,a+alen), mlen-alen);
  3204. return 0;
  3205. }
  3206. /* Allocate a pile of space */
  3207. LBNALLOC(b, BNWORD64, mlen+1);
  3208. if (b) {
  3209. /*
  3210. * Although products are guaranteed to always be less than the
  3211. * modulus, it can involve multiplying two 3-word numbers to
  3212. * get a 5-word result, requiring a 6th word to store a 0
  3213. * temporarily. Thus, mlen + 1.
  3214. */
  3215. LBNALLOC(p, BNWORD64, mlen+1);
  3216. if (p) {
  3217. LBNALLOC(t0, BNWORD64, mlen);
  3218. if (t0) {
  3219. LBNALLOC(t1, BNWORD64, mlen);
  3220. if (t1)
  3221. goto allocated;
  3222. LBNFREE(t0, mlen);
  3223. }
  3224. LBNFREE(p, mlen+1);
  3225. }
  3226. LBNFREE(b, mlen+1);
  3227. }
  3228. return -1;
  3229. allocated:
  3230. /* Set t0 to 1 */
  3231. t0len = 1;
  3232. BIGLITTLE(t0[-1],t0[0]) = 1;
  3233. /* b = mod */
  3234. lbnCopy_64(b, mod, mlen);
  3235. /* blen = mlen (implicitly) */
  3236. /* t1 = b / a; b = b % a */
  3237. cy = lbnDiv_64(t1, b, mlen, a, alen);
  3238. *(BIGLITTLE(t1-(mlen-alen)-1,t1+(mlen-alen))) = cy;
  3239. t1len = lbnNorm_64(t1, mlen-alen+1);
  3240. blen = lbnNorm_64(b, alen);
  3241. /* while (b > 1) */
  3242. while (blen > 1 || BIGLITTLE(b[-1],b[0]) != (BNWORD64)1) {
  3243. /* q = a / b; a = a % b; */
  3244. if (alen < blen || (alen == blen && lbnCmp_64(a, a, alen) < 0))
  3245. assert(0);
  3246. cy = lbnDiv_64(BIGLITTLE(a-blen,a+blen), a, alen, b, blen);
  3247. *(BIGLITTLE(a-alen-1,a+alen)) = cy;
  3248. plen = lbnNorm_64(BIGLITTLE(a-blen,a+blen), alen-blen+1);
  3249. assert(plen);
  3250. alen = lbnNorm_64(a, blen);
  3251. if (!alen)
  3252. goto failure; /* GCD not 1 */
  3253. /* t0 += q * t1; */
  3254. assert(plen+t1len <= mlen+1);
  3255. lbnMul_64(p, BIGLITTLE(a-blen,a+blen), plen, t1, t1len);
  3256. plen = lbnNorm_64(p, plen + t1len);
  3257. assert(plen <= mlen);
  3258. if (plen > t0len) {
  3259. lbnZero_64(BIGLITTLE(t0-t0len,t0+t0len), plen-t0len);
  3260. t0len = plen;
  3261. }
  3262. cy = lbnAddN_64(t0, p, plen);
  3263. if (cy) {
  3264. if (t0len > plen) {
  3265. cy = lbnAdd1_64(BIGLITTLE(t0-plen,t0+plen),
  3266. t0len-plen, cy);
  3267. }
  3268. if (cy) {
  3269. BIGLITTLE(t0[-t0len-1],t0[t0len]) = cy;
  3270. t0len++;
  3271. }
  3272. }
  3273. /* if (a <= 1) return a ? t0 : FAIL; */
  3274. if (alen <= 1 && BIGLITTLE(a[-1],a[0]) == (BNWORD64)1) {
  3275. if (alen == 0)
  3276. goto failure; /* FAIL */
  3277. assert(t0len <= mlen);
  3278. lbnCopy_64(a, t0, t0len);
  3279. lbnZero_64(BIGLITTLE(a-t0len, a+t0len), mlen-t0len);
  3280. goto success;
  3281. }
  3282. /* q = b / a; b = b % a; */
  3283. if (blen < alen || (blen == alen && lbnCmp_64(b, a, alen) < 0))
  3284. assert(0);
  3285. cy = lbnDiv_64(BIGLITTLE(b-alen,b+alen), b, blen, a, alen);
  3286. *(BIGLITTLE(b-blen-1,b+blen)) = cy;
  3287. plen = lbnNorm_64(BIGLITTLE(b-alen,b+alen), blen-alen+1);
  3288. assert(plen);
  3289. blen = lbnNorm_64(b, alen);
  3290. if (!blen)
  3291. goto failure; /* GCD not 1 */
  3292. /* t1 += q * t0; */
  3293. assert(plen+t0len <= mlen+1);
  3294. lbnMul_64(p, BIGLITTLE(b-alen,b+alen), plen, t0, t0len);
  3295. plen = lbnNorm_64(p, plen + t0len);
  3296. assert(plen <= mlen);
  3297. if (plen > t1len) {
  3298. lbnZero_64(BIGLITTLE(t1-t1len,t1+t1len), plen-t1len);
  3299. t1len = plen;
  3300. }
  3301. cy = lbnAddN_64(t1, p, plen);
  3302. if (cy) {
  3303. if (t1len > plen) {
  3304. cy = lbnAdd1_64(BIGLITTLE(t1-plen,t0+plen),
  3305. t1len-plen, cy);
  3306. }
  3307. if (cy) {
  3308. BIGLITTLE(t1[-t1len-1],t1[t1len]) = cy;
  3309. t1len++;
  3310. }
  3311. }
  3312. #if BNYIELD
  3313. if (bnYield && (y = bnYield() < 0))
  3314. goto yield;
  3315. #endif
  3316. }
  3317. if (!blen)
  3318. goto failure; /* gcd(a, mod) != 1 -- FAIL */
  3319. /* return mod-t1 */
  3320. lbnCopy_64(a, mod, mlen);
  3321. assert(t1len <= mlen);
  3322. cy = lbnSubN_64(a, t1, t1len);
  3323. if (cy) {
  3324. assert(mlen > t1len);
  3325. cy = lbnSub1_64(BIGLITTLE(a-t1len, a+t1len), mlen-t1len, cy);
  3326. assert(!cy);
  3327. }
  3328. success:
  3329. LBNFREE(t1, mlen);
  3330. LBNFREE(t0, mlen);
  3331. LBNFREE(p, mlen+1);
  3332. LBNFREE(b, mlen+1);
  3333. return 0;
  3334. failure: /* GCD is not 1 - no inverse exists! */
  3335. y = 1;
  3336. #if BNYIELD
  3337. yield:
  3338. #endif
  3339. LBNFREE(t1, mlen);
  3340. LBNFREE(t0, mlen);
  3341. LBNFREE(p, mlen+1);
  3342. LBNFREE(b, mlen+1);
  3343. return y;
  3344. }
/*
 * Precompute powers of "g" mod "mod". Compute them every "bits"
 * for "n" steps. This is sufficient to compute powers of g with
 * exponents up to n*bits bits long, i.e. less than 2^(n*bits).
 *
 * This assumes that the caller has already initialized "array" to point
 * to "n" buffers of size "mlen".
 *
 * Returns 0 on success, -1 on out-of-memory.
 */
int
lbnBasePrecompBegin_64(BNWORD64 **array, unsigned n, unsigned bits,
	BNWORD64 const *g, unsigned glen, BNWORD64 *mod, unsigned mlen)
{
	BNWORD64 *a, *b;	/* Temporary double-width accumulators */
	BNWORD64 *a1;		/* Pointer to high half of a */
	BNWORD64 inv;		/* Montgomery inverse of LSW of mod */
	BNWORD64 *t;
	unsigned i;

	glen = lbnNorm_64(g, glen);
	assert(glen);

	assert (mlen == lbnNorm_64(mod, mlen));
	assert (glen <= mlen);

	/* Allocate two temporary buffers, and the array slots */
	LBNALLOC(a, BNWORD64, mlen*2);
	if (!a)
		return -1;
	LBNALLOC(b, BNWORD64, mlen*2);
	if (!b) {
		LBNFREE(a, 2*mlen);
		return -1;
	}

	/* Okay, all ready */

	/* Convert g to Montgomery form */
	inv = BIGLITTLE(mod[-1],mod[0]);	/* LSW of modulus */
	assert(inv & 1);	/* Modulus must be odd */
	inv = lbnMontInv1_64(inv);
	/* Move g up "mlen" words into a (clearing the low mlen words) */
	a1 = BIGLITTLE(a-mlen,a+mlen);
	lbnCopy_64(a1, g, glen);
	lbnZero_64(a, mlen);

	/* Do the division - dump the quotient into the high-order words */
	/* The remainder, g*2^(64*mlen) mod m (Montgomery form), is left in a */
	(void)lbnDiv_64(a1, a, mlen+glen, mod, mlen);

	/* Copy the first value into the array */
	t = *array;
	lbnCopy_64(t, a, mlen);
	a1 = a;	/* This first value is *not* shifted up */

	/* Now compute the remaining n-1 array entries */
	assert(bits);
	assert(n);
	while (--n) {
		i = bits;
		do {
			/* Square a1 into b1, then swap the a and b buffers
			 * so the latest value is always reachable via a1 */
			lbnMontSquare_64(b, a1, mod, mlen, inv);
			t = b; b = a; a = t;
			a1 = BIGLITTLE(a-mlen, a+mlen);
		} while (--i);
		/* After "bits" squarings, record g^(2^(i*bits)) */
		t = *++array;
		lbnCopy_64(t, a1, mlen);
	}

	/* Hooray, we're done. */
	LBNFREE(b, 2*mlen);
	LBNFREE(a, 2*mlen);
	return 0;
}
  3409. /*
  3410. * result = base^exp (mod mod). "array" is a an array of pointers
  3411. * to procomputed powers of base, each 2^bits apart. (I.e. array[i]
  3412. * is base^(2^(i*bits))).
  3413. *
  3414. * The algorithm consists of:
  3415. * a = b = (powers of g to be raised to the power 2^bits-1)
  3416. * a *= b *= (powers of g to be raised to the power 2^bits-2)
  3417. * ...
  3418. * a *= b *= (powers of g to be raised to the power 1)
  3419. *
  3420. * All we do is walk the exponent 2^bits-1 times in groups of "bits" bits,
  3421. */
  3422. int
  3423. lbnBasePrecompExp_64(BNWORD64 *result, BNWORD64 const * const *array,
  3424. unsigned bits, BNWORD64 const *exp, unsigned elen,
  3425. BNWORD64 const *mod, unsigned mlen)
  3426. {
  3427. BNWORD64 *a, *b, *c, *t;
  3428. BNWORD64 *a1, *b1;
  3429. int anull, bnull; /* Null flags: values are implicitly 1 */
  3430. unsigned i, j; /* Loop counters */
  3431. unsigned mask; /* Exponent bits to examime */
  3432. BNWORD64 const *eptr; /* Pointer into exp */
  3433. BNWORD64 buf, curbits, nextword; /* Bit-buffer varaibles */
  3434. BNWORD64 inv; /* Inverse of LSW of modulus */
  3435. unsigned ewords; /* Words of exponent left */
  3436. int bufbits; /* Number of valid bits */
  3437. int y = 0;
  3438. mlen = lbnNorm_64(mod, mlen);
  3439. assert (mlen);
  3440. elen = lbnNorm_64(exp, elen);
  3441. if (!elen) {
  3442. lbnZero_64(result, mlen);
  3443. BIGLITTLE(result[-1],result[0]) = 1;
  3444. return 0;
  3445. }
  3446. /*
  3447. * This could be precomputed, but it's so cheap, and it would require
  3448. * making the precomputation structure word-size dependent.
  3449. */
  3450. inv = lbnMontInv1_64(mod[BIGLITTLE(-1,0)]); /* LSW of modulus */
  3451. assert(elen);
  3452. /*
  3453. * Allocate three temporary buffers. The current numbers generally
  3454. * live in the upper halves of these buffers.
  3455. */
  3456. LBNALLOC(a, BNWORD64, mlen*2);
  3457. if (a) {
  3458. LBNALLOC(b, BNWORD64, mlen*2);
  3459. if (b) {
  3460. LBNALLOC(c, BNWORD64, mlen*2);
  3461. if (c)
  3462. goto allocated;
  3463. LBNFREE(b, 2*mlen);
  3464. }
  3465. LBNFREE(a, 2*mlen);
  3466. }
  3467. return -1;
  3468. allocated:
  3469. anull = bnull = 1;
  3470. mask = (1u<<bits) - 1;
  3471. for (i = mask; i; --i) {
  3472. /* Set up bit buffer for walking the exponent */
  3473. eptr = exp;
  3474. buf = BIGLITTLE(*--eptr, *eptr++);
  3475. ewords = elen-1;
  3476. bufbits = 64;
  3477. for (j = 0; ewords || buf; j++) {
  3478. /* Shift down current buffer */
  3479. curbits = buf;
  3480. buf >>= bits;
  3481. /* If necessary, add next word */
  3482. bufbits -= bits;
  3483. if (bufbits < 0 && ewords > 0) {
  3484. nextword = BIGLITTLE(*--eptr, *eptr++);
  3485. ewords--;
  3486. curbits |= nextword << (bufbits+bits);
  3487. buf = nextword >> -bufbits;
  3488. bufbits += 64;
  3489. }
  3490. /* If appropriate, multiply b *= array[j] */
  3491. if ((curbits & mask) == i) {
  3492. BNWORD64 const *d = array[j];
  3493. b1 = BIGLITTLE(b-mlen-1,b+mlen);
  3494. if (bnull) {
  3495. lbnCopy_64(b1, d, mlen);
  3496. bnull = 0;
  3497. } else {
  3498. lbnMontMul_64(c, b1, d, mod, mlen, inv);
  3499. t = c; c = b; b = t;
  3500. }
  3501. #if BNYIELD
  3502. if (bnYield && (y = bnYield() < 0))
  3503. goto yield;
  3504. #endif
  3505. }
  3506. }
  3507. /* Multiply a *= b */
  3508. if (!bnull) {
  3509. a1 = BIGLITTLE(a-mlen-1,a+mlen);
  3510. b1 = BIGLITTLE(b-mlen-1,b+mlen);
  3511. if (anull) {
  3512. lbnCopy_64(a1, b1, mlen);
  3513. anull = 0;
  3514. } else {
  3515. lbnMontMul_64(c, a1, b1, mod, mlen, inv);
  3516. t = c; c = a; a = t;
  3517. }
  3518. }
  3519. }
  3520. assert(!anull); /* If it were, elen would have been 0 */
  3521. /* Convert out of Montgomery form and return */
  3522. a1 = BIGLITTLE(a-mlen-1,a+mlen);
  3523. lbnCopy_64(a, a1, mlen);
  3524. lbnZero_64(a1, mlen);
  3525. lbnMontReduce_64(a, mod, mlen, inv);
  3526. lbnCopy_64(result, a1, mlen);
  3527. #if BNYIELD
  3528. yield:
  3529. #endif
  3530. LBNFREE(c, 2*mlen);
  3531. LBNFREE(b, 2*mlen);
  3532. LBNFREE(a, 2*mlen);
  3533. return y;
  3534. }
  3535. /*
  3536. * result = base1^exp1 *base2^exp2 (mod mod). "array1" and "array2" are
  3537. * arrays of pointers to procomputed powers of the corresponding bases,
  3538. * each 2^bits apart. (I.e. array1[i] is base1^(2^(i*bits))).
  3539. *
  3540. * Bits must be the same in both. (It could be made adjustable, but it's
  3541. * a bit of a pain. Just make them both equal to the larger one.)
  3542. *
  3543. * The algorithm consists of:
  3544. * a = b = (powers of base1 and base2 to be raised to the power 2^bits-1)
  3545. * a *= b *= (powers of base1 and base2 to be raised to the power 2^bits-2)
  3546. * ...
  3547. * a *= b *= (powers of base1 and base2 to be raised to the power 1)
  3548. *
  3549. * All we do is walk the exponent 2^bits-1 times in groups of "bits" bits,
  3550. */
  3551. int
  3552. lbnDoubleBasePrecompExp_64(BNWORD64 *result, unsigned bits,
  3553. BNWORD64 const * const *array1, BNWORD64 const *exp1, unsigned elen1,
  3554. BNWORD64 const * const *array2, BNWORD64 const *exp2,
  3555. unsigned elen2, BNWORD64 const *mod, unsigned mlen)
  3556. {
  3557. BNWORD64 *a, *b, *c, *t;
  3558. BNWORD64 *a1, *b1;
  3559. int anull, bnull; /* Null flags: values are implicitly 1 */
  3560. unsigned i, j, k; /* Loop counters */
  3561. unsigned mask; /* Exponent bits to examime */
  3562. BNWORD64 const *eptr; /* Pointer into exp */
  3563. BNWORD64 buf, curbits, nextword; /* Bit-buffer varaibles */
  3564. BNWORD64 inv; /* Inverse of LSW of modulus */
  3565. unsigned ewords; /* Words of exponent left */
  3566. int bufbits; /* Number of valid bits */
  3567. int y = 0;
  3568. BNWORD64 const * const *array;
  3569. mlen = lbnNorm_64(mod, mlen);
  3570. assert (mlen);
  3571. elen1 = lbnNorm_64(exp1, elen1);
  3572. if (!elen1) {
  3573. return lbnBasePrecompExp_64(result, array2, bits, exp2, elen2,
  3574. mod, mlen);
  3575. }
  3576. elen2 = lbnNorm_64(exp2, elen2);
  3577. if (!elen2) {
  3578. return lbnBasePrecompExp_64(result, array1, bits, exp1, elen1,
  3579. mod, mlen);
  3580. }
  3581. /*
  3582. * This could be precomputed, but it's so cheap, and it would require
  3583. * making the precomputation structure word-size dependent.
  3584. */
  3585. inv = lbnMontInv1_64(mod[BIGLITTLE(-1,0)]); /* LSW of modulus */
  3586. assert(elen1);
  3587. assert(elen2);
  3588. /*
  3589. * Allocate three temporary buffers. The current numbers generally
  3590. * live in the upper halves of these buffers.
  3591. */
  3592. LBNALLOC(a, BNWORD64, mlen*2);
  3593. if (a) {
  3594. LBNALLOC(b, BNWORD64, mlen*2);
  3595. if (b) {
  3596. LBNALLOC(c, BNWORD64, mlen*2);
  3597. if (c)
  3598. goto allocated;
  3599. LBNFREE(b, 2*mlen);
  3600. }
  3601. LBNFREE(a, 2*mlen);
  3602. }
  3603. return -1;
  3604. allocated:
  3605. anull = bnull = 1;
  3606. mask = (1u<<bits) - 1;
  3607. for (i = mask; i; --i) {
  3608. /* Walk each exponent in turn */
  3609. for (k = 0; k < 2; k++) {
  3610. /* Set up the exponent for walking */
  3611. array = k ? array2 : array1;
  3612. eptr = k ? exp2 : exp1;
  3613. ewords = (k ? elen2 : elen1) - 1;
  3614. /* Set up bit buffer for walking the exponent */
  3615. buf = BIGLITTLE(*--eptr, *eptr++);
  3616. bufbits = 64;
  3617. for (j = 0; ewords || buf; j++) {
  3618. /* Shift down current buffer */
  3619. curbits = buf;
  3620. buf >>= bits;
  3621. /* If necessary, add next word */
  3622. bufbits -= bits;
  3623. if (bufbits < 0 && ewords > 0) {
  3624. nextword = BIGLITTLE(*--eptr, *eptr++);
  3625. ewords--;
  3626. curbits |= nextword << (bufbits+bits);
  3627. buf = nextword >> -bufbits;
  3628. bufbits += 64;
  3629. }
  3630. /* If appropriate, multiply b *= array[j] */
  3631. if ((curbits & mask) == i) {
  3632. BNWORD64 const *d = array[j];
  3633. b1 = BIGLITTLE(b-mlen-1,b+mlen);
  3634. if (bnull) {
  3635. lbnCopy_64(b1, d, mlen);
  3636. bnull = 0;
  3637. } else {
  3638. lbnMontMul_64(c, b1, d, mod, mlen, inv);
  3639. t = c; c = b; b = t;
  3640. }
  3641. #if BNYIELD
  3642. if (bnYield && (y = bnYield() < 0))
  3643. goto yield;
  3644. #endif
  3645. }
  3646. }
  3647. }
  3648. /* Multiply a *= b */
  3649. if (!bnull) {
  3650. a1 = BIGLITTLE(a-mlen-1,a+mlen);
  3651. b1 = BIGLITTLE(b-mlen-1,b+mlen);
  3652. if (anull) {
  3653. lbnCopy_64(a1, b1, mlen);
  3654. anull = 0;
  3655. } else {
  3656. lbnMontMul_64(c, a1, b1, mod, mlen, inv);
  3657. t = c; c = a; a = t;
  3658. }
  3659. }
  3660. }
  3661. assert(!anull); /* If it were, elen would have been 0 */
  3662. /* Convert out of Montgomery form and return */
  3663. a1 = BIGLITTLE(a-mlen-1,a+mlen);
  3664. lbnCopy_64(a, a1, mlen);
  3665. lbnZero_64(a1, mlen);
  3666. lbnMontReduce_64(a, mod, mlen, inv);
  3667. lbnCopy_64(result, a1, mlen);
  3668. #if BNYIELD
  3669. yield:
  3670. #endif
  3671. LBNFREE(c, 2*mlen);
  3672. LBNFREE(b, 2*mlen);
  3673. LBNFREE(a, 2*mlen);
  3674. return y;
  3675. }