eng_devcrypto.c
/*
 * Copyright 2017-2021 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include "e_os.h"
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <assert.h>

#include <openssl/evp.h>
#include <openssl/err.h>
#include <openssl/engine.h>
#include <openssl/objects.h>
#include <crypto/cryptodev.h>

#include "crypto/engine.h"

/* #define ENGINE_DEVCRYPTO_DEBUG */

#if CRYPTO_ALGORITHM_MIN < CRYPTO_ALGORITHM_MAX
# define CHECK_BSD_STYLE_MACROS
#endif

/*
 * ONE global file descriptor for all sessions. This allows operations
 * such as digest session data copying (see digest_copy()), but is also
 * saner... why re-open /dev/crypto for every session?
 */
static int cfd;

static int clean_devcrypto_session(struct session_op *sess) {
    if (ioctl(cfd, CIOCFSESSION, &sess->ses) < 0) {
        SYSerr(SYS_F_IOCTL, errno);
        return 0;
    }

    memset(sess, 0, sizeof(struct session_op));

    return 1;
}

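/*-
 * For orientation: a rough sketch of the cryptodev session lifecycle that
 * every cipher and digest below follows against the global descriptor cfd.
 * Illustrative only; key and keylen stand in for caller-supplied values:
 *
 *     struct session_op sess;
 *     struct crypt_op cryp;
 *
 *     memset(&sess, 0, sizeof(sess));
 *     sess.cipher = CRYPTO_AES_CBC;          (or sess.mac = CRYPTO_SHA1)
 *     sess.key = key;
 *     sess.keylen = keylen;
 *     ioctl(cfd, CIOCGSESSION, &sess);       (open a session)
 *
 *     memset(&cryp, 0, sizeof(cryp));
 *     cryp.ses = sess.ses;                   (operate within the session)
 *     ioctl(cfd, CIOCCRYPT, &cryp);
 *
 *     ioctl(cfd, CIOCFSESSION, &sess.ses);   (free the session again)
 */
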
/******************************************************************************
 *
 * Ciphers
 *
 * Because they all do the same basic operation, we have only one set of
 * method functions for them all to share, and a mapping table between
 * NIDs and cryptodev IDs, with all the necessary size data.
 *
 *****/

struct cipher_ctx {
    struct session_op sess;
    int op;                      /* COP_ENCRYPT or COP_DECRYPT */
    unsigned long mode;          /* EVP_CIPH_*_MODE */

    /* to handle ctr mode being a stream cipher */
    unsigned char partial[EVP_MAX_BLOCK_LENGTH];
    unsigned int blocksize, num;
};

static const struct cipher_data_st {
    int nid;
    int blocksize;
    int keylen;
    int ivlen;
    int flags;
    int devcryptoid;
} cipher_data[] = {
#ifndef OPENSSL_NO_DES
    { NID_des_cbc, 8, 8, 8, EVP_CIPH_CBC_MODE, CRYPTO_DES_CBC },
    { NID_des_ede3_cbc, 8, 24, 8, EVP_CIPH_CBC_MODE, CRYPTO_3DES_CBC },
#endif
#ifndef OPENSSL_NO_BF
    { NID_bf_cbc, 8, 16, 8, EVP_CIPH_CBC_MODE, CRYPTO_BLF_CBC },
#endif
#ifndef OPENSSL_NO_CAST
    { NID_cast5_cbc, 8, 16, 8, EVP_CIPH_CBC_MODE, CRYPTO_CAST_CBC },
#endif
    { NID_aes_128_cbc, 16, 128 / 8, 16, EVP_CIPH_CBC_MODE, CRYPTO_AES_CBC },
    { NID_aes_192_cbc, 16, 192 / 8, 16, EVP_CIPH_CBC_MODE, CRYPTO_AES_CBC },
    { NID_aes_256_cbc, 16, 256 / 8, 16, EVP_CIPH_CBC_MODE, CRYPTO_AES_CBC },
#ifndef OPENSSL_NO_RC4
    { NID_rc4, 1, 16, 0, EVP_CIPH_STREAM_CIPHER, CRYPTO_ARC4 },
#endif
#if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_AES_CTR)
    { NID_aes_128_ctr, 16, 128 / 8, 16, EVP_CIPH_CTR_MODE, CRYPTO_AES_CTR },
    { NID_aes_192_ctr, 16, 192 / 8, 16, EVP_CIPH_CTR_MODE, CRYPTO_AES_CTR },
    { NID_aes_256_ctr, 16, 256 / 8, 16, EVP_CIPH_CTR_MODE, CRYPTO_AES_CTR },
#endif
#if 0                            /* Not yet supported */
    { NID_aes_128_xts, 16, 128 / 8 * 2, 16, EVP_CIPH_XTS_MODE, CRYPTO_AES_XTS },
    { NID_aes_256_xts, 16, 256 / 8 * 2, 16, EVP_CIPH_XTS_MODE, CRYPTO_AES_XTS },
#endif
#if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_AES_ECB)
    { NID_aes_128_ecb, 16, 128 / 8, 0, EVP_CIPH_ECB_MODE, CRYPTO_AES_ECB },
    { NID_aes_192_ecb, 16, 192 / 8, 0, EVP_CIPH_ECB_MODE, CRYPTO_AES_ECB },
    { NID_aes_256_ecb, 16, 256 / 8, 0, EVP_CIPH_ECB_MODE, CRYPTO_AES_ECB },
#endif
#if 0                            /* Not yet supported */
    { NID_aes_128_gcm, 16, 128 / 8, 16, EVP_CIPH_GCM_MODE, CRYPTO_AES_GCM },
    { NID_aes_192_gcm, 16, 192 / 8, 16, EVP_CIPH_GCM_MODE, CRYPTO_AES_GCM },
    { NID_aes_256_gcm, 16, 256 / 8, 16, EVP_CIPH_GCM_MODE, CRYPTO_AES_GCM },
#endif
#ifndef OPENSSL_NO_CAMELLIA
    { NID_camellia_128_cbc, 16, 128 / 8, 16, EVP_CIPH_CBC_MODE,
      CRYPTO_CAMELLIA_CBC },
    { NID_camellia_192_cbc, 16, 192 / 8, 16, EVP_CIPH_CBC_MODE,
      CRYPTO_CAMELLIA_CBC },
    { NID_camellia_256_cbc, 16, 256 / 8, 16, EVP_CIPH_CBC_MODE,
      CRYPTO_CAMELLIA_CBC },
#endif
};

static size_t get_cipher_data_index(int nid)
{
    size_t i;

    for (i = 0; i < OSSL_NELEM(cipher_data); i++)
        if (nid == cipher_data[i].nid)
            return i;

    /*
     * Code further down must make sure that only NIDs in the table above
     * are used. If any other NID reaches this function, there's a grave
     * coding error further down.
     */
    assert("Code that never should be reached" == NULL);
    return -1;
}

static const struct cipher_data_st *get_cipher_data(int nid)
{
    return &cipher_data[get_cipher_data_index(nid)];
}

/*
 * Following are the three necessary functions to map OpenSSL functionality
 * with cryptodev.
 */

static int cipher_init(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                       const unsigned char *iv, int enc)
{
    struct cipher_ctx *cipher_ctx =
        (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx);
    const struct cipher_data_st *cipher_d =
        get_cipher_data(EVP_CIPHER_CTX_nid(ctx));

    /* cleanup a previous session */
    if (cipher_ctx->sess.ses != 0 &&
        clean_devcrypto_session(&cipher_ctx->sess) == 0)
        return 0;

    cipher_ctx->sess.cipher = cipher_d->devcryptoid;
    cipher_ctx->sess.keylen = cipher_d->keylen;
    cipher_ctx->sess.key = (void *)key;
    cipher_ctx->op = enc ? COP_ENCRYPT : COP_DECRYPT;
    cipher_ctx->mode = cipher_d->flags & EVP_CIPH_MODE;
    cipher_ctx->blocksize = cipher_d->blocksize;
    if (ioctl(cfd, CIOCGSESSION, &cipher_ctx->sess) < 0) {
        SYSerr(SYS_F_IOCTL, errno);
        return 0;
    }

    return 1;
}

static int cipher_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t inl)
{
    struct cipher_ctx *cipher_ctx =
        (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx);
    struct crypt_op cryp;
    unsigned char *iv = EVP_CIPHER_CTX_iv_noconst(ctx);
#if !defined(COP_FLAG_WRITE_IV)
    unsigned char saved_iv[EVP_MAX_IV_LENGTH];
    const unsigned char *ivptr;
    size_t nblocks, ivlen;
#endif

    memset(&cryp, 0, sizeof(cryp));
    cryp.ses = cipher_ctx->sess.ses;
    cryp.len = inl;
    cryp.src = (void *)in;
    cryp.dst = (void *)out;
    cryp.iv = (void *)iv;
    cryp.op = cipher_ctx->op;
#if !defined(COP_FLAG_WRITE_IV)
    cryp.flags = 0;

    ivlen = EVP_CIPHER_CTX_iv_length(ctx);
    if (ivlen > 0)
        switch (cipher_ctx->mode) {
        case EVP_CIPH_CBC_MODE:
            assert(inl >= ivlen);
            if (!EVP_CIPHER_CTX_encrypting(ctx)) {
                ivptr = in + inl - ivlen;
                memcpy(saved_iv, ivptr, ivlen);
            }
            break;

        case EVP_CIPH_CTR_MODE:
            break;

        default: /* should not happen */
            return 0;
        }
#else
    cryp.flags = COP_FLAG_WRITE_IV;
#endif

    if (ioctl(cfd, CIOCCRYPT, &cryp) < 0) {
        SYSerr(SYS_F_IOCTL, errno);
        return 0;
    }

#if !defined(COP_FLAG_WRITE_IV)
    if (ivlen > 0)
        switch (cipher_ctx->mode) {
        case EVP_CIPH_CBC_MODE:
            assert(inl >= ivlen);
            if (EVP_CIPHER_CTX_encrypting(ctx))
                ivptr = out + inl - ivlen;
            else
                ivptr = saved_iv;

            memcpy(iv, ivptr, ivlen);
            break;

        case EVP_CIPH_CTR_MODE:
            nblocks = (inl + cipher_ctx->blocksize - 1)
                      / cipher_ctx->blocksize;
            do {
                ivlen--;
                nblocks += iv[ivlen];
                iv[ivlen] = (uint8_t) nblocks;
                nblocks >>= 8;
            } while (ivlen);
            break;

        default: /* should not happen */
            return 0;
        }
#endif

    return 1;
}

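/*
 * CTR is presented to EVP as a stream cipher: ctr_do_cipher() keeps the
 * keystream of the last partial block in cipher_ctx->partial (indexed by
 * cipher_ctx->num), XORs leftover input against it, and hands only whole
 * blocks down to cipher_do_cipher().
 */
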
static int ctr_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                         const unsigned char *in, size_t inl)
{
    struct cipher_ctx *cipher_ctx =
        (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx);
    size_t nblocks, len;

    /* initial partial block */
    while (cipher_ctx->num && inl) {
        (*out++) = *(in++) ^ cipher_ctx->partial[cipher_ctx->num];
        --inl;
        cipher_ctx->num = (cipher_ctx->num + 1) % cipher_ctx->blocksize;
    }

    /* full blocks */
    if (inl > (unsigned int) cipher_ctx->blocksize) {
        nblocks = inl/cipher_ctx->blocksize;
        len = nblocks * cipher_ctx->blocksize;
        if (cipher_do_cipher(ctx, out, in, len) < 1)
            return 0;
        inl -= len;
        out += len;
        in += len;
    }

    /* final partial block */
    if (inl) {
        memset(cipher_ctx->partial, 0, cipher_ctx->blocksize);
        if (cipher_do_cipher(ctx, cipher_ctx->partial, cipher_ctx->partial,
            cipher_ctx->blocksize) < 1)
            return 0;
        while (inl--) {
            out[cipher_ctx->num] = in[cipher_ctx->num]
                                   ^ cipher_ctx->partial[cipher_ctx->num];
            cipher_ctx->num++;
        }
    }

    return 1;
}

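/*
 * cipher_ctrl() implements the two controls this engine relies on:
 * EVP_CTRL_INIT zeroes the session data, and EVP_CTRL_COPY (requested via
 * the EVP_CIPH_CUSTOM_COPY flag set in prepare_cipher_methods()) gives the
 * destination context a cryptodev session of its own by re-running
 * cipher_init() on it.
 */
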
static int cipher_ctrl(EVP_CIPHER_CTX *ctx, int type, int p1, void* p2)
{
    struct cipher_ctx *cipher_ctx =
        (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx);
    EVP_CIPHER_CTX *to_ctx = (EVP_CIPHER_CTX *)p2;
    struct cipher_ctx *to_cipher_ctx;

    switch (type) {
    case EVP_CTRL_COPY:
        if (cipher_ctx == NULL)
            return 1;
        /* when copying the context, a new session needs to be initialized */
        to_cipher_ctx =
            (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(to_ctx);
        memset(&to_cipher_ctx->sess, 0, sizeof(to_cipher_ctx->sess));
        return cipher_init(to_ctx, cipher_ctx->sess.key, EVP_CIPHER_CTX_iv(ctx),
                           (cipher_ctx->op == COP_ENCRYPT));

    case EVP_CTRL_INIT:
        memset(&cipher_ctx->sess, 0, sizeof(cipher_ctx->sess));
        return 1;

    default:
        break;
    }

    return -1;
}

static int cipher_cleanup(EVP_CIPHER_CTX *ctx)
{
    struct cipher_ctx *cipher_ctx =
        (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx);

    return clean_devcrypto_session(&cipher_ctx->sess);
}

/*
 * Keep a table of known nids and associated methods.
 * Note that known_cipher_nids[] isn't necessarily indexed the same way as
 * cipher_data[] above, whereas known_cipher_methods[] is.
 */
static int known_cipher_nids[OSSL_NELEM(cipher_data)];
static int known_cipher_nids_amount = -1; /* -1 indicates not yet initialised */
static EVP_CIPHER *known_cipher_methods[OSSL_NELEM(cipher_data)] = { NULL, };

static void prepare_cipher_methods(void)
{
    size_t i;
    struct session_op sess;
    unsigned long cipher_mode;

    memset(&sess, 0, sizeof(sess));
    sess.key = (void *)"01234567890123456789012345678901234567890123456789";

    for (i = 0, known_cipher_nids_amount = 0;
         i < OSSL_NELEM(cipher_data); i++) {

        /*
         * Check that the algo is really available by trying to open and close
         * a session.
         */
        sess.cipher = cipher_data[i].devcryptoid;
        sess.keylen = cipher_data[i].keylen;
        if (ioctl(cfd, CIOCGSESSION, &sess) < 0
            || ioctl(cfd, CIOCFSESSION, &sess.ses) < 0)
            continue;

        cipher_mode = cipher_data[i].flags & EVP_CIPH_MODE;

        if ((known_cipher_methods[i] =
                 EVP_CIPHER_meth_new(cipher_data[i].nid,
                                     cipher_mode == EVP_CIPH_CTR_MODE ? 1 :
                                                    cipher_data[i].blocksize,
                                     cipher_data[i].keylen)) == NULL
            || !EVP_CIPHER_meth_set_iv_length(known_cipher_methods[i],
                                              cipher_data[i].ivlen)
            || !EVP_CIPHER_meth_set_flags(known_cipher_methods[i],
                                          cipher_data[i].flags
                                          | EVP_CIPH_CUSTOM_COPY
                                          | EVP_CIPH_CTRL_INIT
                                          | EVP_CIPH_FLAG_DEFAULT_ASN1)
            || !EVP_CIPHER_meth_set_init(known_cipher_methods[i], cipher_init)
            || !EVP_CIPHER_meth_set_do_cipher(known_cipher_methods[i],
                                              cipher_mode == EVP_CIPH_CTR_MODE ?
                                              ctr_do_cipher :
                                              cipher_do_cipher)
            || !EVP_CIPHER_meth_set_ctrl(known_cipher_methods[i], cipher_ctrl)
            || !EVP_CIPHER_meth_set_cleanup(known_cipher_methods[i],
                                            cipher_cleanup)
            || !EVP_CIPHER_meth_set_impl_ctx_size(known_cipher_methods[i],
                                                  sizeof(struct cipher_ctx))) {
            EVP_CIPHER_meth_free(known_cipher_methods[i]);
            known_cipher_methods[i] = NULL;
        } else {
            known_cipher_nids[known_cipher_nids_amount++] =
                cipher_data[i].nid;
        }
    }
}

static const EVP_CIPHER *get_cipher_method(int nid)
{
    size_t i = get_cipher_data_index(nid);

    if (i == (size_t)-1)
        return NULL;
    return known_cipher_methods[i];
}

static int get_cipher_nids(const int **nids)
{
    *nids = known_cipher_nids;
    return known_cipher_nids_amount;
}

static void destroy_cipher_method(int nid)
{
    size_t i = get_cipher_data_index(nid);

    EVP_CIPHER_meth_free(known_cipher_methods[i]);
    known_cipher_methods[i] = NULL;
}

static void destroy_all_cipher_methods(void)
{
    size_t i;

    for (i = 0; i < OSSL_NELEM(cipher_data); i++)
        destroy_cipher_method(cipher_data[i].nid);
}

static int devcrypto_ciphers(ENGINE *e, const EVP_CIPHER **cipher,
                             const int **nids, int nid)
{
    if (cipher == NULL)
        return get_cipher_nids(nids);

    *cipher = get_cipher_method(nid);

    return *cipher != NULL;
}

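/*-
 * Illustrative only (not used by the engine itself): once the engine is
 * registered, an application would normally reach the cipher methods above
 * through the standard EVP interface, along these lines, where key, iv,
 * the in/out buffers and the length variables are placeholders:
 *
 *     ENGINE *e = ENGINE_by_id("devcrypto");
 *     EVP_CIPHER_CTX *cctx = EVP_CIPHER_CTX_new();
 *
 *     EVP_EncryptInit_ex(cctx, EVP_aes_128_cbc(), e, key, iv);
 *     EVP_EncryptUpdate(cctx, out, &outl, in, inl);
 *     EVP_EncryptFinal_ex(cctx, out + outl, &tmpl);
 *
 *     EVP_CIPHER_CTX_free(cctx);
 *     ENGINE_free(e);
 */
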
/*
 * We only support digests if the cryptodev implementation supports multiple
 * data updates and session copying. Otherwise, we would be forced to maintain
 * a cache, which is perilous if there's a lot of data coming in (if someone
 * wants to checksum an OpenSSL tarball, for example).
 */

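/*-
 * Concretely, the requirement above boils down to this rough ioctl sequence
 * (mirrored by digest_update(), digest_final() and digest_copy() below;
 * sess, sess2, cryp, cphash and md are placeholders):
 *
 *     ioctl(cfd, CIOCGSESSION, &sess);      (open a hash session)
 *     cryp.flags = COP_FLAG_UPDATE;
 *     ioctl(cfd, CIOCCRYPT, &cryp);         (feed data, any number of times)
 *     cphash.src_ses = sess.ses;
 *     cphash.dst_ses = sess2.ses;
 *     ioctl(cfd, CIOCCPHASH, &cphash);      (copy the hash state)
 *     cryp.flags = COP_FLAG_FINAL;
 *     cryp.mac = md;
 *     ioctl(cfd, CIOCCRYPT, &cryp);         (produce the digest)
 */
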
#if defined(CIOCCPHASH) && defined(COP_FLAG_UPDATE) && defined(COP_FLAG_FINAL)
#define IMPLEMENT_DIGEST

/******************************************************************************
 *
 * Digests
 *
 * Because they all do the same basic operation, we have only one set of
 * method functions for them all to share, and a mapping table between
 * NIDs and cryptodev IDs, with all the necessary size data.
 *
 *****/

struct digest_ctx {
    struct session_op sess;
    /* This signals that the init function was called, not that it succeeded. */
    int init_called;
};

static const struct digest_data_st {
    int nid;
    int blocksize;
    int digestlen;
    int devcryptoid;
} digest_data[] = {
#ifndef OPENSSL_NO_MD5
    { NID_md5, /* MD5_CBLOCK */ 64, 16, CRYPTO_MD5 },
#endif
    { NID_sha1, SHA_CBLOCK, 20, CRYPTO_SHA1 },
#ifndef OPENSSL_NO_RMD160
# if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_RIPEMD160)
    { NID_ripemd160, /* RIPEMD160_CBLOCK */ 64, 20, CRYPTO_RIPEMD160 },
# endif
#endif
#if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_SHA2_224)
    { NID_sha224, SHA256_CBLOCK, 224 / 8, CRYPTO_SHA2_224 },
#endif
#if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_SHA2_256)
    { NID_sha256, SHA256_CBLOCK, 256 / 8, CRYPTO_SHA2_256 },
#endif
#if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_SHA2_384)
    { NID_sha384, SHA512_CBLOCK, 384 / 8, CRYPTO_SHA2_384 },
#endif
#if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_SHA2_512)
    { NID_sha512, SHA512_CBLOCK, 512 / 8, CRYPTO_SHA2_512 },
#endif
};

static size_t get_digest_data_index(int nid)
{
    size_t i;

    for (i = 0; i < OSSL_NELEM(digest_data); i++)
        if (nid == digest_data[i].nid)
            return i;

    /*
     * Code further down must make sure that only NIDs in the table above
     * are used. If any other NID reaches this function, there's a grave
     * coding error further down.
     */
    assert("Code that never should be reached" == NULL);
    return -1;
}

static const struct digest_data_st *get_digest_data(int nid)
{
    return &digest_data[get_digest_data_index(nid)];
}

/*
 * Following are the four necessary functions to map OpenSSL functionality
 * with cryptodev.
 */

static int digest_init(EVP_MD_CTX *ctx)
{
    struct digest_ctx *digest_ctx =
        (struct digest_ctx *)EVP_MD_CTX_md_data(ctx);
    const struct digest_data_st *digest_d =
        get_digest_data(EVP_MD_CTX_type(ctx));

    digest_ctx->init_called = 1;

    memset(&digest_ctx->sess, 0, sizeof(digest_ctx->sess));
    digest_ctx->sess.mac = digest_d->devcryptoid;
    if (ioctl(cfd, CIOCGSESSION, &digest_ctx->sess) < 0) {
        SYSerr(SYS_F_IOCTL, errno);
        return 0;
    }

    return 1;
}

static int digest_op(struct digest_ctx *ctx, const void *src, size_t srclen,
                     void *res, unsigned int flags)
{
    struct crypt_op cryp;

    memset(&cryp, 0, sizeof(cryp));
    cryp.ses = ctx->sess.ses;
    cryp.len = srclen;
    cryp.src = (void *)src;
    cryp.dst = NULL;
    cryp.mac = res;
    cryp.flags = flags;
    return ioctl(cfd, CIOCCRYPT, &cryp);
}

static int digest_update(EVP_MD_CTX *ctx, const void *data, size_t count)
{
    struct digest_ctx *digest_ctx =
        (struct digest_ctx *)EVP_MD_CTX_md_data(ctx);

    if (count == 0)
        return 1;

    if (digest_ctx == NULL)
        return 0;

    if (digest_op(digest_ctx, data, count, NULL, COP_FLAG_UPDATE) < 0) {
        SYSerr(SYS_F_IOCTL, errno);
        return 0;
    }

    return 1;
}

static int digest_final(EVP_MD_CTX *ctx, unsigned char *md)
{
    struct digest_ctx *digest_ctx =
        (struct digest_ctx *)EVP_MD_CTX_md_data(ctx);

    if (md == NULL || digest_ctx == NULL)
        return 0;

    if (digest_op(digest_ctx, NULL, 0, md, COP_FLAG_FINAL) < 0) {
        SYSerr(SYS_F_IOCTL, errno);
        return 0;
    }

    return 1;
}

static int digest_copy(EVP_MD_CTX *to, const EVP_MD_CTX *from)
{
    struct digest_ctx *digest_from =
        (struct digest_ctx *)EVP_MD_CTX_md_data(from);
    struct digest_ctx *digest_to =
        (struct digest_ctx *)EVP_MD_CTX_md_data(to);
    struct cphash_op cphash;

    if (digest_from == NULL || digest_from->init_called != 1)
        return 1;

    if (!digest_init(to)) {
        SYSerr(SYS_F_IOCTL, errno);
        return 0;
    }

    cphash.src_ses = digest_from->sess.ses;
    cphash.dst_ses = digest_to->sess.ses;
    if (ioctl(cfd, CIOCCPHASH, &cphash) < 0) {
        SYSerr(SYS_F_IOCTL, errno);
        return 0;
    }
    return 1;
}

static int digest_cleanup(EVP_MD_CTX *ctx)
{
    struct digest_ctx *digest_ctx =
        (struct digest_ctx *)EVP_MD_CTX_md_data(ctx);

    if (digest_ctx == NULL)
        return 1;

    return clean_devcrypto_session(&digest_ctx->sess);
}

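/*
 * devcrypto_test_digest() decides whether a digest can be advertised at all:
 * it opens two sessions for the same algorithm and attempts a CIOCCPHASH
 * between them, so only drivers capable of the hash-state copy that
 * digest_copy() depends on are accepted.
 */
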
static int devcrypto_test_digest(size_t digest_data_index)
{
    struct session_op sess1, sess2;
    struct cphash_op cphash;
    int ret=0;

    memset(&sess1, 0, sizeof(sess1));
    memset(&sess2, 0, sizeof(sess2));
    sess1.mac = digest_data[digest_data_index].devcryptoid;
    if (ioctl(cfd, CIOCGSESSION, &sess1) < 0)
        return 0;
    /* Make sure the driver is capable of hash state copy */
    sess2.mac = sess1.mac;
    if (ioctl(cfd, CIOCGSESSION, &sess2) >= 0) {
        cphash.src_ses = sess1.ses;
        cphash.dst_ses = sess2.ses;
        if (ioctl(cfd, CIOCCPHASH, &cphash) >= 0)
            ret = 1;
        ioctl(cfd, CIOCFSESSION, &sess2.ses);
    }
    ioctl(cfd, CIOCFSESSION, &sess1.ses);
    return ret;
}

/*
 * Keep a table of known nids and associated methods.
 * Note that known_digest_nids[] isn't necessarily indexed the same way as
 * digest_data[] above, whereas known_digest_methods[] is.
 */
static int known_digest_nids[OSSL_NELEM(digest_data)];
static int known_digest_nids_amount = -1; /* -1 indicates not yet initialised */
static EVP_MD *known_digest_methods[OSSL_NELEM(digest_data)] = { NULL, };

static void prepare_digest_methods(void)
{
    size_t i;

    for (i = 0, known_digest_nids_amount = 0; i < OSSL_NELEM(digest_data);
         i++) {

        /*
         * Check that the algo is usable
         */
        if (!devcrypto_test_digest(i))
            continue;

        if ((known_digest_methods[i] = EVP_MD_meth_new(digest_data[i].nid,
                                                       NID_undef)) == NULL
            || !EVP_MD_meth_set_input_blocksize(known_digest_methods[i],
                                                digest_data[i].blocksize)
            || !EVP_MD_meth_set_result_size(known_digest_methods[i],
                                            digest_data[i].digestlen)
            || !EVP_MD_meth_set_init(known_digest_methods[i], digest_init)
            || !EVP_MD_meth_set_update(known_digest_methods[i], digest_update)
            || !EVP_MD_meth_set_final(known_digest_methods[i], digest_final)
            || !EVP_MD_meth_set_copy(known_digest_methods[i], digest_copy)
            || !EVP_MD_meth_set_cleanup(known_digest_methods[i], digest_cleanup)
            || !EVP_MD_meth_set_app_datasize(known_digest_methods[i],
                                             sizeof(struct digest_ctx))) {
            EVP_MD_meth_free(known_digest_methods[i]);
            known_digest_methods[i] = NULL;
        } else {
            known_digest_nids[known_digest_nids_amount++] = digest_data[i].nid;
        }
    }
}

static const EVP_MD *get_digest_method(int nid)
{
    size_t i = get_digest_data_index(nid);

    if (i == (size_t)-1)
        return NULL;
    return known_digest_methods[i];
}

static int get_digest_nids(const int **nids)
{
    *nids = known_digest_nids;
    return known_digest_nids_amount;
}

static void destroy_digest_method(int nid)
{
    size_t i = get_digest_data_index(nid);

    EVP_MD_meth_free(known_digest_methods[i]);
    known_digest_methods[i] = NULL;
}

static void destroy_all_digest_methods(void)
{
    size_t i;

    for (i = 0; i < OSSL_NELEM(digest_data); i++)
        destroy_digest_method(digest_data[i].nid);
}

static int devcrypto_digests(ENGINE *e, const EVP_MD **digest,
                             const int **nids, int nid)
{
    if (digest == NULL)
        return get_digest_nids(nids);

    *digest = get_digest_method(nid);

    return *digest != NULL;
}

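/*-
 * Illustrative only: with the engine registered, an application reaches the
 * digest methods above through the usual EVP calls, roughly as follows
 * (buf, buflen, md and mdlen are placeholders):
 *
 *     ENGINE *e = ENGINE_by_id("devcrypto");
 *     EVP_MD_CTX *mctx = EVP_MD_CTX_new();
 *
 *     EVP_DigestInit_ex(mctx, EVP_sha256(), e);
 *     EVP_DigestUpdate(mctx, buf, buflen);
 *     EVP_DigestFinal_ex(mctx, md, &mdlen);
 *
 *     EVP_MD_CTX_free(mctx);
 *     ENGINE_free(e);
 */
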
#endif

/******************************************************************************
 *
 * LOAD / UNLOAD
 *
 *****/

static int devcrypto_unload(ENGINE *e)
{
    destroy_all_cipher_methods();
#ifdef IMPLEMENT_DIGEST
    destroy_all_digest_methods();
#endif

    close(cfd);

    return 1;
}

/*
 * This engine is always built into libcrypto, so it doesn't offer any
 * ability to be dynamically loadable.
 */
void engine_load_devcrypto_int(void)
{
    ENGINE *e = NULL;
    int fd;

    if ((fd = open("/dev/crypto", O_RDWR, 0)) < 0) {
#ifndef ENGINE_DEVCRYPTO_DEBUG
        if (errno != ENOENT)
#endif
            fprintf(stderr, "Could not open /dev/crypto: %s\n", strerror(errno));
        return;
    }

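    /*
     * Where the cryptodev header defines CRIOGET (notably the BSD flavour),
     * a dedicated crypto descriptor is cloned from /dev/crypto and the
     * originally opened descriptor is closed again; otherwise the opened
     * descriptor is used directly as cfd.
     */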
#ifdef CRIOGET
    if (ioctl(fd, CRIOGET, &cfd) < 0) {
        fprintf(stderr, "Could not create crypto fd: %s\n", strerror(errno));
        close(fd);
        cfd = -1;
        return;
    }
    close(fd);
#else
    cfd = fd;
#endif

    if ((e = ENGINE_new()) == NULL
        || !ENGINE_set_destroy_function(e, devcrypto_unload)) {
        ENGINE_free(e);
        /*
         * We know that devcrypto_unload() won't be called when one of the
         * above two calls has failed, so we close cfd explicitly here to
         * avoid leaking resources.
         */
        close(cfd);
        return;
    }

    prepare_cipher_methods();
#ifdef IMPLEMENT_DIGEST
    prepare_digest_methods();
#endif

    if (!ENGINE_set_id(e, "devcrypto")
        || !ENGINE_set_name(e, "/dev/crypto engine")

/*
 * Asymmetric ciphers aren't well supported with /dev/crypto. Among the BSD
 * implementations, it seems to only exist in FreeBSD, and regarding the
 * parameters in its crypt_kop, the manual crypto(4) has this to say:
 *
 *    The semantics of these arguments are currently undocumented.
 *
 * Reading through the FreeBSD source code doesn't give much more than
 * their CRK_MOD_EXP implementation for ubsec.
 *
 * It doesn't look much better with cryptodev-linux. They have the crypt_kop
 * structure as well as the command (CRK_*) in cryptodev.h, but no support
 * seems to be implemented at all for the moment.
 *
 * At the time of writing, it seems impossible to write proper support for
 * FreeBSD's asym features without some very deep knowledge and access to
 * specific kernel modules.
 *
 * /Richard Levitte, 2017-05-11
 */
#if 0
# ifndef OPENSSL_NO_RSA
        || !ENGINE_set_RSA(e, devcrypto_rsa)
# endif
# ifndef OPENSSL_NO_DSA
        || !ENGINE_set_DSA(e, devcrypto_dsa)
# endif
# ifndef OPENSSL_NO_DH
        || !ENGINE_set_DH(e, devcrypto_dh)
# endif
# ifndef OPENSSL_NO_EC
        || !ENGINE_set_EC(e, devcrypto_ec)
# endif
#endif
        || !ENGINE_set_ciphers(e, devcrypto_ciphers)
#ifdef IMPLEMENT_DIGEST
        || !ENGINE_set_digests(e, devcrypto_digests)
#endif
        ) {
        ENGINE_free(e);
        return;
    }

    ENGINE_add(e);
    ENGINE_free(e);             /* Lose our local reference */
    ERR_clear_error();
}