/*
 * RSA padding templates.
 *
 * Copyright (c) 2015 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/algapi.h>
#include <crypto/akcipher.h>
#include <crypto/internal/akcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>

/*
 * Hash algorithm OIDs plus ASN.1 DER wrappings [RFC4880 sec 5.2.2].
 */
static const u8 rsa_digest_info_md5[] = {
	0x30, 0x20, 0x30, 0x0c, 0x06, 0x08,
	0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, /* OID */
	0x05, 0x00, 0x04, 0x10
};

static const u8 rsa_digest_info_sha1[] = {
	0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
	0x2b, 0x0e, 0x03, 0x02, 0x1a,
	0x05, 0x00, 0x04, 0x14
};

static const u8 rsa_digest_info_rmd160[] = {
	0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
	0x2b, 0x24, 0x03, 0x02, 0x01,
	0x05, 0x00, 0x04, 0x14
};

static const u8 rsa_digest_info_sha224[] = {
	0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09,
	0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04,
	0x05, 0x00, 0x04, 0x1c
};

static const u8 rsa_digest_info_sha256[] = {
	0x30, 0x31, 0x30, 0x0d, 0x06, 0x09,
	0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01,
	0x05, 0x00, 0x04, 0x20
};

static const u8 rsa_digest_info_sha384[] = {
	0x30, 0x41, 0x30, 0x0d, 0x06, 0x09,
	0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02,
	0x05, 0x00, 0x04, 0x30
};

static const u8 rsa_digest_info_sha512[] = {
	0x30, 0x51, 0x30, 0x0d, 0x06, 0x09,
	0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03,
	0x05, 0x00, 0x04, 0x40
};
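
/*
 * Layout note: each template above is the DER prefix of a DigestInfo
 * structure (RFC 8017, EMSA-PKCS1-v1_5); the raw hash value is appended
 * directly after it.  Reading rsa_digest_info_sha256 as an example:
 *
 *	0x30 0x31		SEQUENCE, 49 bytes follow (DigestInfo)
 *	  0x30 0x0d		  SEQUENCE, 13 bytes (AlgorithmIdentifier)
 *	    0x06 0x09 0x60...	    OID 2.16.840.1.101.3.4.2.1 (sha256)
 *	    0x05 0x00		    NULL parameters
 *	  0x04 0x20		  OCTET STRING header, 32-byte digest follows
 */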

static const struct rsa_asn1_template {
	const char	*name;
	const u8	*data;
	size_t		size;
} rsa_asn1_templates[] = {
#define _(X) { #X, rsa_digest_info_##X, sizeof(rsa_digest_info_##X) }
	_(md5),
	_(sha1),
	_(rmd160),
	_(sha256),
	_(sha384),
	_(sha512),
	_(sha224),
	{ NULL }
#undef _
};

static const struct rsa_asn1_template *rsa_lookup_asn1(const char *name)
{
	const struct rsa_asn1_template *p;

	for (p = rsa_asn1_templates; p->name; p++)
		if (strcmp(name, p->name) == 0)
			return p;
	return NULL;
}

struct pkcs1pad_ctx {
	struct crypto_akcipher *child;
	unsigned int key_size;
};

struct pkcs1pad_inst_ctx {
	struct crypto_akcipher_spawn spawn;
	const struct rsa_asn1_template *digest_info;
};

struct pkcs1pad_request {
	struct akcipher_request child_req;

	struct scatterlist in_sg[3], out_sg[2];
	uint8_t *in_buf, *out_buf;
};

static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
		unsigned int keylen)
{
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	int err, size;

	err = crypto_akcipher_set_pub_key(ctx->child, key, keylen);

	if (!err) {
		/* Find out new modulus size from rsa implementation */
		size = crypto_akcipher_maxsize(ctx->child);

		ctx->key_size = size > 0 ? size : 0;
	}

	return err;
}

static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key,
		unsigned int keylen)
{
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	int err, size;

	err = crypto_akcipher_set_priv_key(ctx->child, key, keylen);

	if (!err) {
		/* Find out new modulus size from rsa implementation */
		size = crypto_akcipher_maxsize(ctx->child);

		ctx->key_size = size > 0 ? size : 0;
	}

	return err;
}

static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
{
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);

	/*
	 * The maximum destination buffer size for the encrypt/sign operations
	 * will be the same as for RSA, even though it's smaller for
	 * decrypt/verify operations.
	 */
	return ctx->key_size ?: -EINVAL;
}
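
/*
 * Helper note: pkcs1pad_sg_set_buf() below maps a kmalloc'ed buffer into a
 * scatterlist.  The buffer is at most PAGE_SIZE bytes (enforced by the
 * key_size checks in each operation), so it can straddle at most one page
 * boundary and occupy up to two sg entries; an optional chain entry links
 * the caller's own scatterlist after it.  This is why pkcs1pad_request
 * reserves in_sg[3] (two segments plus a chain to req->src) but only
 * out_sg[2] (two segments, never chained).
 */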

static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len,
		struct scatterlist *next)
{
	int nsegs = next ? 1 : 0;

	if (offset_in_page(buf) + len <= PAGE_SIZE) {
		nsegs += 1;
		sg_init_table(sg, nsegs);
		sg_set_buf(sg, buf, len);
	} else {
		nsegs += 2;
		sg_init_table(sg, nsegs);
		sg_set_buf(sg + 0, buf, PAGE_SIZE - offset_in_page(buf));
		sg_set_buf(sg + 1, buf + PAGE_SIZE - offset_in_page(buf),
				offset_in_page(buf) + len - PAGE_SIZE);
	}

	if (next)
		sg_chain(sg, nsegs, next);
}
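
/*
 * Completion for encrypt/sign: the raw RSA result is a big-endian integer
 * that may be shorter than the modulus, so any missing leading zero octets
 * are written into req->dst first and the result is copied in after them,
 * making the final output exactly ctx->key_size bytes as PKCS#1 requires.
 */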
static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	size_t pad_len = ctx->key_size - req_ctx->child_req.dst_len;
	size_t chunk_len, pad_left;
	struct sg_mapping_iter miter;

	if (!err) {
		if (pad_len) {
			sg_miter_start(&miter, req->dst,
					sg_nents_for_len(req->dst, pad_len),
					SG_MITER_ATOMIC | SG_MITER_TO_SG);

			pad_left = pad_len;
			while (pad_left) {
				sg_miter_next(&miter);

				chunk_len = min(miter.length, pad_left);
				memset(miter.addr, 0, chunk_len);
				pad_left -= chunk_len;
			}

			sg_miter_stop(&miter);
		}

		sg_pcopy_from_buffer(req->dst,
				sg_nents_for_len(req->dst, ctx->key_size),
				req_ctx->out_buf, req_ctx->child_req.dst_len,
				pad_len);
	}
	req->dst_len = ctx->key_size;

	kfree(req_ctx->in_buf);
	kzfree(req_ctx->out_buf);

	return err;
}

static void pkcs1pad_encrypt_sign_complete_cb(
		struct crypto_async_request *child_async_req, int err)
{
	struct akcipher_request *req = child_async_req->data;
	struct crypto_async_request async_req;

	if (err == -EINPROGRESS)
		return;

	async_req.data = req->base.data;
	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
	async_req.flags = child_async_req->flags;
	req->base.complete(&async_req,
			pkcs1pad_encrypt_sign_complete(req, err));
}

static int pkcs1pad_encrypt(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	int err;
	unsigned int i, ps_end;

	if (!ctx->key_size)
		return -EINVAL;

	if (req->src_len > ctx->key_size - 11)
		return -EOVERFLOW;

	if (req->dst_len < ctx->key_size) {
		req->dst_len = ctx->key_size;
		return -EOVERFLOW;
	}

	if (ctx->key_size > PAGE_SIZE)
		return -ENOTSUPP;

	/*
	 * Replace both input and output to add the padding in the input and
	 * the potential missing leading zeros in the output.
	 */
	req_ctx->child_req.src = req_ctx->in_sg;
	req_ctx->child_req.src_len = ctx->key_size - 1;
	req_ctx->child_req.dst = req_ctx->out_sg;
	req_ctx->child_req.dst_len = ctx->key_size;

	req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC);
	if (!req_ctx->in_buf)
		return -ENOMEM;

	ps_end = ctx->key_size - req->src_len - 2;
	req_ctx->in_buf[0] = 0x02;
	for (i = 1; i < ps_end; i++)
		req_ctx->in_buf[i] = 1 + prandom_u32_max(255);
	req_ctx->in_buf[ps_end] = 0x00;
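
	/*
	 * in_buf now holds the EME-PKCS1-v1_5 block minus its implicit
	 * leading zero octet (the child request is one byte shorter than
	 * the modulus, which the RSA primitive treats as the same integer):
	 *
	 *	0x02 || PS (>= 8 nonzero random octets) || 0x00
	 *
	 * The message itself follows via the chained req->src below.
	 */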

	pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
			ctx->key_size - 1 - req->src_len, req->src);

	req_ctx->out_buf = kmalloc(ctx->key_size,
			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC);
	if (!req_ctx->out_buf) {
		kfree(req_ctx->in_buf);
		return -ENOMEM;
	}

	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
			ctx->key_size, NULL);

	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
			pkcs1pad_encrypt_sign_complete_cb, req);

	err = crypto_akcipher_encrypt(&req_ctx->child_req);
	if (err != -EINPROGRESS &&
			(err != -EBUSY ||
			 !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return pkcs1pad_encrypt_sign_complete(req, err);

	return err;
}

static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	unsigned int pos;

	if (err == -EOVERFLOW)
		/* Decrypted value had no leading 0 byte */
		err = -EINVAL;

	if (err)
		goto done;

	if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
		err = -EINVAL;
		goto done;
	}

	if (req_ctx->out_buf[0] != 0x02) {
		err = -EINVAL;
		goto done;
	}

	for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
		if (req_ctx->out_buf[pos] == 0x00)
			break;
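
	/*
	 * EME-PKCS1-v1_5 mandates at least 8 octets of nonzero padding
	 * between the 0x02 block type and the 0x00 separator; with the
	 * leading zero already stripped, the separator can therefore appear
	 * at index 9 at the earliest, and it must appear somewhere.
	 */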
	if (pos < 9 || pos == req_ctx->child_req.dst_len) {
		err = -EINVAL;
		goto done;
	}
	pos++;

	if (req->dst_len < req_ctx->child_req.dst_len - pos)
		err = -EOVERFLOW;
	req->dst_len = req_ctx->child_req.dst_len - pos;

	if (!err)
		sg_copy_from_buffer(req->dst,
				sg_nents_for_len(req->dst, req->dst_len),
				req_ctx->out_buf + pos, req->dst_len);

done:
	kzfree(req_ctx->out_buf);

	return err;
}

static void pkcs1pad_decrypt_complete_cb(
		struct crypto_async_request *child_async_req, int err)
{
	struct akcipher_request *req = child_async_req->data;
	struct crypto_async_request async_req;

	if (err == -EINPROGRESS)
		return;

	async_req.data = req->base.data;
	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
	async_req.flags = child_async_req->flags;
	req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err));
}

static int pkcs1pad_decrypt(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	int err;

	if (!ctx->key_size || req->src_len != ctx->key_size)
		return -EINVAL;

	if (ctx->key_size > PAGE_SIZE)
		return -ENOTSUPP;

	/* Reuse input buffer, output to a new buffer */
	req_ctx->child_req.src = req->src;
	req_ctx->child_req.src_len = req->src_len;
	req_ctx->child_req.dst = req_ctx->out_sg;
	req_ctx->child_req.dst_len = ctx->key_size;

	req_ctx->out_buf = kmalloc(ctx->key_size,
			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC);
	if (!req_ctx->out_buf)
		return -ENOMEM;

	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
			ctx->key_size, NULL);

	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
			pkcs1pad_decrypt_complete_cb, req);

	err = crypto_akcipher_decrypt(&req_ctx->child_req);
	if (err != -EINPROGRESS &&
			(err != -EBUSY ||
			 !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return pkcs1pad_decrypt_complete(req, err);

	return err;
}

static int pkcs1pad_sign(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	struct akcipher_instance *inst = akcipher_alg_instance(tfm);
	struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
	const struct rsa_asn1_template *digest_info = ictx->digest_info;
	int err;
	unsigned int ps_end, digest_size = 0;

	if (!ctx->key_size)
		return -EINVAL;

	digest_size = digest_info->size;

	if (req->src_len + digest_size > ctx->key_size - 11)
		return -EOVERFLOW;

	if (req->dst_len < ctx->key_size) {
		req->dst_len = ctx->key_size;
		return -EOVERFLOW;
	}

	if (ctx->key_size > PAGE_SIZE)
		return -ENOTSUPP;

	/*
	 * Replace both input and output to add the padding in the input and
	 * the potential missing leading zeros in the output.
	 */
	req_ctx->child_req.src = req_ctx->in_sg;
	req_ctx->child_req.src_len = ctx->key_size - 1;
	req_ctx->child_req.dst = req_ctx->out_sg;
	req_ctx->child_req.dst_len = ctx->key_size;

	req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC);
	if (!req_ctx->in_buf)
		return -ENOMEM;

	ps_end = ctx->key_size - digest_size - req->src_len - 2;
	req_ctx->in_buf[0] = 0x01;
	memset(req_ctx->in_buf + 1, 0xff, ps_end - 1);
	req_ctx->in_buf[ps_end] = 0x00;

	memcpy(req_ctx->in_buf + ps_end + 1, digest_info->data,
			digest_info->size);
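
	/*
	 * in_buf now holds the EMSA-PKCS1-v1_5 block minus its implicit
	 * leading zero octet:
	 *
	 *	0x01 || 0xff ... 0xff || 0x00 || DigestInfo DER prefix
	 *
	 * The chained req->src below supplies the message digest itself,
	 * which completes the DigestInfo encoding.
	 */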

	pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
			ctx->key_size - 1 - req->src_len, req->src);

	req_ctx->out_buf = kmalloc(ctx->key_size,
			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC);
	if (!req_ctx->out_buf) {
		kfree(req_ctx->in_buf);
		return -ENOMEM;
	}

	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
			ctx->key_size, NULL);

	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
			pkcs1pad_encrypt_sign_complete_cb, req);

	err = crypto_akcipher_sign(&req_ctx->child_req);
	if (err != -EINPROGRESS &&
			(err != -EBUSY ||
			 !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return pkcs1pad_encrypt_sign_complete(req, err);

	return err;
}

static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	struct akcipher_instance *inst = akcipher_alg_instance(tfm);
	struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
	const struct rsa_asn1_template *digest_info = ictx->digest_info;
	unsigned int pos;

	if (err == -EOVERFLOW)
		/* Decrypted value had no leading 0 byte */
		err = -EINVAL;

	if (err)
		goto done;

	if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
		err = -EINVAL;
		goto done;
	}

	err = -EBADMSG;
	if (req_ctx->out_buf[0] != 0x01)
		goto done;

	for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
		if (req_ctx->out_buf[pos] != 0xff)
			break;

	if (pos < 9 || pos == req_ctx->child_req.dst_len ||
			req_ctx->out_buf[pos] != 0x00)
		goto done;
	pos++;

	if (memcmp(req_ctx->out_buf + pos, digest_info->data,
			digest_info->size))
		goto done;

	pos += digest_info->size;

	err = 0;

	if (req->dst_len < req_ctx->child_req.dst_len - pos)
		err = -EOVERFLOW;
	req->dst_len = req_ctx->child_req.dst_len - pos;

	if (!err)
		sg_copy_from_buffer(req->dst,
				sg_nents_for_len(req->dst, req->dst_len),
				req_ctx->out_buf + pos, req->dst_len);

done:
	kzfree(req_ctx->out_buf);

	return err;
}

static void pkcs1pad_verify_complete_cb(
		struct crypto_async_request *child_async_req, int err)
{
	struct akcipher_request *req = child_async_req->data;
	struct crypto_async_request async_req;

	if (err == -EINPROGRESS)
		return;

	async_req.data = req->base.data;
	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
	async_req.flags = child_async_req->flags;
	req->base.complete(&async_req, pkcs1pad_verify_complete(req, err));
}

/*
 * The verify operation is here for completeness similar to the verification
 * defined in RFC2313 section 10.2 except that block type 0 is not accepted,
 * as in RFC2437.  RFC2437 section 9.2 doesn't define any operation to
 * retrieve the DigestInfo from a signature; instead the user is expected
 * to call the sign operation to generate the expected signature and compare
 * signatures instead of the message-digests.
 */
static int pkcs1pad_verify(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	int err;

	if (!ctx->key_size || req->src_len < ctx->key_size)
		return -EINVAL;

	if (ctx->key_size > PAGE_SIZE)
		return -ENOTSUPP;

	/* Reuse input buffer, output to a new buffer */
	req_ctx->child_req.src = req->src;
	req_ctx->child_req.src_len = req->src_len;
	req_ctx->child_req.dst = req_ctx->out_sg;
	req_ctx->child_req.dst_len = ctx->key_size;

	req_ctx->out_buf = kmalloc(ctx->key_size,
			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC);
	if (!req_ctx->out_buf)
		return -ENOMEM;

	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
			ctx->key_size, NULL);

	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
			pkcs1pad_verify_complete_cb, req);

	err = crypto_akcipher_verify(&req_ctx->child_req);
	if (err != -EINPROGRESS &&
			(err != -EBUSY ||
			 !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return pkcs1pad_verify_complete(req, err);

	return err;
}

static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm)
{
	struct akcipher_instance *inst = akcipher_alg_instance(tfm);
	struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct crypto_akcipher *child_tfm;

	child_tfm = crypto_spawn_akcipher(&ictx->spawn);
	if (IS_ERR(child_tfm))
		return PTR_ERR(child_tfm);

	ctx->child = child_tfm;

	return 0;
}

static void pkcs1pad_exit_tfm(struct crypto_akcipher *tfm)
{
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);

	crypto_free_akcipher(ctx->child);
}

static void pkcs1pad_free(struct akcipher_instance *inst)
{
	struct pkcs1pad_inst_ctx *ctx = akcipher_instance_ctx(inst);
	struct crypto_akcipher_spawn *spawn = &ctx->spawn;

	crypto_drop_akcipher(spawn);
	kfree(inst);
}
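
/*
 * pkcs1pad_create() instantiates the template: tb[1] names the underlying
 * RSA implementation and tb[2] the hash whose DigestInfo template is used
 * for sign/verify, producing an akcipher instance such as
 * "pkcs1pad(rsa,sha256)".
 */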
static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	const struct rsa_asn1_template *digest_info;
	struct crypto_attr_type *algt;
	struct akcipher_instance *inst;
	struct pkcs1pad_inst_ctx *ctx;
	struct crypto_akcipher_spawn *spawn;
	struct akcipher_alg *rsa_alg;
	const char *rsa_alg_name;
	const char *hash_name;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_AKCIPHER) & algt->mask)
		return -EINVAL;

	rsa_alg_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(rsa_alg_name))
		return PTR_ERR(rsa_alg_name);

	hash_name = crypto_attr_alg_name(tb[2]);
	if (IS_ERR(hash_name))
		return PTR_ERR(hash_name);

	digest_info = rsa_lookup_asn1(hash_name);
	if (!digest_info)
		return -EINVAL;

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = akcipher_instance_ctx(inst);
	spawn = &ctx->spawn;
	ctx->digest_info = digest_info;

	crypto_set_spawn(&spawn->base, akcipher_crypto_instance(inst));
	err = crypto_grab_akcipher(spawn, rsa_alg_name, 0,
			crypto_requires_sync(algt->type, algt->mask));
	if (err)
		goto out_free_inst;

	rsa_alg = crypto_spawn_akcipher_alg(spawn);

	err = -ENAMETOOLONG;

	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
		     "pkcs1pad(%s,%s)", rsa_alg->base.cra_name, hash_name) >=
	    CRYPTO_MAX_ALG_NAME ||
	    snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "pkcs1pad(%s,%s)",
		     rsa_alg->base.cra_driver_name, hash_name) >=
	    CRYPTO_MAX_ALG_NAME)
		goto out_drop_alg;

	inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.base.cra_priority = rsa_alg->base.cra_priority;
	inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx);

	inst->alg.init = pkcs1pad_init_tfm;
	inst->alg.exit = pkcs1pad_exit_tfm;

	inst->alg.encrypt = pkcs1pad_encrypt;
	inst->alg.decrypt = pkcs1pad_decrypt;
	inst->alg.sign = pkcs1pad_sign;
	inst->alg.verify = pkcs1pad_verify;
	inst->alg.set_pub_key = pkcs1pad_set_pub_key;
	inst->alg.set_priv_key = pkcs1pad_set_priv_key;
	inst->alg.max_size = pkcs1pad_get_max_size;
	inst->alg.reqsize = sizeof(struct pkcs1pad_request) + rsa_alg->reqsize;

	inst->free = pkcs1pad_free;

	err = akcipher_register_instance(tmpl, inst);
	if (err)
		goto out_drop_alg;

	return 0;

out_drop_alg:
	crypto_drop_akcipher(spawn);
out_free_inst:
	kfree(inst);
	return err;
}

struct crypto_template rsa_pkcs1pad_tmpl = {
	.name = "pkcs1pad",
	.create = pkcs1pad_create,
	.module = THIS_MODULE,
};
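
/*
 * Usage sketch (illustrative only, not part of this file): once this
 * template is registered by the rsa module, a caller can instantiate it
 * by name and drive it through the generic akcipher API, e.g.:
 *
 *	struct crypto_akcipher *tfm;
 *
 *	tfm = crypto_alloc_akcipher("pkcs1pad(rsa,sha256)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_akcipher(tfm);
 */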