crypto: hash - Pull out the functions to save/restore request
crypto/ahash.c (cascardo/linux.git)
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

struct ahash_request_priv {
        crypto_completion_t complete;
        void *data;
        u8 *result;
        void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
        return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
                            halg);
}

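/*
 * Map the current walk page and return the number of bytes to hash from
 * it.  The count is clamped to the end of the page and, when the start
 * offset violates the alignmask, to the bytes preceding the next
 * alignment boundary so that the following chunk starts aligned.
 */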
static int hash_walk_next(struct crypto_hash_walk *walk)
{
        unsigned int alignmask = walk->alignmask;
        unsigned int offset = walk->offset;
        unsigned int nbytes = min(walk->entrylen,
                                  ((unsigned int)(PAGE_SIZE)) - offset);

        walk->data = kmap_atomic(walk->pg);
        walk->data += offset;

        if (offset & alignmask) {
                unsigned int unaligned = alignmask + 1 - (offset & alignmask);
                if (nbytes > unaligned)
                        nbytes = unaligned;
        }

        walk->entrylen -= nbytes;
        return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
        struct scatterlist *sg;

        sg = walk->sg;
        walk->pg = sg_page(sg);
        walk->offset = sg->offset;
        walk->entrylen = sg->length;

        if (walk->entrylen > walk->total)
                walk->entrylen = walk->total;
        walk->total -= walk->entrylen;

        return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
        unsigned int alignmask = walk->alignmask;
        unsigned int nbytes = walk->entrylen;

        walk->data -= walk->offset;

        if (nbytes && walk->offset & alignmask && !err) {
                walk->offset = ALIGN(walk->offset, alignmask + 1);
                walk->data += walk->offset;

                nbytes = min(nbytes,
                             ((unsigned int)(PAGE_SIZE)) - walk->offset);
                walk->entrylen -= nbytes;

                return nbytes;
        }

        kunmap_atomic(walk->data);
        crypto_yield(walk->flags);

        if (err)
                return err;

        if (nbytes) {
                walk->offset = 0;
                walk->pg++;
                return hash_walk_next(walk);
        }

        if (!walk->total)
                return 0;

        walk->sg = scatterwalk_sg_next(walk->sg);

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
                           struct crypto_hash_walk *walk)
{
        walk->total = req->nbytes;

        if (!walk->total)
                return 0;

        walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
        walk->sg = req->src;
        walk->flags = req->base.flags;

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
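
/*
 * Illustrative sketch (not part of this file): hash implementations
 * typically drive the walk in a loop of this shape, mirroring
 * shash_ahash_update() in crypto/shash.c; process_block() is a
 * hypothetical stand-in for the algorithm's block function, returning
 * 0 or a negative error:
 *
 *	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *	     nbytes = crypto_hash_walk_done(&walk, nbytes))
 *		nbytes = process_block(walk.data, nbytes);
 */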

int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
                                  struct crypto_hash_walk *walk,
                                  struct scatterlist *sg, unsigned int len)
{
        walk->total = len;

        if (!walk->total)
                return 0;

        walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
        walk->sg = sg;
        walk->flags = hdesc->flags;

        return hash_walk_new_entry(walk);
}

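/*
 * Slow path for crypto_ahash_setkey(): the caller's key buffer is not
 * aligned to the transform's alignmask, so copy it into a kmalloc'd
 * bounce buffer (keylen + alignmask bytes), pass the aligned copy to
 * ->setkey(), and scrub the copy with kzfree() afterwards.
 */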
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
                                  unsigned int keylen)
{
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = tfm->setkey(tfm, alignbuffer, keylen);
        kzfree(buffer);
        return ret;
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
                        unsigned int keylen)
{
        unsigned long alignmask = crypto_ahash_alignmask(tfm);

        if ((unsigned long)key & alignmask)
                return ahash_setkey_unaligned(tfm, key, keylen);

        return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
                          unsigned int keylen)
{
        return -ENOSYS;
}

static inline unsigned int ahash_align_buffer_size(unsigned len,
                                                   unsigned long mask)
{
        return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}
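
/*
 * Worked example (illustrative; assumes crypto_tfm_ctx_alignment() is 8
 * on the target configuration): for a 20-byte digest and an alignmask
 * of 63, this returns 20 + (63 & ~7) = 76 bytes, enough slack to place
 * the result on a 64-byte boundary anywhere inside an 8-byte-aligned
 * allocation.
 */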

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        unsigned int ds = crypto_ahash_digestsize(tfm);
        struct ahash_request_priv *priv;

        priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
                       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC);
        if (!priv)
                return -ENOMEM;

        /*
         * WARNING: Voodoo programming below!
         *
         * The code below is obscure and hard to understand, thus explanation
         * is necessary. See include/crypto/hash.h and include/linux/crypto.h
         * to understand the layout of structures used here!
         *
         * The code here will replace portions of the ORIGINAL request with
         * pointers to new code and buffers so the hashing operation can store
         * the result in an aligned buffer. We will call the modified request
         * an ADJUSTED request.
         *
         * The newly mangled request will look like this:
         *
         * req {
         *   .result        = ADJUSTED[new aligned buffer]
         *   .base.complete = ADJUSTED[pointer to completion function]
         *   .base.data     = ADJUSTED[*req (pointer to self)]
         *   .priv          = ADJUSTED[new priv] {
         *           .result   = ORIGINAL(result)
         *           .complete = ORIGINAL(base.complete)
         *           .data     = ORIGINAL(base.data)
         *   }
         * }
         */

        priv->result = req->result;
        priv->complete = req->base.complete;
        priv->data = req->base.data;
        /*
         * WARNING: We do not back up req->priv here! The req->priv
         *          is for internal use of the Crypto API and the
         *          user must _NOT_ _EVER_ depend on its contents!
         */

        req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
        req->base.complete = cplt;
        req->base.data = req;
        req->priv = priv;

        return 0;
}

static void ahash_restore_req(struct ahash_request *req)
{
        struct ahash_request_priv *priv = req->priv;

        /* Restore the original crypto request. */
        req->result = priv->result;
        req->base.complete = priv->complete;
        req->base.data = priv->data;
        req->priv = NULL;

        /* Free the priv allocated for the ADJUSTED request. */
        kzfree(priv);
}
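
/*
 * ahash_save_req() and ahash_restore_req() are used in pairs: whoever
 * saves a request must restore it exactly once, after copying the
 * digest out of the aligned bounce buffer and before the original
 * completion callback is invoked (see ahash_op_unaligned_finish()
 * below).
 */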

static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
{
        struct ahash_request_priv *priv = req->priv;

        if (err == -EINPROGRESS)
                return;

        if (!err)
                memcpy(priv->result, req->result,
                       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

        ahash_restore_req(req);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        /*
         * Restore the original request; see ahash_op_unaligned() for what
         * goes where.
         *
         * The "struct crypto_async_request *req" argument is in fact the
         * "req.base" of the ADJUSTED request from ahash_op_unaligned().
         * Since base.data was set to point back at the request itself,
         * req->data is the ADJUSTED "req".
         */

        /* First copy req->result into the original result buffer ... */
        ahash_op_unaligned_finish(areq, err);

        /* ... then complete the ORIGINAL request. */
        areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
                              int (*op)(struct ahash_request *))
{
        int err;

        err = ahash_save_req(req, ahash_op_unaligned_done);
        if (err)
                return err;

        err = op(req);
        ahash_op_unaligned_finish(req, err);

        return err;
}

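/*
 * Common dispatcher for final/finup/digest: if the caller's result
 * buffer violates the transform's alignmask, route the operation
 * through the save/restore machinery above; otherwise invoke the
 * operation directly.
 */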
static int crypto_ahash_op(struct ahash_request *req,
                           int (*op)(struct ahash_request *))
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned long alignmask = crypto_ahash_alignmask(tfm);

        if ((unsigned long)req->result & alignmask)
                return ahash_op_unaligned(req, op);

        return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
        return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
        return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
        return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
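
/*
 * Illustrative sketch (not part of this file): a one-shot digest
 * through this API with error handling elided; tfm, sg, result and
 * nbytes are assumed to be set up by the caller:
 *
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, 0, NULL, NULL);
 *	ahash_request_set_crypt(req, sg, result, nbytes);
 *	err = crypto_ahash_digest(req);
 *	ahash_request_free(req);
 */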
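
/*
 * Default ->finup() for drivers that only provide ->update() and
 * ->final(): run ->update() first, then ->final().  The completion is
 * split into two stages (done1 for update, done2 for final) so either
 * step may finish asynchronously.  The request save/restore bookkeeping
 * below mirrors ahash_save_req()/ahash_restore_req() but is still
 * open-coded.
 */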
static void ahash_def_finup_finish2(struct ahash_request *req, int err)
{
        struct ahash_request_priv *priv = req->priv;

        if (err == -EINPROGRESS)
                return;

        if (!err)
                memcpy(priv->result, req->result,
                       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

        kzfree(priv);
}

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;
        struct ahash_request_priv *priv = areq->priv;
        crypto_completion_t complete = priv->complete;
        void *data = priv->data;

        ahash_def_finup_finish2(areq, err);

        complete(data, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
        if (err)
                goto out;

        req->base.complete = ahash_def_finup_done2;
        req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
        err = crypto_ahash_reqtfm(req)->final(req);

out:
        ahash_def_finup_finish2(req, err);
        return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;
        struct ahash_request_priv *priv = areq->priv;
        crypto_completion_t complete = priv->complete;
        void *data = priv->data;

        err = ahash_def_finup_finish1(areq, err);

        complete(data, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        unsigned int ds = crypto_ahash_digestsize(tfm);
        struct ahash_request_priv *priv;

        priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
                       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC);
        if (!priv)
                return -ENOMEM;

        priv->result = req->result;
        priv->complete = req->base.complete;
        priv->data = req->base.data;

        req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
        req->base.complete = ahash_def_finup_done1;
        req->base.data = req;
        req->priv = priv;

        return ahash_def_finup_finish1(req, tfm->update(req));
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
        return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
        return -ENOSYS;
}

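/*
 * Transform setup: unimplemented optional ops default to the -ENOSYS
 * stubs above.  If the algorithm is really a synchronous hash (its
 * cra_type is not crypto_ahash_type), it is wrapped by the
 * async-over-shash adapter instead of being wired up directly.
 */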
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
        struct ahash_alg *alg = crypto_ahash_alg(hash);

        hash->setkey = ahash_nosetkey;
        hash->export = ahash_no_export;
        hash->import = ahash_no_import;

        if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
                return crypto_init_shash_ops_async(tfm);

        hash->init = alg->init;
        hash->update = alg->update;
        hash->final = alg->final;
        hash->finup = alg->finup ?: ahash_def_finup;
        hash->digest = alg->digest;

        if (alg->setkey)
                hash->setkey = alg->setkey;
        if (alg->export)
                hash->export = alg->export;
        if (alg->import)
                hash->import = alg->import;

        return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
        if (alg->cra_type == &crypto_ahash_type)
                return alg->cra_ctxsize;

        return sizeof(struct crypto_shash *);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_hash rhash;

        strncpy(rhash.type, "ahash", sizeof(rhash.type));

        rhash.blocksize = alg->cra_blocksize;
        rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

        if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
                    sizeof(struct crypto_report_hash), &rhash))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
        __attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
        seq_printf(m, "type         : ahash\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "digestsize   : %u\n",
                   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
        .extsize = crypto_ahash_extsize,
        .init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
        .show = crypto_ahash_show,
#endif
        .report = crypto_ahash_report,
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
        .type = CRYPTO_ALG_TYPE_AHASH,
        .tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
                                        u32 mask)
{
        return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
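
/*
 * Illustrative sketch (not part of this file): allocating and releasing
 * a transform by algorithm name:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_ahash(tfm);
 */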

static int ahash_prepare_alg(struct ahash_alg *alg)
{
        struct crypto_alg *base = &alg->halg.base;

        if (alg->halg.digestsize > PAGE_SIZE / 8 ||
            alg->halg.statesize > PAGE_SIZE / 8)
                return -EINVAL;

        base->cra_type = &crypto_ahash_type;
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

        return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
        struct crypto_alg *base = &alg->halg.base;
        int err;

        err = ahash_prepare_alg(alg);
        if (err)
                return err;

        return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);
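
/*
 * Illustrative sketch (not part of this file): a driver registers an
 * implementation roughly as below; all my_* identifiers are
 * hypothetical and several fields (e.g. cra_ctxsize) are omitted:
 *
 *	static struct ahash_alg my_alg = {
 *		.init            = my_init,
 *		.update          = my_update,
 *		.final           = my_final,
 *		.digest          = my_digest,
 *		.halg.digestsize = 32,
 *		.halg.base       = {
 *			.cra_name        = "sha256",
 *			.cra_driver_name = "sha256-mydriver",
 *			.cra_priority    = 300,
 *			.cra_flags       = CRYPTO_ALG_ASYNC,
 *			.cra_blocksize   = 64,
 *		},
 *	};
 *
 *	err = crypto_register_ahash(&my_alg);
 */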

int crypto_unregister_ahash(struct ahash_alg *alg)
{
        return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
                            struct ahash_instance *inst)
{
        int err;

        err = ahash_prepare_alg(&inst->alg);
        if (err)
                return err;

        return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
        crypto_drop_spawn(crypto_instance_ctx(inst));
        kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
                            struct hash_alg_common *alg,
                            struct crypto_instance *inst)
{
        return crypto_init_spawn2(&spawn->base, &alg->base, inst,
                                  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
        struct crypto_alg *alg;

        alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
        return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");