|
29 | 29 |
|
30 | 30 | static const struct crypto_type crypto_ahash_type;
|
31 | 31 |
|
| 32 | +static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key, |
| 33 | + unsigned int keylen) |
| 34 | +{ |
| 35 | + struct crypto_shash **ctx = crypto_ahash_ctx(tfm); |
| 36 | + |
| 37 | + return crypto_shash_setkey(*ctx, key, keylen); |
| 38 | +} |
| 39 | + |
| 40 | +static int shash_async_init(struct ahash_request *req) |
| 41 | +{ |
| 42 | + struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); |
| 43 | + struct shash_desc *desc = ahash_request_ctx(req); |
| 44 | + |
| 45 | + desc->tfm = *ctx; |
| 46 | + |
| 47 | + return crypto_shash_init(desc); |
| 48 | +} |
| 49 | + |
| 50 | +int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc) |
| 51 | +{ |
| 52 | + struct crypto_hash_walk walk; |
| 53 | + int nbytes; |
| 54 | + |
| 55 | + for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0; |
| 56 | + nbytes = crypto_hash_walk_done(&walk, nbytes)) |
| 57 | + nbytes = crypto_shash_update(desc, walk.data, nbytes); |
| 58 | + |
| 59 | + return nbytes; |
| 60 | +} |
| 61 | +EXPORT_SYMBOL_GPL(shash_ahash_update); |
| 62 | + |
/* ahash ->update hook: the request context is the live shash_desc. */
static int shash_async_update(struct ahash_request *req)
{
	struct shash_desc *desc = ahash_request_ctx(req);

	return shash_ahash_update(req, desc);
}
| 67 | + |
| 68 | +static int shash_async_final(struct ahash_request *req) |
| 69 | +{ |
| 70 | + return crypto_shash_final(ahash_request_ctx(req), req->result); |
| 71 | +} |
| 72 | + |
| 73 | +int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc) |
| 74 | +{ |
| 75 | + struct crypto_hash_walk walk; |
| 76 | + int nbytes; |
| 77 | + |
| 78 | + nbytes = crypto_hash_walk_first(req, &walk); |
| 79 | + if (!nbytes) |
| 80 | + return crypto_shash_final(desc, req->result); |
| 81 | + |
| 82 | + do { |
| 83 | + nbytes = crypto_hash_walk_last(&walk) ? |
| 84 | + crypto_shash_finup(desc, walk.data, nbytes, |
| 85 | + req->result) : |
| 86 | + crypto_shash_update(desc, walk.data, nbytes); |
| 87 | + nbytes = crypto_hash_walk_done(&walk, nbytes); |
| 88 | + } while (nbytes > 0); |
| 89 | + |
| 90 | + return nbytes; |
| 91 | +} |
| 92 | +EXPORT_SYMBOL_GPL(shash_ahash_finup); |
| 93 | + |
| 94 | +static int shash_async_finup(struct ahash_request *req) |
| 95 | +{ |
| 96 | + struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); |
| 97 | + struct shash_desc *desc = ahash_request_ctx(req); |
| 98 | + |
| 99 | + desc->tfm = *ctx; |
| 100 | + |
| 101 | + return shash_ahash_finup(req, desc); |
| 102 | +} |
| 103 | + |
| 104 | +int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc) |
| 105 | +{ |
| 106 | + unsigned int nbytes = req->nbytes; |
| 107 | + struct scatterlist *sg; |
| 108 | + unsigned int offset; |
| 109 | + int err; |
| 110 | + |
| 111 | + if (nbytes && |
| 112 | + (sg = req->src, offset = sg->offset, |
| 113 | + nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) { |
| 114 | + void *data; |
| 115 | + |
| 116 | + data = kmap_local_page(sg_page(sg)); |
| 117 | + err = crypto_shash_digest(desc, data + offset, nbytes, |
| 118 | + req->result); |
| 119 | + kunmap_local(data); |
| 120 | + } else |
| 121 | + err = crypto_shash_init(desc) ?: |
| 122 | + shash_ahash_finup(req, desc); |
| 123 | + |
| 124 | + return err; |
| 125 | +} |
| 126 | +EXPORT_SYMBOL_GPL(shash_ahash_digest); |
| 127 | + |
| 128 | +static int shash_async_digest(struct ahash_request *req) |
| 129 | +{ |
| 130 | + struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); |
| 131 | + struct shash_desc *desc = ahash_request_ctx(req); |
| 132 | + |
| 133 | + desc->tfm = *ctx; |
| 134 | + |
| 135 | + return shash_ahash_digest(req, desc); |
| 136 | +} |
| 137 | + |
/* ahash ->export hook: serialise the current shash state into @out. */
static int shash_async_export(struct ahash_request *req, void *out)
{
	struct shash_desc *desc = ahash_request_ctx(req);

	return crypto_shash_export(desc, out);
}
| 142 | + |
| 143 | +static int shash_async_import(struct ahash_request *req, const void *in) |
| 144 | +{ |
| 145 | + struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); |
| 146 | + struct shash_desc *desc = ahash_request_ctx(req); |
| 147 | + |
| 148 | + desc->tfm = *ctx; |
| 149 | + |
| 150 | + return crypto_shash_import(desc, in); |
| 151 | +} |
| 152 | + |
/* tfm ->exit hook: release the shash tfm owned by the ahash context. */
static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_shash **ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(*ctx);
}
| 159 | + |
| 160 | +static int crypto_init_shash_ops_async(struct crypto_tfm *tfm) |
| 161 | +{ |
| 162 | + struct crypto_alg *calg = tfm->__crt_alg; |
| 163 | + struct shash_alg *alg = __crypto_shash_alg(calg); |
| 164 | + struct crypto_ahash *crt = __crypto_ahash_cast(tfm); |
| 165 | + struct crypto_shash **ctx = crypto_tfm_ctx(tfm); |
| 166 | + struct crypto_shash *shash; |
| 167 | + |
| 168 | + if (!crypto_mod_get(calg)) |
| 169 | + return -EAGAIN; |
| 170 | + |
| 171 | + shash = crypto_create_tfm(calg, &crypto_shash_type); |
| 172 | + if (IS_ERR(shash)) { |
| 173 | + crypto_mod_put(calg); |
| 174 | + return PTR_ERR(shash); |
| 175 | + } |
| 176 | + |
| 177 | + *ctx = shash; |
| 178 | + tfm->exit = crypto_exit_shash_ops_async; |
| 179 | + |
| 180 | + crt->init = shash_async_init; |
| 181 | + crt->update = shash_async_update; |
| 182 | + crt->final = shash_async_final; |
| 183 | + crt->finup = shash_async_finup; |
| 184 | + crt->digest = shash_async_digest; |
| 185 | + if (crypto_shash_alg_has_setkey(alg)) |
| 186 | + crt->setkey = shash_async_setkey; |
| 187 | + |
| 188 | + crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) & |
| 189 | + CRYPTO_TFM_NEED_KEY); |
| 190 | + |
| 191 | + crt->export = shash_async_export; |
| 192 | + crt->import = shash_async_import; |
| 193 | + |
| 194 | + crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash); |
| 195 | + |
| 196 | + return 0; |
| 197 | +} |
| 198 | + |
/*
 * Clone the shash tfm backing @hash into the context of the already
 * allocated ahash clone @nhash.  On failure @nhash is freed and the
 * error from crypto_clone_shash() is propagated.
 */
static struct crypto_ahash *
crypto_clone_shash_ops_async(struct crypto_ahash *nhash,
			     struct crypto_ahash *hash)
{
	struct crypto_shash **nctx = crypto_ahash_ctx(nhash);
	struct crypto_shash **ctx = crypto_ahash_ctx(hash);
	struct crypto_shash *clone;

	clone = crypto_clone_shash(*ctx);
	if (IS_ERR(clone)) {
		crypto_free_ahash(nhash);
		return ERR_CAST(clone);
	}

	*nctx = clone;
	return nhash;
}
| 217 | + |
32 | 218 | static int hash_walk_next(struct crypto_hash_walk *walk)
|
33 | 219 | {
|
34 | 220 | unsigned int offset = walk->offset;
|
|
0 commit comments