commit 2f1f34c1bf

The "ahash" API provides access to both CPU-based and hardware
offload-based implementations of hash algorithms.  Typically the former
are implemented as "shash" algorithms under the hood, while the latter
are implemented as "ahash" algorithms.  The "ahash" API provides access
to both.  Various kernel subsystems use the ahash API because they want
to support hashing hardware offload without using a separate API for it.

Yet, the common case is that a crypto accelerator is not actually being
used, and ahash is just wrapping a CPU-based shash algorithm.

This patch optimizes the ahash API for that common case by eliminating
the extra indirect call for each ahash operation on top of shash.

It also fixes the double-counting of crypto stats in this scenario
(though CONFIG_CRYPTO_STATS should *not* be enabled by anyone interested
in performance anyway...), and it eliminates redundant checking of
CRYPTO_TFM_NEED_KEY.  As a bonus, it also shrinks struct crypto_ahash.

Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synchronous Cryptographic Hash operations.
 *
 * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <net/netlink.h>

#include "hash.h"

static inline struct crypto_istat_hash *shash_get_stat(struct shash_alg *alg)
{
	return hash_get_stat(&alg->halg);
}

static inline int crypto_shash_errstat(struct shash_alg *alg, int err)
{
	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
		return err;

	if (err && err != -EINPROGRESS && err != -EBUSY)
		atomic64_inc(&shash_get_stat(alg)->err_cnt);

	return err;
}

int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
		    unsigned int keylen)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(shash_no_setkey);

static void shash_set_needkey(struct crypto_shash *tfm, struct shash_alg *alg)
{
	if (crypto_shash_alg_needs_key(alg))
		crypto_shash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
			unsigned int keylen)
{
	struct shash_alg *shash = crypto_shash_alg(tfm);
	int err;

	err = shash->setkey(tfm, key, keylen);
	if (unlikely(err)) {
		shash_set_needkey(tfm, shash);
		return err;
	}

	crypto_shash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_shash_setkey);
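
/*
 * Usage sketch (illustrative, not part of this file): keyed algorithms
 * such as "hmac(sha256)" must be keyed before hashing; until setkey
 * succeeds, CRYPTO_TFM_NEED_KEY stays set and crypto_shash_digest()
 * fails with -ENOKEY.  Variable names here are assumptions.
 *
 *	struct crypto_shash *tfm;
 *	int err;
 *
 *	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_shash_setkey(tfm, key, keylen);
 *	if (err)
 *		goto free_tfm;	// tfm is still flagged NEED_KEY
 */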

int crypto_shash_update(struct shash_desc *desc, const u8 *data,
			unsigned int len)
{
	struct shash_alg *shash = crypto_shash_alg(desc->tfm);
	int err;

	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
		atomic64_add(len, &shash_get_stat(shash)->hash_tlen);

	err = shash->update(desc, data, len);

	return crypto_shash_errstat(shash, err);
}
EXPORT_SYMBOL_GPL(crypto_shash_update);

int crypto_shash_final(struct shash_desc *desc, u8 *out)
{
	struct shash_alg *shash = crypto_shash_alg(desc->tfm);
	int err;

	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
		atomic64_inc(&shash_get_stat(shash)->hash_cnt);

	err = shash->final(desc, out);

	return crypto_shash_errstat(shash, err);
}
EXPORT_SYMBOL_GPL(crypto_shash_final);
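
/*
 * Usage sketch (illustrative, not part of this file): a multi-part hash
 * over two buffers with an on-stack descriptor.  "tfm" is a previously
 * allocated crypto_shash; buffer names are assumptions.
 *
 *	SHASH_DESC_ON_STACK(desc, tfm);
 *	int err;
 *
 *	desc->tfm = tfm;
 *	err = crypto_shash_init(desc) ?:
 *	      crypto_shash_update(desc, buf1, len1) ?:
 *	      crypto_shash_update(desc, buf2, len2) ?:
 *	      crypto_shash_final(desc, digest);
 */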

static int shash_default_finup(struct shash_desc *desc, const u8 *data,
			       unsigned int len, u8 *out)
{
	struct shash_alg *shash = crypto_shash_alg(desc->tfm);

	return shash->update(desc, data, len) ?:
	       shash->final(desc, out);
}

int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
		       unsigned int len, u8 *out)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);
	int err;

	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		struct crypto_istat_hash *istat = shash_get_stat(shash);

		atomic64_inc(&istat->hash_cnt);
		atomic64_add(len, &istat->hash_tlen);
	}

	err = shash->finup(desc, data, len, out);

	return crypto_shash_errstat(shash, err);
}
EXPORT_SYMBOL_GPL(crypto_shash_finup);

static int shash_default_digest(struct shash_desc *desc, const u8 *data,
				unsigned int len, u8 *out)
{
	struct shash_alg *shash = crypto_shash_alg(desc->tfm);

	return shash->init(desc) ?:
	       shash->finup(desc, data, len, out);
}

int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
			unsigned int len, u8 *out)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);
	int err;

	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		struct crypto_istat_hash *istat = shash_get_stat(shash);

		atomic64_inc(&istat->hash_cnt);
		atomic64_add(len, &istat->hash_tlen);
	}

	if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		err = -ENOKEY;
	else
		err = shash->digest(desc, data, len, out);

	return crypto_shash_errstat(shash, err);
}
EXPORT_SYMBOL_GPL(crypto_shash_digest);

int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
			    unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	int err;

	desc->tfm = tfm;

	err = crypto_shash_digest(desc, data, len, out);

	shash_desc_zero(desc);

	return err;
}
EXPORT_SYMBOL_GPL(crypto_shash_tfm_digest);
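
/*
 * The helper above is the one-shot convenience path: it hides the
 * on-stack descriptor entirely, so a caller that already holds a tfm
 * can hash a single flat buffer with one call (sketch; names are
 * assumptions):
 *
 *	err = crypto_shash_tfm_digest(tfm, data, len, digest);
 *
 * which is equivalent to SHASH_DESC_ON_STACK + crypto_shash_digest(),
 * with the descriptor zeroed afterwards.
 */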

int crypto_shash_export(struct shash_desc *desc, void *out)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);

	if (shash->export)
		return shash->export(desc, out);

	memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(tfm));
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_shash_export);

int crypto_shash_import(struct shash_desc *desc, const void *in)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);

	if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;

	if (shash->import)
		return shash->import(desc, in);

	memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(tfm));
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_shash_import);
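
/*
 * Usage sketch (illustrative, not part of this file): export/import
 * checkpoint a partial hash state so it can be resumed later, possibly
 * on a different descriptor.  Sizing the buffer with HASH_MAX_STATESIZE
 * (or crypto_shash_statesize(tfm)) is assumed here; buffer names are
 * assumptions.
 *
 *	u8 state[HASH_MAX_STATESIZE];
 *	int err;
 *
 *	err = crypto_shash_update(desc, buf1, len1) ?:
 *	      crypto_shash_export(desc, state);
 *	...
 *	err = crypto_shash_import(desc2, state) ?:
 *	      crypto_shash_finup(desc2, buf2, len2, digest);
 */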

static void crypto_shash_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_shash *hash = __crypto_shash_cast(tfm);
	struct shash_alg *alg = crypto_shash_alg(hash);

	alg->exit_tfm(hash);
}

static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_shash *hash = __crypto_shash_cast(tfm);
	struct shash_alg *alg = crypto_shash_alg(hash);
	int err;

	hash->descsize = alg->descsize;

	shash_set_needkey(hash, alg);

	if (alg->exit_tfm)
		tfm->exit = crypto_shash_exit_tfm;

	if (!alg->init_tfm)
		return 0;

	err = alg->init_tfm(hash);
	if (err)
		return err;

	/* ->init_tfm() may have increased the descsize. */
	if (WARN_ON_ONCE(hash->descsize > HASH_MAX_DESCSIZE)) {
		if (alg->exit_tfm)
			alg->exit_tfm(hash);
		return -EINVAL;
	}

	return 0;
}

static void crypto_shash_free_instance(struct crypto_instance *inst)
{
	struct shash_instance *shash = shash_instance(inst);

	shash->free(shash);
}

static int __maybe_unused crypto_shash_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;
	struct shash_alg *salg = __crypto_shash_alg(alg);

	memset(&rhash, 0, sizeof(rhash));

	strscpy(rhash.type, "shash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = salg->digestsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}

static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct shash_alg *salg = __crypto_shash_alg(alg);

	seq_printf(m, "type         : shash\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n", salg->digestsize);
}

static int __maybe_unused crypto_shash_report_stat(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	return crypto_hash_report_stat(skb, alg, "shash");
}

const struct crypto_type crypto_shash_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_shash_init_tfm,
	.free = crypto_shash_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_shash_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_shash_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
	.report_stat = crypto_shash_report_stat,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SHASH,
	.tfmsize = offsetof(struct crypto_shash, base),
};

int crypto_grab_shash(struct crypto_shash_spawn *spawn,
		      struct crypto_instance *inst,
		      const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_shash_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_shash);

struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_shash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_shash);

int crypto_has_shash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_shash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_shash);

struct crypto_shash *crypto_clone_shash(struct crypto_shash *hash)
{
	struct crypto_tfm *tfm = crypto_shash_tfm(hash);
	struct shash_alg *alg = crypto_shash_alg(hash);
	struct crypto_shash *nhash;
	int err;

	if (!crypto_shash_alg_has_setkey(alg)) {
		tfm = crypto_tfm_get(tfm);
		if (IS_ERR(tfm))
			return ERR_CAST(tfm);

		return hash;
	}

	if (!alg->clone_tfm && (alg->init_tfm || alg->base.cra_init))
		return ERR_PTR(-ENOSYS);

	nhash = crypto_clone_tfm(&crypto_shash_type, tfm);
	if (IS_ERR(nhash))
		return nhash;

	nhash->descsize = hash->descsize;

	if (alg->clone_tfm) {
		err = alg->clone_tfm(nhash, hash);
		if (err) {
			crypto_free_shash(nhash);
			return ERR_PTR(err);
		}
	}

	return nhash;
}
EXPORT_SYMBOL_GPL(crypto_clone_shash);

int hash_prepare_alg(struct hash_alg_common *alg)
{
	struct crypto_istat_hash *istat = hash_get_stat(alg);
	struct crypto_alg *base = &alg->base;

	if (alg->digestsize > HASH_MAX_DIGESTSIZE)
		return -EINVAL;

	/* alignmask is not useful for hashes, so it is not supported. */
	if (base->cra_alignmask)
		return -EINVAL;

	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;

	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
		memset(istat, 0, sizeof(*istat));

	return 0;
}

static int shash_prepare_alg(struct shash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	if (alg->descsize > HASH_MAX_DESCSIZE)
		return -EINVAL;

	if ((alg->export && !alg->import) || (alg->import && !alg->export))
		return -EINVAL;

	err = hash_prepare_alg(&alg->halg);
	if (err)
		return err;

	base->cra_type = &crypto_shash_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_SHASH;

	/*
	 * Handle missing optional functions.  For each one we can either
	 * install a default here, or we can leave the pointer as NULL and check
	 * the pointer for NULL in crypto_shash_*(), avoiding an indirect call
	 * when the default behavior is desired.  For ->finup and ->digest we
	 * install defaults, since for optimal performance algorithms should
	 * implement these anyway.  On the other hand, for ->import and
	 * ->export the common case and best performance comes from the simple
	 * memcpy of the shash_desc_ctx, so when those pointers are NULL we
	 * leave them NULL and provide the memcpy with no indirect call.
	 */
	if (!alg->finup)
		alg->finup = shash_default_finup;
	if (!alg->digest)
		alg->digest = shash_default_digest;
	if (!alg->export)
		alg->halg.statesize = alg->descsize;
	if (!alg->setkey)
		alg->setkey = shash_no_setkey;

	return 0;
}

int crypto_register_shash(struct shash_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = shash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_shash);
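
/*
 * Registration sketch (illustrative, not from this file): an algorithm
 * implementation typically fills in a static struct shash_alg and
 * registers it from module init.  The "foo" names are hypothetical.
 *
 *	static struct shash_alg foo_alg = {
 *		.digestsize	= FOO_DIGEST_SIZE,
 *		.init		= foo_init,
 *		.update		= foo_update,
 *		.final		= foo_final,
 *		.descsize	= sizeof(struct foo_desc_ctx),
 *		.base		= {
 *			.cra_name	 = "foo",
 *			.cra_driver_name = "foo-generic",
 *			.cra_blocksize	 = FOO_BLOCK_SIZE,
 *			.cra_module	 = THIS_MODULE,
 *		},
 *	};
 *
 *	static int __init foo_mod_init(void)
 *	{
 *		return crypto_register_shash(&foo_alg);
 *	}
 */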

void crypto_unregister_shash(struct shash_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_shash);

int crypto_register_shashes(struct shash_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_shash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_shash(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_shashes);

void crypto_unregister_shashes(struct shash_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_shash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_shashes);

int shash_register_instance(struct crypto_template *tmpl,
			    struct shash_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = shash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, shash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(shash_register_instance);

void shash_free_singlespawn_instance(struct shash_instance *inst)
{
	crypto_drop_spawn(shash_instance_ctx(inst));
	kfree(inst);
}
EXPORT_SYMBOL_GPL(shash_free_singlespawn_instance);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous cryptographic hash type");