crc-t10dif: use fallback in initial state

Currently the crc-t10dif module starts out with the fallback disabled
and crct10dif_tfm == NULL.  crc_t10dif_mod_init() tries to allocate
crct10dif_tfm, and if it fails it enables the fallback.

This is backwards because it means that any call to crc_t10dif() prior
to module_init (which could theoretically happen from built-in code)
will crash rather than use the fallback as expected.  Also, it means
that if the initial tfm allocation fails, then the fallback stays
permanently enabled even if a crct10dif implementation is loaded later.
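
To make the ordering problem concrete, the pre-patch fast path of
crc_t10dif_update() is roughly the following (a condensed sketch of the
code being patched, not an exact excerpt; the comments are added here for
illustration):

	if (static_key_false(&crct10dif_fallback))	/* disabled at boot */
		return crc_t10dif_generic(crc, buffer, len);

	rcu_read_lock();
	/* still NULL before crc_t10dif_mod_init() has run */
	desc.shash.tfm = rcu_dereference(crct10dif_tfm);
	err = crypto_shash_update(&desc.shash, buffer, len);

A built-in caller that runs before crc_t10dif_mod_init() therefore skips
the fallback and oopses on the NULL tfm.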

Change it to use the more logical solution of starting with the fallback
enabled, and disabling the fallback when a tfm gets allocated for the
first time.  This change also ends up simplifying the code.
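
Concretely, crc_t10dif_rehash() now ends as follows (taken from the diff
below, with explanatory comments added here):

	rcu_assign_pointer(crct10dif_tfm, new);
	mutex_unlock(&crc_t10dif_mutex);

	if (old) {
		/* Replacing an existing tfm: wait for readers, then free it. */
		synchronize_rcu();
		crypto_free_shash(old);
	} else {
		/* First tfm ever installed: stop using the fallback. */
		static_branch_disable(&crct10dif_fallback);
	}

Since the key is defined with DEFINE_STATIC_KEY_TRUE(), every caller uses
crc_t10dif_generic() until this point, including callers that run before
crc_t10dif_mod_init().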

Also take the opportunity to convert the code to use the new static_key
API, which is much less confusing than the old and deprecated one.
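
With the new API, a key defined by DEFINE_STATIC_KEY_TRUE() starts out
true, is tested with static_branch_unlikely()/static_branch_likely(), and
is flipped with static_branch_disable()/static_branch_enable().  The
pattern this patch ends up with is roughly (comments added for
illustration, not part of the patch):

	static DEFINE_STATIC_KEY_TRUE(crct10dif_fallback);	/* true at boot */

	/* Compiled as a patchable jump: taken while the key is true,
	 * a no-op once the key has been disabled. */
	if (static_branch_unlikely(&crct10dif_fallback))
		return crc_t10dif_generic(crc, buffer, len);

	static_branch_disable(&crct10dif_fallback);	/* flip key to false */

The old API instead required a bare struct static_key together with
static_key_false() and static_key_slow_inc(), as seen in the removed
lines below.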

Cc: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Author:    Eric Biggers <ebiggers@google.com>  2020-06-09 23:39:42 -07:00
Committer: Herbert Xu <herbert@gondor.apana.org.au>
Commit:    be924e0aaa (parent 57b1aac1b4)

--- a/lib/crc-t10dif.c
+++ b/lib/crc-t10dif.c
@@ -17,7 +17,7 @@
 #include <linux/notifier.h>
 
 static struct crypto_shash __rcu *crct10dif_tfm;
-static struct static_key crct10dif_fallback __read_mostly;
+static DEFINE_STATIC_KEY_TRUE(crct10dif_fallback);
 static DEFINE_MUTEX(crc_t10dif_mutex);
 static struct work_struct crct10dif_rehash_work;
@@ -26,7 +26,6 @@ static int crc_t10dif_notify(struct notifier_block *self, unsigned long val, void *data)
 	struct crypto_alg *alg = data;
 
 	if (val != CRYPTO_MSG_ALG_LOADED ||
-	    static_key_false(&crct10dif_fallback) ||
 	    strncmp(alg->cra_name, CRC_T10DIF_STRING, strlen(CRC_T10DIF_STRING)))
 		return 0;
@@ -41,10 +40,6 @@ static void crc_t10dif_rehash(struct work_struct *work)
 	mutex_lock(&crc_t10dif_mutex);
 	old = rcu_dereference_protected(crct10dif_tfm,
 					lockdep_is_held(&crc_t10dif_mutex));
-	if (!old) {
-		mutex_unlock(&crc_t10dif_mutex);
-		return;
-	}
 	new = crypto_alloc_shash("crct10dif", 0, 0);
 	if (IS_ERR(new)) {
 		mutex_unlock(&crc_t10dif_mutex);
@@ -53,8 +48,12 @@ static void crc_t10dif_rehash(struct work_struct *work)
 	rcu_assign_pointer(crct10dif_tfm, new);
 	mutex_unlock(&crc_t10dif_mutex);
 
-	synchronize_rcu();
-	crypto_free_shash(old);
+	if (old) {
+		synchronize_rcu();
+		crypto_free_shash(old);
+	} else {
+		static_branch_disable(&crct10dif_fallback);
+	}
 }
 
 static struct notifier_block crc_t10dif_nb = {
@@ -69,7 +68,7 @@ __u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
 	} desc;
 	int err;
 
-	if (static_key_false(&crct10dif_fallback))
+	if (static_branch_unlikely(&crct10dif_fallback))
 		return crc_t10dif_generic(crc, buffer, len);
 
 	rcu_read_lock();
@@ -93,18 +92,9 @@ EXPORT_SYMBOL(crc_t10dif);
 
 static int __init crc_t10dif_mod_init(void)
 {
-	struct crypto_shash *tfm;
-
 	INIT_WORK(&crct10dif_rehash_work, crc_t10dif_rehash);
 	crypto_register_notifier(&crc_t10dif_nb);
-	mutex_lock(&crc_t10dif_mutex);
-	tfm = crypto_alloc_shash("crct10dif", 0, 0);
-	if (IS_ERR(tfm)) {
-		static_key_slow_inc(&crct10dif_fallback);
-		tfm = NULL;
-	}
-	RCU_INIT_POINTER(crct10dif_tfm, tfm);
-	mutex_unlock(&crc_t10dif_mutex);
+	crc_t10dif_rehash(&crct10dif_rehash_work);
 	return 0;
 }
@@ -124,20 +114,13 @@ static int crc_t10dif_transform_show(char *buffer, const struct kernel_param *kp)
 	const char *name;
 	int len;
 
-	if (static_key_false(&crct10dif_fallback))
+	if (static_branch_unlikely(&crct10dif_fallback))
 		return sprintf(buffer, "fallback\n");
 
 	rcu_read_lock();
 	tfm = rcu_dereference(crct10dif_tfm);
-	if (!tfm) {
-		len = sprintf(buffer, "init\n");
-		goto unlock;
-	}
 	name = crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm));
 	len = sprintf(buffer, "%s\n", name);
-
-unlock:
 	rcu_read_unlock();
 	return len;