Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto fixes from Herbert Xu:
 "This fixes the following issues:

  API:
   - Fix async algif_skcipher, which was broken by recent fixes.
   - Fix potential race condition in algif_skcipher when ctx is
     dereferenced without the socket lock.
   - Fix potential memory corruption in algif_skcipher.
   - Add missing lock to crypto_user when doing an alg dump.

  Drivers:
   - marvell/cesa was testing the wrong variable for NULL after
     allocation.
   - Fix potential double-free in atmel-sha.
   - Fix illegal call to a sleeping function from atomic context in
     atmel-sha"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: marvell/cesa - fix test in mv_cesa_dev_dma_init()
  crypto: atmel-sha - remove calls of clk_prepare() from atomic contexts
  crypto: atmel-sha - fix atmel_sha_remove()
  crypto: algif_skcipher - Do not set MAY_BACKLOG on the async path
  crypto: algif_skcipher - Do not dereference ctx without socket lock
  crypto: algif_skcipher - Do not assume that req is unchanged
  crypto: user - lock crypto_alg_list on alg dump
Linus Torvalds 2016-02-09 10:15:16 -08:00
commit 7cf91adc0d
4 changed files with 62 additions and 53 deletions
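
All four algif_skcipher patches above harden the AF_ALG userspace
interface. For orientation, here is a minimal sketch of how that
interface is driven from userspace: standard AF_ALG socket usage with an
arbitrary algorithm name, key, IV and plaintext, and error handling
omitted for brevity. The async path being fixed is driven the same way,
but through the kernel aio interface instead of the plain read() shown
here.

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
        struct sockaddr_alg sa = {
                .salg_family = AF_ALG,
                .salg_type   = "skcipher",
                .salg_name   = "cbc(aes)",
        };
        char key[16] = "0123456789abcdef";
        char pt[16]  = "single block msg";
        char ct[16];
        struct af_alg_iv *alg_iv;
        struct cmsghdr *cmsg;
        struct iovec iov = { .iov_base = pt, .iov_len = sizeof(pt) };
        /* room for ALG_SET_OP (a __u32) and ALG_SET_IV (ivlen + IV) */
        char cbuf[CMSG_SPACE(sizeof(__u32)) +
                  CMSG_SPACE(sizeof(*alg_iv) + 16)] = { 0 };
        struct msghdr msg = {
                .msg_control    = cbuf,
                .msg_controllen = sizeof(cbuf),
                .msg_iov        = &iov,
                .msg_iovlen     = 1,
        };
        int tfmfd, opfd;

        tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
        bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
        setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
        opfd = accept(tfmfd, NULL, 0);          /* request socket */

        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_level = SOL_ALG;
        cmsg->cmsg_type  = ALG_SET_OP;
        cmsg->cmsg_len   = CMSG_LEN(sizeof(__u32));
        *(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

        cmsg = CMSG_NXTHDR(&msg, cmsg);
        cmsg->cmsg_level = SOL_ALG;
        cmsg->cmsg_type  = ALG_SET_IV;
        cmsg->cmsg_len   = CMSG_LEN(sizeof(*alg_iv) + 16);
        alg_iv = (struct af_alg_iv *)CMSG_DATA(cmsg);
        alg_iv->ivlen = 16;
        memcpy(alg_iv->iv, "iviviviviviviviv", 16);

        sendmsg(opfd, &msg, 0);                 /* submit plaintext */
        read(opfd, ct, sizeof(ct));             /* collect ciphertext */

        close(opfd);
        close(tfmfd);
        return 0;
}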

diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c

@@ -65,18 +65,10 @@ struct skcipher_async_req {
         struct skcipher_async_rsgl first_sgl;
         struct list_head list;
         struct scatterlist *tsg;
-        char iv[];
+        atomic_t *inflight;
+        struct skcipher_request req;
 };
 
-#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
-        crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req)))
-
-#define GET_REQ_SIZE(ctx) \
-        crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req))
-
-#define GET_IV_SIZE(ctx) \
-        crypto_skcipher_ivsize(crypto_skcipher_reqtfm(&ctx->req))
-
 #define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
                       sizeof(struct scatterlist) - 1)
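
The point of the hunk above is the allocation layout: instead of
deriving offsets through ctx->req with the GET_* macros (which races
with changes to the context's request), the per-request context and IV
now trail the skcipher_async_req in a single allocation, so the
completion callback can recover everything from one pointer and a
single kzfree() releases it all. A userspace analogue of that carving,
with illustrative names that are not kernel API:

#include <stdlib.h>
#include <string.h>

struct request {                        /* stand-in for skcipher_request */
        int flags;
};

struct async_req {                      /* stand-in for skcipher_async_req */
        void *iocb;                     /* ... bookkeeping fields ...      */
        struct request req;             /* must be last: context follows   */
};

/* One allocation holds [async_req][request context][IV], mirroring
 * sreq = kzalloc(sizeof(*sreq) + reqsize + ivsize, ...) in the diff
 * below; freeing sreq releases all three pieces at once, and a
 * callback handed sreq needs no other state to find them. */
static struct async_req *async_req_alloc(size_t reqsize, size_t ivsize,
                                         const char *iv_src)
{
        struct async_req *sreq = calloc(1, sizeof(*sreq) + reqsize + ivsize);
        struct request *req;
        char *iv;

        if (!sreq)
                return NULL;

        req = &sreq->req;
        iv = (char *)(req + 1) + reqsize;   /* IV after the req context */
        memcpy(iv, iv_src, ivsize);
        return sreq;
}
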
@@ -102,15 +94,12 @@ static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
 static void skcipher_async_cb(struct crypto_async_request *req, int err)
 {
-        struct sock *sk = req->data;
-        struct alg_sock *ask = alg_sk(sk);
-        struct skcipher_ctx *ctx = ask->private;
-        struct skcipher_async_req *sreq = GET_SREQ(req, ctx);
+        struct skcipher_async_req *sreq = req->data;
         struct kiocb *iocb = sreq->iocb;
 
-        atomic_dec(&ctx->inflight);
+        atomic_dec(sreq->inflight);
         skcipher_free_async_sgls(sreq);
-        kfree(req);
+        kzfree(sreq);
         iocb->ki_complete(iocb, err, err);
 }
@@ -306,8 +295,11 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
 {
         struct sock *sk = sock->sk;
         struct alg_sock *ask = alg_sk(sk);
+        struct sock *psk = ask->parent;
+        struct alg_sock *pask = alg_sk(psk);
         struct skcipher_ctx *ctx = ask->private;
-        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);
+        struct skcipher_tfm *skc = pask->private;
+        struct crypto_skcipher *tfm = skc->skcipher;
         unsigned ivsize = crypto_skcipher_ivsize(tfm);
         struct skcipher_sg_list *sgl;
         struct af_alg_control con = {};
@@ -509,37 +501,43 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
 {
         struct sock *sk = sock->sk;
         struct alg_sock *ask = alg_sk(sk);
+        struct sock *psk = ask->parent;
+        struct alg_sock *pask = alg_sk(psk);
         struct skcipher_ctx *ctx = ask->private;
+        struct skcipher_tfm *skc = pask->private;
+        struct crypto_skcipher *tfm = skc->skcipher;
         struct skcipher_sg_list *sgl;
         struct scatterlist *sg;
         struct skcipher_async_req *sreq;
         struct skcipher_request *req;
         struct skcipher_async_rsgl *last_rsgl = NULL;
-        unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
-        unsigned int reqlen = sizeof(struct skcipher_async_req) +
-                              GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx);
+        unsigned int txbufs = 0, len = 0, tx_nents;
+        unsigned int reqsize = crypto_skcipher_reqsize(tfm);
+        unsigned int ivsize = crypto_skcipher_ivsize(tfm);
         int err = -ENOMEM;
         bool mark = false;
+        char *iv;
+
+        sreq = kzalloc(sizeof(*sreq) + reqsize + ivsize, GFP_KERNEL);
+        if (unlikely(!sreq))
+                goto out;
+
+        req = &sreq->req;
+        iv = (char *)(req + 1) + reqsize;
+        sreq->iocb = msg->msg_iocb;
+        INIT_LIST_HEAD(&sreq->list);
+        sreq->inflight = &ctx->inflight;
 
         lock_sock(sk);
-        req = kmalloc(reqlen, GFP_KERNEL);
-        if (unlikely(!req))
-                goto unlock;
-
-        sreq = GET_SREQ(req, ctx);
-        sreq->iocb = msg->msg_iocb;
-        memset(&sreq->first_sgl, '\0', sizeof(struct skcipher_async_rsgl));
-        INIT_LIST_HEAD(&sreq->list);
+        tx_nents = skcipher_all_sg_nents(ctx);
         sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
-        if (unlikely(!sreq->tsg)) {
-                kfree(req);
+        if (unlikely(!sreq->tsg))
                 goto unlock;
-        }
         sg_init_table(sreq->tsg, tx_nents);
-        memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx));
-        skcipher_request_set_tfm(req, crypto_skcipher_reqtfm(&ctx->req));
-        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-                                      skcipher_async_cb, sk);
+        memcpy(iv, ctx->iv, ivsize);
+        skcipher_request_set_tfm(req, tfm);
+        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
+                                      skcipher_async_cb, sreq);
 
         while (iov_iter_count(&msg->msg_iter)) {
                 struct skcipher_async_rsgl *rsgl;
@@ -615,20 +613,22 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
                 sg_mark_end(sreq->tsg + txbufs - 1);
 
         skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
-                                   len, sreq->iv);
+                                   len, iv);
         err = ctx->enc ? crypto_skcipher_encrypt(req) :
                          crypto_skcipher_decrypt(req);
         if (err == -EINPROGRESS) {
                 atomic_inc(&ctx->inflight);
                 err = -EIOCBQUEUED;
+                sreq = NULL;
                 goto unlock;
         }
+
 free:
         skcipher_free_async_sgls(sreq);
-        kfree(req);
 unlock:
         skcipher_wmem_wakeup(sk);
         release_sock(sk);
+        kzfree(sreq);
+out:
         return err;
 }
@@ -637,9 +637,12 @@ static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
 {
         struct sock *sk = sock->sk;
         struct alg_sock *ask = alg_sk(sk);
+        struct sock *psk = ask->parent;
+        struct alg_sock *pask = alg_sk(psk);
         struct skcipher_ctx *ctx = ask->private;
-        unsigned bs = crypto_skcipher_blocksize(crypto_skcipher_reqtfm(
-                &ctx->req));
+        struct skcipher_tfm *skc = pask->private;
+        struct crypto_skcipher *tfm = skc->skcipher;
+        unsigned bs = crypto_skcipher_blocksize(tfm);
         struct skcipher_sg_list *sgl;
         struct scatterlist *sg;
         int err = -EAGAIN;
@@ -947,7 +950,8 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
         ask->private = ctx;
 
         skcipher_request_set_tfm(&ctx->req, skcipher);
-        skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+        skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_SLEEP |
+                                                 CRYPTO_TFM_REQ_MAY_BACKLOG,
                                       af_alg_complete, &ctx->completion);
 
         sk->sk_destruct = skcipher_sock_destruct;

diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c

@@ -499,6 +499,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                 if (link->dump == NULL)
                         return -EINVAL;
 
+                down_read(&crypto_alg_sem);
                 list_for_each_entry(alg, &crypto_alg_list, cra_list)
                         dump_alloc += CRYPTO_REPORT_MAXSIZE;
 
@@ -508,8 +509,11 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                                 .done = link->done,
                                 .min_dump_alloc = dump_alloc,
                         };
-                        return netlink_dump_start(crypto_nlsk, skb, nlh, &c);
+                        err = netlink_dump_start(crypto_nlsk, skb, nlh, &c);
                 }
+                up_read(&crypto_alg_sem);
+
+                return err;
         }
 
         err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX,
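
The fix works because the read lock spans both the list walk that sizes
the dump buffer and netlink_dump_start(); without it, an algorithm
unregistration could free list entries between the two steps. A
userspace analogue of the same pattern built on a pthread rwlock
(illustrative names, not the kernel code):

#include <pthread.h>
#include <stddef.h>

struct alg { struct alg *next; };

static pthread_rwlock_t alg_sem = PTHREAD_RWLOCK_INITIALIZER;
static struct alg *alg_list;

static int dump_start(size_t min_alloc)  /* stub for netlink_dump_start() */
{
        (void)min_alloc;
        return 0;
}

static int dump_algs(void)
{
        size_t dump_alloc = 0;
        struct alg *alg;
        int err;

        /* Readers hold the lock across BOTH steps: sizing the dump and
         * starting it.  A writer (un)registering algorithms must wait,
         * so the list cannot change between the walk and the dump. */
        pthread_rwlock_rdlock(&alg_sem);

        for (alg = alg_list; alg; alg = alg->next)
                dump_alloc += 4096;        /* like CRYPTO_REPORT_MAXSIZE */

        err = dump_start(dump_alloc);      /* still under the read lock */

        pthread_rwlock_unlock(&alg_sem);
        return err;
}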

diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c

@@ -782,7 +782,7 @@ static void atmel_sha_finish_req(struct ahash_request *req, int err)
         dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
                         SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);
 
-        clk_disable_unprepare(dd->iclk);
+        clk_disable(dd->iclk);
 
         if (req->base.complete)
                 req->base.complete(&req->base, err);
@@ -795,7 +795,7 @@ static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
 {
         int err;
 
-        err = clk_prepare_enable(dd->iclk);
+        err = clk_enable(dd->iclk);
         if (err)
                 return err;
@@ -822,7 +822,7 @@ static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
         dev_info(dd->dev,
                         "version: 0x%x\n", dd->hw_version);
 
-        clk_disable_unprepare(dd->iclk);
+        clk_disable(dd->iclk);
 }
 
 static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
@@ -1410,6 +1410,10 @@ static int atmel_sha_probe(struct platform_device *pdev)
                 goto res_err;
         }
 
+        err = clk_prepare(sha_dd->iclk);
+        if (err)
+                goto res_err;
+
         atmel_sha_hw_version_init(sha_dd);
 
         atmel_sha_get_cap(sha_dd);
@@ -1421,12 +1425,12 @@ static int atmel_sha_probe(struct platform_device *pdev)
                 if (IS_ERR(pdata)) {
                         dev_err(&pdev->dev, "platform data not available\n");
                         err = PTR_ERR(pdata);
-                        goto res_err;
+                        goto iclk_unprepare;
                 }
         }
 
         if (!pdata->dma_slave) {
                 err = -ENXIO;
-                goto res_err;
+                goto iclk_unprepare;
         }
 
         err = atmel_sha_dma_init(sha_dd, pdata);
         if (err)
@@ -1457,6 +1461,8 @@ err_algs:
         if (sha_dd->caps.has_dma)
                 atmel_sha_dma_cleanup(sha_dd);
 err_sha_dma:
+iclk_unprepare:
+        clk_unprepare(sha_dd->iclk);
 res_err:
         tasklet_kill(&sha_dd->done_task);
 sha_dd_err:
@@ -1483,12 +1489,7 @@ static int atmel_sha_remove(struct platform_device *pdev)
         if (sha_dd->caps.has_dma)
                 atmel_sha_dma_cleanup(sha_dd);
 
-        iounmap(sha_dd->io_base);
-
-        clk_put(sha_dd->iclk);
-
-        if (sha_dd->irq >= 0)
-                free_irq(sha_dd->irq, sha_dd);
+        clk_unprepare(sha_dd->iclk);
 
         return 0;
 }
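
The clk API split behind these changes: clk_prepare() and
clk_unprepare() may sleep, so they are only legal in process context,
while clk_enable() and clk_disable() are atomic-safe. A driver whose
request path runs from a tasklet therefore prepares the clock once at
probe time and only enables/disables it around hardware access. A
skeleton of that division of labour (the driver shape here is an
illustrative sketch, not the full atmel-sha code):

#include <linux/clk.h>
#include <linux/platform_device.h>

struct foo_dev {                        /* illustrative driver state */
        struct clk *iclk;
};

static int foo_probe(struct platform_device *pdev)
{
        struct foo_dev *dd = dev_get_drvdata(&pdev->dev);

        /* Process context: do the (possibly sleeping) prepare once. */
        return clk_prepare(dd->iclk);
}

static void foo_do_request(struct foo_dev *dd)
{
        /* Tasklet/IRQ context: only the atomic-safe enable/disable
         * pair may bracket hardware access here. */
        if (clk_enable(dd->iclk))
                return;
        /* ... touch registers ... */
        clk_disable(dd->iclk);
}

static int foo_remove(struct platform_device *pdev)
{
        struct foo_dev *dd = dev_get_drvdata(&pdev->dev);

        clk_unprepare(dd->iclk);        /* process context again */
        return 0;
}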

diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c

@@ -306,7 +306,7 @@ static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
                 return -ENOMEM;
 
         dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
-        if (!dma->cache_pool)
+        if (!dma->padding_pool)
                 return -ENOMEM;
 
         cesa->dma = dma;