crypto: caam - add support for RSA key form 2
A CAAM RSA private key may have any of three representations.

1. The first representation consists of the pair (n, d), where the
   components have the following meanings:
      n      the RSA modulus
      d      the RSA private exponent

2. The second representation consists of the triplet (p, q, d), where the
   components have the following meanings:
      p      the first prime factor of the RSA modulus n
      q      the second prime factor of the RSA modulus n
      d      the RSA private exponent

3. The third representation consists of the quintuple (p, q, dP, dQ, qInv),
   where the components have the following meanings:
      p      the first prime factor of the RSA modulus n
      q      the second prime factor of the RSA modulus n
      dP     the first factor's CRT exponent
      dQ     the second factor's CRT exponent
      qInv   the (first) CRT coefficient

The benefit of using the second or third key form is lower computational
cost for the decryption and signature operations.

This patch adds support for the second RSA private key representation.

Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
commit 52e26d77b8
parent 7ca4a9a10f
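Background on the claimed cost saving (a standard textbook sketch, not a description of what the CAAM hardware does internally): once p and q are available, the private-key operation m = c^d mod n can be evaluated with the Chinese Remainder Theorem as two exponentiations on operands half the size of n,

$$
\begin{aligned}
m_p &= c^{\,d \bmod (p-1)} \bmod p,\\
m_q &= c^{\,d \bmod (q-1)} \bmod q,\\
m   &= m_q + q \cdot \bigl((q^{-1} \bmod p)\,(m_p - m_q) \bmod p\bigr),
\end{aligned}
$$

which is roughly a factor-of-four speed-up over a single full-size exponentiation. Form 3 supplies dP = d mod (p-1), dQ = d mod (q-1) and qInv = q^{-1} mod p directly; with form 2 the hardware is given only (p, q, d) plus the tmp1/tmp2 scratch buffers introduced below.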
@@ -18,6 +18,8 @@
#define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
#define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \
                              sizeof(struct rsa_priv_f1_pdb))
#define DESC_RSA_PRIV_F2_LEN (2 * CAAM_CMD_SZ + \
                              sizeof(struct rsa_priv_f2_pdb))

static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
                         struct akcipher_request *req)

@@ -54,6 +56,23 @@ static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
        dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
                              struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct caam_rsa_key *key = &ctx->key;
        struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
        size_t p_sz = key->p_sz;
        size_t q_sz = key->p_sz;

        dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
}

/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
@@ -90,6 +109,24 @@ static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
        akcipher_request_complete(req, err);
}

static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
                             void *context)
{
        struct akcipher_request *req = context;
        struct rsa_edesc *edesc;

        if (err)
                caam_jr_strstatus(dev, err);

        edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

        rsa_priv_f2_unmap(dev, edesc, req);
        rsa_io_unmap(dev, edesc, req);
        kfree(edesc);

        akcipher_request_complete(req, err);
}

static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
                                         size_t desclen)
{
@@ -258,6 +295,81 @@ static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
        return 0;
}

static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
                               struct rsa_edesc *edesc)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct caam_rsa_key *key = &ctx->key;
        struct device *dev = ctx->dev;
        struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
        int sec4_sg_index = 0;
        size_t p_sz = key->p_sz;
        size_t q_sz = key->p_sz;

        pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->d_dma)) {
                dev_err(dev, "Unable to map RSA private exponent memory\n");
                return -ENOMEM;
        }

        pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->p_dma)) {
                dev_err(dev, "Unable to map RSA prime factor p memory\n");
                goto unmap_d;
        }

        pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->q_dma)) {
                dev_err(dev, "Unable to map RSA prime factor q memory\n");
                goto unmap_p;
        }

        pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->tmp1_dma)) {
                dev_err(dev, "Unable to map RSA tmp1 memory\n");
                goto unmap_q;
        }

        pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->tmp2_dma)) {
                dev_err(dev, "Unable to map RSA tmp2 memory\n");
                goto unmap_tmp1;
        }

        if (edesc->src_nents > 1) {
                pdb->sgf |= RSA_PRIV_PDB_SGF_G;
                pdb->g_dma = edesc->sec4_sg_dma;
                sec4_sg_index += edesc->src_nents;
        } else {
                pdb->g_dma = sg_dma_address(req->src);
        }

        if (edesc->dst_nents > 1) {
                pdb->sgf |= RSA_PRIV_PDB_SGF_F;
                pdb->f_dma = edesc->sec4_sg_dma +
                             sec4_sg_index * sizeof(struct sec4_sg_entry);
        } else {
                pdb->f_dma = sg_dma_address(req->dst);
        }

        pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
        pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

        return 0;

unmap_tmp1:
        dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
        dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
        dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
        dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

        return -ENOMEM;
}

static int caam_rsa_enc(struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
@@ -301,24 +413,14 @@ init_fail:
        return ret;
}

static int caam_rsa_dec(struct akcipher_request *req)
static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct caam_rsa_key *key = &ctx->key;
        struct device *jrdev = ctx->dev;
        struct rsa_edesc *edesc;
        int ret;

        if (unlikely(!key->n || !key->d))
                return -EINVAL;

        if (req->dst_len < key->n_sz) {
                req->dst_len = key->n_sz;
                dev_err(jrdev, "Output buffer length less than parameter n\n");
                return -EOVERFLOW;
        }

        /* Allocate extended descriptor */
        edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
        if (IS_ERR(edesc))
@@ -344,17 +446,73 @@ init_fail:
        return ret;
}

static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct device *jrdev = ctx->dev;
        struct rsa_edesc *edesc;
        int ret;

        /* Allocate extended descriptor */
        edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);

        /* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
        ret = set_rsa_priv_f2_pdb(req, edesc);
        if (ret)
                goto init_fail;

        /* Initialize Job Descriptor */
        init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

        ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
        if (!ret)
                return -EINPROGRESS;

        rsa_priv_f2_unmap(jrdev, edesc, req);

init_fail:
        rsa_io_unmap(jrdev, edesc, req);
        kfree(edesc);
        return ret;
}

static int caam_rsa_dec(struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct caam_rsa_key *key = &ctx->key;
        int ret;

        if (unlikely(!key->n || !key->d))
                return -EINVAL;

        if (req->dst_len < key->n_sz) {
                req->dst_len = key->n_sz;
                dev_err(ctx->dev, "Output buffer length less than parameter n\n");
                return -EOVERFLOW;
        }

        if (key->priv_form == FORM2)
                ret = caam_rsa_dec_priv_f2(req);
        else
                ret = caam_rsa_dec_priv_f1(req);

        return ret;
}

static void caam_rsa_free_key(struct caam_rsa_key *key)
{
        kzfree(key->d);
        kzfree(key->p);
        kzfree(key->q);
        kzfree(key->tmp1);
        kzfree(key->tmp2);
        kfree(key->e);
        kfree(key->n);
        key->d = NULL;
        key->e = NULL;
        key->n = NULL;
        key->d_sz = 0;
        key->e_sz = 0;
        key->n_sz = 0;
        memset(key, 0, sizeof(*key));
}

static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
@@ -444,6 +602,43 @@ err:
        return -ENOMEM;
}

static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
                                       struct rsa_key *raw_key)
{
        struct caam_rsa_key *rsa_key = &ctx->key;
        size_t p_sz = raw_key->p_sz;
        size_t q_sz = raw_key->q_sz;

        rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
        if (!rsa_key->p)
                return;
        rsa_key->p_sz = p_sz;

        rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
        if (!rsa_key->q)
                goto free_p;
        rsa_key->q_sz = q_sz;

        rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
        if (!rsa_key->tmp1)
                goto free_q;

        rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
        if (!rsa_key->tmp2)
                goto free_tmp1;

        rsa_key->priv_form = FORM2;

        return;

free_tmp1:
        kzfree(rsa_key->tmp1);
free_q:
        kzfree(rsa_key->q);
free_p:
        kzfree(rsa_key->p);
}

static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
                                 unsigned int keylen)
{
@@ -490,6 +685,8 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
        memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
        memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

        caam_rsa_set_priv_key_form(ctx, &raw_key);

        return 0;

err:
@@ -12,22 +12,58 @@
#include "compat.h"
#include "pdb.h"

/**
 * caam_priv_key_form - CAAM RSA private key representation
 * CAAM RSA private key may have either of two forms.
 *
 * 1. The first representation consists of the pair (n, d), where the
 *    components have the following meanings:
 *        n      the RSA modulus
 *        d      the RSA private exponent
 *
 * 2. The second representation consists of the triplet (p, q, d), where the
 *    components have the following meanings:
 *        p      the first prime factor of the RSA modulus n
 *        q      the second prime factor of the RSA modulus n
 *        d      the RSA private exponent
 */
enum caam_priv_key_form {
        FORM1,
        FORM2,
};

/**
 * caam_rsa_key - CAAM RSA key structure. Keys are allocated in DMA zone.
 * @n         : RSA modulus raw byte stream
 * @e         : RSA public exponent raw byte stream
 * @d         : RSA private exponent raw byte stream
 * @p         : RSA prime factor p of RSA modulus n
 * @q         : RSA prime factor q of RSA modulus n
 * @tmp1      : CAAM uses this temporary buffer as internal state buffer.
 *              It is assumed to be as long as p.
 * @tmp2      : CAAM uses this temporary buffer as internal state buffer.
 *              It is assumed to be as long as q.
 * @n_sz      : length in bytes of RSA modulus n
 * @e_sz      : length in bytes of RSA public exponent
 * @d_sz      : length in bytes of RSA private exponent
 * @p_sz      : length in bytes of RSA prime factor p of RSA modulus n
 * @q_sz      : length in bytes of RSA prime factor q of RSA modulus n
 * @priv_form : CAAM RSA private key representation
 */
struct caam_rsa_key {
        u8 *n;
        u8 *e;
        u8 *d;
        u8 *p;
        u8 *q;
        u8 *tmp1;
        u8 *tmp2;
        size_t n_sz;
        size_t e_sz;
        size_t d_sz;
        size_t p_sz;
        size_t q_sz;
        enum caam_priv_key_form priv_form;
};

/**
@@ -59,6 +95,7 @@ struct rsa_edesc {
        union {
                struct rsa_pub_pdb pub;
                struct rsa_priv_f1_pdb priv_f1;
                struct rsa_priv_f2_pdb priv_f2;
        } pdb;
        u32 hw_desc[];
};
@@ -66,5 +103,6 @@ struct rsa_edesc {
/* Descriptor construction primitives. */
void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb);
void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb);
void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb);

#endif
@@ -483,6 +483,8 @@ struct dsa_verify_pdb {
#define RSA_PDB_E_MASK (0xFFF << RSA_PDB_E_SHIFT)
#define RSA_PDB_D_SHIFT 12
#define RSA_PDB_D_MASK (0xFFF << RSA_PDB_D_SHIFT)
#define RSA_PDB_Q_SHIFT 12
#define RSA_PDB_Q_MASK (0xFFF << RSA_PDB_Q_SHIFT)

#define RSA_PDB_SGF_F (0x8 << RSA_PDB_SGF_SHIFT)
#define RSA_PDB_SGF_G (0x4 << RSA_PDB_SGF_SHIFT)

@@ -490,6 +492,7 @@ struct dsa_verify_pdb {
#define RSA_PRIV_PDB_SGF_G (0x8 << RSA_PDB_SGF_SHIFT)

#define RSA_PRIV_KEY_FRM_1 0
#define RSA_PRIV_KEY_FRM_2 1

/**
 * RSA Encrypt Protocol Data Block
@@ -525,4 +528,30 @@ struct rsa_priv_f1_pdb {
        dma_addr_t d_dma;
} __packed;

/**
 * RSA Decrypt PDB - Private Key Form #2
 * @sgf     : scatter-gather field
 * @g_dma   : dma address of encrypted input data
 * @f_dma   : dma address of output data
 * @d_dma   : dma address of RSA private exponent
 * @p_dma   : dma address of RSA prime factor p of RSA modulus n
 * @q_dma   : dma address of RSA prime factor q of RSA modulus n
 * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
 *            as internal state buffer. It is assumed to be as long as p.
 * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
 *            as internal state buffer. It is assumed to be as long as q.
 * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
 */
struct rsa_priv_f2_pdb {
        u32 sgf;
        dma_addr_t g_dma;
        dma_addr_t f_dma;
        dma_addr_t d_dma;
        dma_addr_t p_dma;
        dma_addr_t q_dma;
        dma_addr_t tmp1_dma;
        dma_addr_t tmp2_dma;
        u32 p_q_len;
} __packed;

#endif
@@ -34,3 +34,20 @@ void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb)
        append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
                         RSA_PRIV_KEY_FRM_1);
}

/* Descriptor for RSA Private operation - Private Key Form #2 */
void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb)
{
        init_job_desc_pdb(desc, 0, sizeof(*pdb));
        append_cmd(desc, pdb->sgf);
        append_ptr(desc, pdb->g_dma);
        append_ptr(desc, pdb->f_dma);
        append_ptr(desc, pdb->d_dma);
        append_ptr(desc, pdb->p_dma);
        append_ptr(desc, pdb->q_dma);
        append_ptr(desc, pdb->tmp1_dma);
        append_ptr(desc, pdb->tmp2_dma);
        append_cmd(desc, pdb->p_q_len);
        append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
                         RSA_PRIV_KEY_FRM_2);
}
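For context, a hedged sketch of how this new path gets exercised from the kernel side: callers never select a key form explicitly. crypto_akcipher_set_priv_key() hands the PKCS#1 key to the driver, and caam_rsa_set_priv_key_form() switches to FORM2 when p and q parse and allocate successfully, after which caam_rsa_dec() dispatches to caam_rsa_dec_priv_f2(). The code below is illustrative only and not part of this patch; rsa_decrypt_once(), pkcs1_key and the buffers are hypothetical names, the crypto_wait helpers assume a kernel that provides them, and whether the CAAM implementation actually services the request depends on algorithm priorities on the running system.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/akcipher.h>

/* Illustrative helper: one raw RSA private-key operation via the "rsa" akcipher. */
static int rsa_decrypt_once(const void *pkcs1_key, unsigned int pkcs1_len,
                            void *ct, unsigned int ct_len,
                            void *pt, unsigned int pt_len)
{
        struct crypto_akcipher *tfm;
        struct akcipher_request *req;
        struct scatterlist src, dst;
        DECLARE_CRYPTO_WAIT(wait);
        int ret;

        tfm = crypto_alloc_akcipher("rsa", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* If the BER-encoded key carries p and q, the CAAM driver picks FORM2. */
        ret = crypto_akcipher_set_priv_key(tfm, pkcs1_key, pkcs1_len);
        if (ret)
                goto free_tfm;

        req = akcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto free_tfm;
        }

        sg_init_one(&src, ct, ct_len);
        sg_init_one(&dst, pt, pt_len);
        /* dst_len must be at least n_sz, otherwise the driver returns -EOVERFLOW. */
        akcipher_request_set_crypt(req, &src, &dst, ct_len, pt_len);
        akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      crypto_req_done, &wait);

        /* Asynchronous completion (-EINPROGRESS) is folded in by crypto_wait_req(). */
        ret = crypto_wait_req(crypto_akcipher_decrypt(req), &wait);

        akcipher_request_free(req);
free_tfm:
        crypto_free_akcipher(tfm);
        return ret;
}

Either way the request reaches caam_rsa_dec(), which performs the n/d sanity checks once and only then branches on ctx->key.priv_form.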