Diffstat (limited to 'drivers/crypto/qat/qat_common/qat_algs.c')
-rw-r--r--	drivers/crypto/qat/qat_common/qat_algs.c	138
1 file changed, 75 insertions(+), 63 deletions(-)
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index f998ed58457c..873533dc43a7 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -17,7 +17,7 @@
#include <crypto/xts.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
-#include "adf_transport.h"
+#include "qat_algs_send.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
@@ -46,19 +46,6 @@
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
-struct qat_alg_buf {
- u32 len;
- u32 resrvd;
- u64 addr;
-} __packed;
-
-struct qat_alg_buf_list {
- u64 resrvd;
- u32 num_bufs;
- u32 num_mapped_bufs;
- struct qat_alg_buf bufers[];
-} __packed __aligned(64);
-
/* Common content descriptor */
struct qat_alg_cd {
union {
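
Note: the qat_alg_buf{,_list} structures removed above are not dropped from
the driver; this series moves them into a shared header so that the request
structure can embed two fixed-size descriptor lists and skip the kzalloc()
in the datapath for small scatterlists. A sketch of the assumed qat_crypto.h
additions, reconstructed from how this file uses them (field layout and the
QAT_MAX_BUFF_DESC value are assumptions, not quoted from the header):

#define QAT_MAX_BUFF_DESC	4

struct qat_alg_fixed_buf_list {
	struct qat_alg_buf_list sgl_hdr;
	struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC];
} __packed __aligned(64);

struct qat_crypto_request_buffs {
	struct qat_alg_buf_list *bl;
	dma_addr_t blp;
	struct qat_alg_buf_list *blout;
	dma_addr_t bloutp;
	size_t sz;
	size_t sz_out;
	bool sgl_src_valid;
	bool sgl_dst_valid;
	struct qat_alg_fixed_buf_list sgl_src;
	struct qat_alg_fixed_buf_list sgl_dst;
};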
@@ -693,7 +680,10 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
bl->bufers[i].len, DMA_BIDIRECTIONAL);
dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
- kfree(bl);
+
+ if (!qat_req->buf.sgl_src_valid)
+ kfree(bl);
+
if (blp != blpout) {
/* If out of place operation dma unmap only data */
int bufless = blout->num_bufs - blout->num_mapped_bufs;
@@ -704,7 +694,9 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
DMA_BIDIRECTIONAL);
}
dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
- kfree(blout);
+
+ if (!qat_req->buf.sgl_dst_valid)
+ kfree(blout);
}
}
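
The conditional kfree() above is the other half of the pre-allocation
change: sgl_src_valid/sgl_dst_valid record that the descriptor list lives
inside the request (qat_req->buf.sgl_src/sgl_dst) rather than on the heap,
so only lists that really came from kzalloc_node() are freed.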
@@ -721,15 +713,24 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
dma_addr_t blp = DMA_MAPPING_ERROR;
dma_addr_t bloutp = DMA_MAPPING_ERROR;
struct scatterlist *sg;
- size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
+ size_t sz_out, sz = struct_size(bufl, bufers, n);
+ int node = dev_to_node(&GET_DEV(inst->accel_dev));
if (unlikely(!n))
return -EINVAL;
- bufl = kzalloc_node(sz, GFP_ATOMIC,
- dev_to_node(&GET_DEV(inst->accel_dev)));
- if (unlikely(!bufl))
- return -ENOMEM;
+ qat_req->buf.sgl_src_valid = false;
+ qat_req->buf.sgl_dst_valid = false;
+
+ if (n > QAT_MAX_BUFF_DESC) {
+ bufl = kzalloc_node(sz, GFP_ATOMIC, node);
+ if (unlikely(!bufl))
+ return -ENOMEM;
+ } else {
+ bufl = &qat_req->buf.sgl_src.sgl_hdr;
+ memset(bufl, 0, sizeof(struct qat_alg_buf_list));
+ qat_req->buf.sgl_src_valid = true;
+ }
for_each_sg(sgl, sg, n, i)
bufl->bufers[i].addr = DMA_MAPPING_ERROR;
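
Two sizing details change in this hunk. First, for_each_sg() walks exactly
n entries, so the list is now sized with struct_size(bufl, bufers, n)
rather than n + 1; the extra, never-filled descriptor appears to have been
leftover slack. Second, short scatterlists reuse the list embedded in the
request instead of allocating one with GFP_ATOMIC on every request. For
reference (a sketch; struct_size() is the real <linux/overflow.h> helper):

	/* struct_size(bufl, bufers, n) computes, with overflow checking:
	 *
	 *   sizeof(struct qat_alg_buf_list) + n * sizeof(struct qat_alg_buf)
	 *
	 * i.e. header plus n flexible-array descriptors. The pre-allocated
	 * qat_req->buf.sgl_src can hold up to QAT_MAX_BUFF_DESC of them.
	 */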
@@ -760,12 +761,18 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
struct qat_alg_buf *bufers;
n = sg_nents(sglout);
- sz_out = struct_size(buflout, bufers, n + 1);
+ sz_out = struct_size(buflout, bufers, n);
sg_nctr = 0;
- buflout = kzalloc_node(sz_out, GFP_ATOMIC,
- dev_to_node(&GET_DEV(inst->accel_dev)));
- if (unlikely(!buflout))
- goto err_in;
+
+ if (n > QAT_MAX_BUFF_DESC) {
+ buflout = kzalloc_node(sz_out, GFP_ATOMIC, node);
+ if (unlikely(!buflout))
+ goto err_in;
+ } else {
+ buflout = &qat_req->buf.sgl_dst.sgl_hdr;
+ memset(buflout, 0, sizeof(struct qat_alg_buf_list));
+ qat_req->buf.sgl_dst_valid = true;
+ }
bufers = buflout->bufers;
for_each_sg(sglout, sg, n, i)
@@ -810,7 +817,9 @@ err_out:
dma_unmap_single(dev, buflout->bufers[i].addr,
buflout->bufers[i].len,
DMA_BIDIRECTIONAL);
- kfree(buflout);
+
+ if (!qat_req->buf.sgl_dst_valid)
+ kfree(buflout);
err_in:
if (!dma_mapping_error(dev, blp))
@@ -823,7 +832,8 @@ err_in:
bufl->bufers[i].len,
DMA_BIDIRECTIONAL);
- kfree(bufl);
+ if (!qat_req->buf.sgl_src_valid)
+ kfree(bufl);
dev_err(dev, "Failed to map buf for dma\n");
return -ENOMEM;
@@ -925,8 +935,25 @@ void qat_alg_callback(void *resp)
struct icp_qat_fw_la_resp *qat_resp = resp;
struct qat_crypto_request *qat_req =
(void *)(__force long)qat_resp->opaque_data;
+ struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;
qat_req->cb(qat_resp, qat_req);
+
+ qat_alg_send_backlog(backlog);
+}
+
+static int qat_alg_send_sym_message(struct qat_crypto_request *qat_req,
+ struct qat_crypto_instance *inst,
+ struct crypto_async_request *base)
+{
+ struct qat_alg_req *alg_req = &qat_req->alg_req;
+
+ alg_req->fw_req = (u32 *)&qat_req->req;
+ alg_req->tx_ring = inst->sym_tx;
+ alg_req->base = base;
+ alg_req->backlog = &inst->backlog;
+
+ return qat_alg_send_message(alg_req);
}
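
qat_alg_send_sym_message() packages the firmware request, the SYM TX ring,
the crypto_async_request and the per-instance backlog into a qat_alg_req
and hands it to qat_alg_send_message() from the new qat_algs_send.h. A
plausible shape for that helper, inferred from the call sites in this file
(function names, ADF_MAX_RETRIES and the exact queueing logic are
assumptions about the companion patch, not quoted from it):

/* Bounded-retry path for requests that may not be backlogged */
static int qat_alg_send_message_direct(struct qat_alg_req *req)
{
	int ret = 0, ctr = 0;

	do {
		ret = adf_send_message(req->tx_ring, req->fw_req);
	} while (ret == -EAGAIN && ctr++ < ADF_MAX_RETRIES);

	if (ret == -EAGAIN)
		return -ENOSPC;	/* ring full, caller drops the request */

	return -EINPROGRESS;
}

int qat_alg_send_message(struct qat_alg_req *req)
{
	u32 flags = req->base->flags;

	if (flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		return qat_alg_send_message_maybacklog(req);

	return qat_alg_send_message_direct(req);
}

Under this scheme a MAY_BACKLOG request that finds the ring full is queued
on inst->backlog and the helper returns -EBUSY; qat_alg_callback() then
calls qat_alg_send_backlog() on every response so queued requests are
retried as ring slots free up.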
static int qat_alg_aead_dec(struct aead_request *areq)
@@ -939,7 +966,7 @@ static int qat_alg_aead_dec(struct aead_request *areq)
struct icp_qat_fw_la_auth_req_params *auth_param;
struct icp_qat_fw_la_bulk_req *msg;
int digst_size = crypto_aead_authsize(aead_tfm);
- int ret, ctr = 0;
+ int ret;
u32 cipher_len;
cipher_len = areq->cryptlen - digst_size;
@@ -965,15 +992,12 @@ static int qat_alg_aead_dec(struct aead_request *areq)
auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
auth_param->auth_off = 0;
auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
- do {
- ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
- } while (ret == -EAGAIN && ctr++ < 10);
- if (ret == -EAGAIN) {
+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
+ if (ret == -ENOSPC)
qat_alg_free_bufl(ctx->inst, qat_req);
- return -EBUSY;
- }
- return -EINPROGRESS;
+
+ return ret;
}
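
This conversion pattern repeats verbatim in qat_alg_aead_enc() and the
skcipher entry points below: the open-coded 10-iteration retry loop goes
away and the return value of qat_alg_send_sym_message() is propagated
directly. -ENOSPC (ring full, backlog not allowed) is the only case where
the DMA buffers must be unwound here; -EBUSY now genuinely means "queued
for later" as the crypto API expects for MAY_BACKLOG requests, whereas the
old code returned -EBUSY after giving up without queueing anything.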
static int qat_alg_aead_enc(struct aead_request *areq)
@@ -986,7 +1010,7 @@ static int qat_alg_aead_enc(struct aead_request *areq)
struct icp_qat_fw_la_auth_req_params *auth_param;
struct icp_qat_fw_la_bulk_req *msg;
u8 *iv = areq->iv;
- int ret, ctr = 0;
+ int ret;
if (areq->cryptlen % AES_BLOCK_SIZE != 0)
return -EINVAL;
@@ -1013,15 +1037,11 @@ static int qat_alg_aead_enc(struct aead_request *areq)
auth_param->auth_off = 0;
auth_param->auth_len = areq->assoclen + areq->cryptlen;
- do {
- ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
- } while (ret == -EAGAIN && ctr++ < 10);
-
- if (ret == -EAGAIN) {
+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
+ if (ret == -ENOSPC)
qat_alg_free_bufl(ctx->inst, qat_req);
- return -EBUSY;
- }
- return -EINPROGRESS;
+
+ return ret;
}
static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
@@ -1174,7 +1194,7 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_bulk_req *msg;
- int ret, ctr = 0;
+ int ret;
if (req->cryptlen == 0)
return 0;
@@ -1198,15 +1218,11 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
qat_alg_set_req_iv(qat_req);
- do {
- ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
- } while (ret == -EAGAIN && ctr++ < 10);
-
- if (ret == -EAGAIN) {
+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
+ if (ret == -ENOSPC)
qat_alg_free_bufl(ctx->inst, qat_req);
- return -EBUSY;
- }
- return -EINPROGRESS;
+
+ return ret;
}
static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
@@ -1243,7 +1259,7 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_bulk_req *msg;
- int ret, ctr = 0;
+ int ret;
if (req->cryptlen == 0)
return 0;
@@ -1268,15 +1284,11 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
qat_alg_set_req_iv(qat_req);
qat_alg_update_iv(qat_req);
- do {
- ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
- } while (ret == -EAGAIN && ctr++ < 10);
-
- if (ret == -EAGAIN) {
+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
+ if (ret == -ENOSPC)
qat_alg_free_bufl(ctx->inst, qat_req);
- return -EBUSY;
- }
- return -EINPROGRESS;
+
+ return ret;
}
static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)