Diffstat (limited to 'drivers/crypto/qat')
-rw-r--r--  drivers/crypto/qat/qat_4xxx/adf_drv.c                   |   7
-rw-r--r--  drivers/crypto/qat/qat_common/Makefile                  |   1
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport.c           |  11
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport.h           |   1
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport_internal.h  |   1
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c                | 138
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs_send.c           |  86
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs_send.h           |  11
-rw-r--r--  drivers/crypto/qat/qat_common/qat_asym_algs.c           | 304
-rw-r--r--  drivers/crypto/qat/qat_common/qat_crypto.c              |  10
-rw-r--r--  drivers/crypto/qat/qat_common/qat_crypto.h              |  39
11 files changed, 377 insertions(+), 232 deletions(-)
diff --git a/drivers/crypto/qat/qat_4xxx/adf_drv.c b/drivers/crypto/qat/qat_4xxx/adf_drv.c
index 8fd44703115f..359fb7989dfb 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_4xxx/adf_drv.c
@@ -52,13 +52,6 @@ static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
if (ret)
goto err;
- /* Temporarily set the number of crypto instances to zero to avoid
- * registering the crypto algorithms.
- * This will be removed when the algorithms will support the
- * CRYPTO_TFM_REQ_MAY_BACKLOG flag
- */
- instances = 0;
-
for (i = 0; i < instances; i++) {
val = i;
bank = i * 2;
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
index 9c57abdf56b7..fc477f016213 100644
--- a/drivers/crypto/qat/qat_common/Makefile
+++ b/drivers/crypto/qat/qat_common/Makefile
@@ -15,6 +15,7 @@ intel_qat-objs := adf_cfg.o \
qat_crypto.o \
qat_algs.o \
qat_asym_algs.o \
+ qat_algs_send.o \
qat_uclo.o \
qat_hal.o
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
index 8ba28409fb74..630d0483c4e0 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/qat/qat_common/adf_transport.c
@@ -8,6 +8,9 @@
#include "adf_cfg.h"
#include "adf_common_drv.h"
+#define ADF_MAX_RING_THRESHOLD 80
+#define ADF_PERCENT(tot, percent) (((tot) * (percent)) / 100)
+
static inline u32 adf_modulo(u32 data, u32 shift)
{
u32 div = data >> shift;
@@ -77,6 +80,11 @@ static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
bank->irq_mask);
}
+bool adf_ring_nearly_full(struct adf_etr_ring_data *ring)
+{
+ return atomic_read(ring->inflights) > ring->threshold;
+}
+
int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg)
{
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
@@ -217,6 +225,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
struct adf_etr_bank_data *bank;
struct adf_etr_ring_data *ring;
char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+ int max_inflights;
u32 ring_num;
int ret;
@@ -263,6 +272,8 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
ring->head = 0;
ring->tail = 0;
+ max_inflights = ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size);
+ ring->threshold = ADF_PERCENT(max_inflights, ADF_MAX_RING_THRESHOLD);
atomic_set(ring->inflights, 0);
ret = adf_init_ring(ring);
if (ret)
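
For reference, the near-full test introduced above treats a ring as backloggable once occupancy exceeds 80% of its capacity. A minimal standalone sketch of that arithmetic (the function and macro names here are illustrative, not part of the driver):

#include <stdbool.h>

/* Illustration only: mirrors ADF_PERCENT() and adf_ring_nearly_full() above. */
#define EXAMPLE_RING_THRESHOLD_PERCENT 80

static bool example_ring_nearly_full(unsigned int inflights,
                                     unsigned int max_inflights)
{
        /* The threshold is 80% of the maximum number of in-flight requests. */
        unsigned int threshold = (max_inflights *
                                  EXAMPLE_RING_THRESHOLD_PERCENT) / 100;

        /* e.g. max_inflights = 256 -> threshold = 204, so backlog from 205 up */
        return inflights > threshold;
}
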
diff --git a/drivers/crypto/qat/qat_common/adf_transport.h b/drivers/crypto/qat/qat_common/adf_transport.h
index 2c95f1697c76..e6ef6f9b7691 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.h
+++ b/drivers/crypto/qat/qat_common/adf_transport.h
@@ -14,6 +14,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
const char *ring_name, adf_callback_fn callback,
int poll_mode, struct adf_etr_ring_data **ring_ptr);
+bool adf_ring_nearly_full(struct adf_etr_ring_data *ring);
int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg);
void adf_remove_ring(struct adf_etr_ring_data *ring);
#endif
diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h
index 501bcf0f1809..8b2c92ba7ca1 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_internal.h
+++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h
@@ -22,6 +22,7 @@ struct adf_etr_ring_data {
spinlock_t lock; /* protects ring data struct */
u16 head;
u16 tail;
+ u32 threshold;
u8 ring_number;
u8 ring_size;
u8 msg_size;
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index f998ed58457c..873533dc43a7 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -17,7 +17,7 @@
#include <crypto/xts.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
-#include "adf_transport.h"
+#include "qat_algs_send.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
@@ -46,19 +46,6 @@
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
-struct qat_alg_buf {
- u32 len;
- u32 resrvd;
- u64 addr;
-} __packed;
-
-struct qat_alg_buf_list {
- u64 resrvd;
- u32 num_bufs;
- u32 num_mapped_bufs;
- struct qat_alg_buf bufers[];
-} __packed __aligned(64);
-
/* Common content descriptor */
struct qat_alg_cd {
union {
@@ -693,7 +680,10 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
bl->bufers[i].len, DMA_BIDIRECTIONAL);
dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
- kfree(bl);
+
+ if (!qat_req->buf.sgl_src_valid)
+ kfree(bl);
+
if (blp != blpout) {
/* If out of place operation dma unmap only data */
int bufless = blout->num_bufs - blout->num_mapped_bufs;
@@ -704,7 +694,9 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
DMA_BIDIRECTIONAL);
}
dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
- kfree(blout);
+
+ if (!qat_req->buf.sgl_dst_valid)
+ kfree(blout);
}
}
@@ -721,15 +713,24 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
dma_addr_t blp = DMA_MAPPING_ERROR;
dma_addr_t bloutp = DMA_MAPPING_ERROR;
struct scatterlist *sg;
- size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
+ size_t sz_out, sz = struct_size(bufl, bufers, n);
+ int node = dev_to_node(&GET_DEV(inst->accel_dev));
if (unlikely(!n))
return -EINVAL;
- bufl = kzalloc_node(sz, GFP_ATOMIC,
- dev_to_node(&GET_DEV(inst->accel_dev)));
- if (unlikely(!bufl))
- return -ENOMEM;
+ qat_req->buf.sgl_src_valid = false;
+ qat_req->buf.sgl_dst_valid = false;
+
+ if (n > QAT_MAX_BUFF_DESC) {
+ bufl = kzalloc_node(sz, GFP_ATOMIC, node);
+ if (unlikely(!bufl))
+ return -ENOMEM;
+ } else {
+ bufl = &qat_req->buf.sgl_src.sgl_hdr;
+ memset(bufl, 0, sizeof(struct qat_alg_buf_list));
+ qat_req->buf.sgl_src_valid = true;
+ }
for_each_sg(sgl, sg, n, i)
bufl->bufers[i].addr = DMA_MAPPING_ERROR;
@@ -760,12 +761,18 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
struct qat_alg_buf *bufers;
n = sg_nents(sglout);
- sz_out = struct_size(buflout, bufers, n + 1);
+ sz_out = struct_size(buflout, bufers, n);
sg_nctr = 0;
- buflout = kzalloc_node(sz_out, GFP_ATOMIC,
- dev_to_node(&GET_DEV(inst->accel_dev)));
- if (unlikely(!buflout))
- goto err_in;
+
+ if (n > QAT_MAX_BUFF_DESC) {
+ buflout = kzalloc_node(sz_out, GFP_ATOMIC, node);
+ if (unlikely(!buflout))
+ goto err_in;
+ } else {
+ buflout = &qat_req->buf.sgl_dst.sgl_hdr;
+ memset(buflout, 0, sizeof(struct qat_alg_buf_list));
+ qat_req->buf.sgl_dst_valid = true;
+ }
bufers = buflout->bufers;
for_each_sg(sglout, sg, n, i)
@@ -810,7 +817,9 @@ err_out:
dma_unmap_single(dev, buflout->bufers[i].addr,
buflout->bufers[i].len,
DMA_BIDIRECTIONAL);
- kfree(buflout);
+
+ if (!qat_req->buf.sgl_dst_valid)
+ kfree(buflout);
err_in:
if (!dma_mapping_error(dev, blp))
@@ -823,7 +832,8 @@ err_in:
bufl->bufers[i].len,
DMA_BIDIRECTIONAL);
- kfree(bufl);
+ if (!qat_req->buf.sgl_src_valid)
+ kfree(bufl);
dev_err(dev, "Failed to map buf for dma\n");
return -ENOMEM;
@@ -925,8 +935,25 @@ void qat_alg_callback(void *resp)
struct icp_qat_fw_la_resp *qat_resp = resp;
struct qat_crypto_request *qat_req =
(void *)(__force long)qat_resp->opaque_data;
+ struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;
qat_req->cb(qat_resp, qat_req);
+
+ qat_alg_send_backlog(backlog);
+}
+
+static int qat_alg_send_sym_message(struct qat_crypto_request *qat_req,
+ struct qat_crypto_instance *inst,
+ struct crypto_async_request *base)
+{
+ struct qat_alg_req *alg_req = &qat_req->alg_req;
+
+ alg_req->fw_req = (u32 *)&qat_req->req;
+ alg_req->tx_ring = inst->sym_tx;
+ alg_req->base = base;
+ alg_req->backlog = &inst->backlog;
+
+ return qat_alg_send_message(alg_req);
}
static int qat_alg_aead_dec(struct aead_request *areq)
@@ -939,7 +966,7 @@ static int qat_alg_aead_dec(struct aead_request *areq)
struct icp_qat_fw_la_auth_req_params *auth_param;
struct icp_qat_fw_la_bulk_req *msg;
int digst_size = crypto_aead_authsize(aead_tfm);
- int ret, ctr = 0;
+ int ret;
u32 cipher_len;
cipher_len = areq->cryptlen - digst_size;
@@ -965,15 +992,12 @@ static int qat_alg_aead_dec(struct aead_request *areq)
auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
auth_param->auth_off = 0;
auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
- do {
- ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
- } while (ret == -EAGAIN && ctr++ < 10);
- if (ret == -EAGAIN) {
+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
+ if (ret == -ENOSPC)
qat_alg_free_bufl(ctx->inst, qat_req);
- return -EBUSY;
- }
- return -EINPROGRESS;
+
+ return ret;
}
static int qat_alg_aead_enc(struct aead_request *areq)
@@ -986,7 +1010,7 @@ static int qat_alg_aead_enc(struct aead_request *areq)
struct icp_qat_fw_la_auth_req_params *auth_param;
struct icp_qat_fw_la_bulk_req *msg;
u8 *iv = areq->iv;
- int ret, ctr = 0;
+ int ret;
if (areq->cryptlen % AES_BLOCK_SIZE != 0)
return -EINVAL;
@@ -1013,15 +1037,11 @@ static int qat_alg_aead_enc(struct aead_request *areq)
auth_param->auth_off = 0;
auth_param->auth_len = areq->assoclen + areq->cryptlen;
- do {
- ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
- } while (ret == -EAGAIN && ctr++ < 10);
-
- if (ret == -EAGAIN) {
+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
+ if (ret == -ENOSPC)
qat_alg_free_bufl(ctx->inst, qat_req);
- return -EBUSY;
- }
- return -EINPROGRESS;
+
+ return ret;
}
static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
@@ -1174,7 +1194,7 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_bulk_req *msg;
- int ret, ctr = 0;
+ int ret;
if (req->cryptlen == 0)
return 0;
@@ -1198,15 +1218,11 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
qat_alg_set_req_iv(qat_req);
- do {
- ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
- } while (ret == -EAGAIN && ctr++ < 10);
-
- if (ret == -EAGAIN) {
+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
+ if (ret == -ENOSPC)
qat_alg_free_bufl(ctx->inst, qat_req);
- return -EBUSY;
- }
- return -EINPROGRESS;
+
+ return ret;
}
static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
@@ -1243,7 +1259,7 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_bulk_req *msg;
- int ret, ctr = 0;
+ int ret;
if (req->cryptlen == 0)
return 0;
@@ -1268,15 +1284,11 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
qat_alg_set_req_iv(qat_req);
qat_alg_update_iv(qat_req);
- do {
- ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
- } while (ret == -EAGAIN && ctr++ < 10);
-
- if (ret == -EAGAIN) {
+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
+ if (ret == -ENOSPC)
qat_alg_free_bufl(ctx->inst, qat_req);
- return -EBUSY;
- }
- return -EINPROGRESS;
+
+ return ret;
}
static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
diff --git a/drivers/crypto/qat/qat_common/qat_algs_send.c b/drivers/crypto/qat/qat_common/qat_algs_send.c
new file mode 100644
index 000000000000..ff5b4347f783
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_algs_send.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2022 Intel Corporation */
+#include "adf_transport.h"
+#include "qat_algs_send.h"
+#include "qat_crypto.h"
+
+#define ADF_MAX_RETRIES 20
+
+static int qat_alg_send_message_retry(struct qat_alg_req *req)
+{
+ int ret = 0, ctr = 0;
+
+ do {
+ ret = adf_send_message(req->tx_ring, req->fw_req);
+ } while (ret == -EAGAIN && ctr++ < ADF_MAX_RETRIES);
+
+ if (ret == -EAGAIN)
+ return -ENOSPC;
+
+ return -EINPROGRESS;
+}
+
+void qat_alg_send_backlog(struct qat_instance_backlog *backlog)
+{
+ struct qat_alg_req *req, *tmp;
+
+ spin_lock_bh(&backlog->lock);
+ list_for_each_entry_safe(req, tmp, &backlog->list, list) {
+ if (adf_send_message(req->tx_ring, req->fw_req)) {
+ /* The HW ring is full. Do nothing.
+ * qat_alg_send_backlog() will be invoked again by
+ * another callback.
+ */
+ break;
+ }
+ list_del(&req->list);
+ req->base->complete(req->base, -EINPROGRESS);
+ }
+ spin_unlock_bh(&backlog->lock);
+}
+
+static void qat_alg_backlog_req(struct qat_alg_req *req,
+ struct qat_instance_backlog *backlog)
+{
+ INIT_LIST_HEAD(&req->list);
+
+ spin_lock_bh(&backlog->lock);
+ list_add_tail(&req->list, &backlog->list);
+ spin_unlock_bh(&backlog->lock);
+}
+
+static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
+{
+ struct qat_instance_backlog *backlog = req->backlog;
+ struct adf_etr_ring_data *tx_ring = req->tx_ring;
+ u32 *fw_req = req->fw_req;
+
+ /* If any request is already backlogged, then add to backlog list */
+ if (!list_empty(&backlog->list))
+ goto enqueue;
+
+ /* If ring is nearly full, then add to backlog list */
+ if (adf_ring_nearly_full(tx_ring))
+ goto enqueue;
+
+ /* If adding request to HW ring fails, then add to backlog list */
+ if (adf_send_message(tx_ring, fw_req))
+ goto enqueue;
+
+ return -EINPROGRESS;
+
+enqueue:
+ qat_alg_backlog_req(req, backlog);
+
+ return -EBUSY;
+}
+
+int qat_alg_send_message(struct qat_alg_req *req)
+{
+ u32 flags = req->base->flags;
+
+ if (flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+ return qat_alg_send_message_maybacklog(req);
+ else
+ return qat_alg_send_message_retry(req);
+}
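
From the caller's perspective, the effect of qat_alg_send_message() is that requests flagged CRYPTO_TFM_REQ_MAY_BACKLOG are queued (returning -EBUSY) instead of being rejected when the ring saturates, while unflagged requests keep the bounded-retry behaviour. A hedged sketch of how a typical in-kernel user exercises that path through the generic crypto API (standard kernel API usage, not code from this patch):

#include <crypto/skcipher.h>
#include <linux/crypto.h>

static int example_encrypt_may_backlog(struct skcipher_request *req)
{
        DECLARE_CRYPTO_WAIT(wait);

        /* Ask the driver to backlog rather than fail when its ring is full. */
        skcipher_request_set_callback(req,
                                      CRYPTO_TFM_REQ_MAY_BACKLOG |
                                      CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, &wait);

        /*
         * crypto_wait_req() handles both -EINPROGRESS and -EBUSY (request
         * backlogged) by sleeping until the completion callback fires.
         */
        return crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
}
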
diff --git a/drivers/crypto/qat/qat_common/qat_algs_send.h b/drivers/crypto/qat/qat_common/qat_algs_send.h
new file mode 100644
index 000000000000..5ce9f4f69d8f
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_algs_send.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef QAT_ALGS_SEND_H
+#define QAT_ALGS_SEND_H
+
+#include "qat_crypto.h"
+
+int qat_alg_send_message(struct qat_alg_req *req);
+void qat_alg_send_backlog(struct qat_instance_backlog *backlog);
+
+#endif
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index b0b78445418b..7173a2a0a484 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -12,6 +12,7 @@
#include <crypto/scatterwalk.h>
#include "icp_qat_fw_pke.h"
#include "adf_accel_devices.h"
+#include "qat_algs_send.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
@@ -135,8 +136,23 @@ struct qat_asym_request {
} areq;
int err;
void (*cb)(struct icp_qat_fw_pke_resp *resp);
+ struct qat_alg_req alg_req;
} __aligned(64);
+static int qat_alg_send_asym_message(struct qat_asym_request *qat_req,
+ struct qat_crypto_instance *inst,
+ struct crypto_async_request *base)
+{
+ struct qat_alg_req *alg_req = &qat_req->alg_req;
+
+ alg_req->fw_req = (u32 *)&qat_req->req;
+ alg_req->tx_ring = inst->pke_tx;
+ alg_req->base = base;
+ alg_req->backlog = &inst->backlog;
+
+ return qat_alg_send_message(alg_req);
+}
+
static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
{
struct qat_asym_request *req = (void *)(__force long)resp->opaque;
@@ -148,26 +164,21 @@ static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
if (areq->src) {
- if (req->src_align)
- dma_free_coherent(dev, req->ctx.dh->p_size,
- req->src_align, req->in.dh.in.b);
- else
- dma_unmap_single(dev, req->in.dh.in.b,
- req->ctx.dh->p_size, DMA_TO_DEVICE);
+ dma_unmap_single(dev, req->in.dh.in.b, req->ctx.dh->p_size,
+ DMA_TO_DEVICE);
+ kfree_sensitive(req->src_align);
}
areq->dst_len = req->ctx.dh->p_size;
if (req->dst_align) {
scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
areq->dst_len, 1);
-
- dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align,
- req->out.dh.r);
- } else {
- dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
- DMA_FROM_DEVICE);
+ kfree_sensitive(req->dst_align);
}
+ dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
+ DMA_FROM_DEVICE);
+
dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
DMA_TO_DEVICE);
dma_unmap_single(dev, req->phy_out,
@@ -213,8 +224,9 @@ static int qat_dh_compute_value(struct kpp_request *req)
struct qat_asym_request *qat_req =
PTR_ALIGN(kpp_request_ctx(req), 64);
struct icp_qat_fw_pke_request *msg = &qat_req->req;
- int ret, ctr = 0;
+ int ret;
int n_input_params = 0;
+ u8 *vaddr;
if (unlikely(!ctx->xa))
return -EINVAL;
@@ -223,6 +235,10 @@ static int qat_dh_compute_value(struct kpp_request *req)
req->dst_len = ctx->p_size;
return -EOVERFLOW;
}
+
+ if (req->src_len > ctx->p_size)
+ return -EINVAL;
+
memset(msg, '\0', sizeof(*msg));
ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
ICP_QAT_FW_COMN_REQ_FLAG_SET);
@@ -271,27 +287,24 @@ static int qat_dh_compute_value(struct kpp_request *req)
*/
if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
qat_req->src_align = NULL;
- qat_req->in.dh.in.b = dma_map_single(dev,
- sg_virt(req->src),
- req->src_len,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev,
- qat_req->in.dh.in.b)))
- return ret;
-
+ vaddr = sg_virt(req->src);
} else {
int shift = ctx->p_size - req->src_len;
- qat_req->src_align = dma_alloc_coherent(dev,
- ctx->p_size,
- &qat_req->in.dh.in.b,
- GFP_KERNEL);
+ qat_req->src_align = kzalloc(ctx->p_size, GFP_KERNEL);
if (unlikely(!qat_req->src_align))
return ret;
scatterwalk_map_and_copy(qat_req->src_align + shift,
req->src, 0, req->src_len, 0);
+
+ vaddr = qat_req->src_align;
}
+
+ qat_req->in.dh.in.b = dma_map_single(dev, vaddr, ctx->p_size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->in.dh.in.b)))
+ goto unmap_src;
}
/*
* dst can be of any size in valid range, but HW expects it to be the
@@ -302,20 +315,18 @@ static int qat_dh_compute_value(struct kpp_request *req)
*/
if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
qat_req->dst_align = NULL;
- qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst),
- req->dst_len,
- DMA_FROM_DEVICE);
-
- if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
- goto unmap_src;
-
+ vaddr = sg_virt(req->dst);
} else {
- qat_req->dst_align = dma_alloc_coherent(dev, ctx->p_size,
- &qat_req->out.dh.r,
- GFP_KERNEL);
+ qat_req->dst_align = kzalloc(ctx->p_size, GFP_KERNEL);
if (unlikely(!qat_req->dst_align))
goto unmap_src;
+
+ vaddr = qat_req->dst_align;
}
+ qat_req->out.dh.r = dma_map_single(dev, vaddr, ctx->p_size,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
+ goto unmap_dst;
qat_req->in.dh.in_tab[n_input_params] = 0;
qat_req->out.dh.out_tab[1] = 0;
@@ -338,13 +349,13 @@ static int qat_dh_compute_value(struct kpp_request *req)
msg->input_param_count = n_input_params;
msg->output_param_count = 1;
- do {
- ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg);
- } while (ret == -EBUSY && ctr++ < 100);
+ ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
+ if (ret == -ENOSPC)
+ goto unmap_all;
- if (!ret)
- return -EINPROGRESS;
+ return ret;
+unmap_all:
if (!dma_mapping_error(dev, qat_req->phy_out))
dma_unmap_single(dev, qat_req->phy_out,
sizeof(struct qat_dh_output_params),
@@ -355,23 +366,17 @@ unmap_in_params:
sizeof(struct qat_dh_input_params),
DMA_TO_DEVICE);
unmap_dst:
- if (qat_req->dst_align)
- dma_free_coherent(dev, ctx->p_size, qat_req->dst_align,
- qat_req->out.dh.r);
- else
- if (!dma_mapping_error(dev, qat_req->out.dh.r))
- dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
- DMA_FROM_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->out.dh.r))
+ dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
+ DMA_FROM_DEVICE);
+ kfree_sensitive(qat_req->dst_align);
unmap_src:
if (req->src) {
- if (qat_req->src_align)
- dma_free_coherent(dev, ctx->p_size, qat_req->src_align,
- qat_req->in.dh.in.b);
- else
- if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
- dma_unmap_single(dev, qat_req->in.dh.in.b,
- ctx->p_size,
- DMA_TO_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
+ dma_unmap_single(dev, qat_req->in.dh.in.b,
+ ctx->p_size,
+ DMA_TO_DEVICE);
+ kfree_sensitive(qat_req->src_align);
}
return ret;
}
@@ -420,14 +425,17 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
{
if (ctx->g) {
+ memset(ctx->g, 0, ctx->p_size);
dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g);
ctx->g = NULL;
}
if (ctx->xa) {
+ memset(ctx->xa, 0, ctx->p_size);
dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa);
ctx->xa = NULL;
}
if (ctx->p) {
+ memset(ctx->p, 0, ctx->p_size);
dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
ctx->p = NULL;
}
@@ -510,25 +518,22 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
- if (req->src_align)
- dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align,
- req->in.rsa.enc.m);
- else
- dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
- DMA_TO_DEVICE);
+ kfree_sensitive(req->src_align);
+
+ dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
+ DMA_TO_DEVICE);
areq->dst_len = req->ctx.rsa->key_sz;
if (req->dst_align) {
scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
areq->dst_len, 1);
- dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align,
- req->out.rsa.enc.c);
- } else {
- dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
- DMA_FROM_DEVICE);
+ kfree_sensitive(req->dst_align);
}
+ dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
+ DMA_FROM_DEVICE);
+
dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
DMA_TO_DEVICE);
dma_unmap_single(dev, req->phy_out,
@@ -542,8 +547,11 @@ void qat_alg_asym_callback(void *_resp)
{
struct icp_qat_fw_pke_resp *resp = _resp;
struct qat_asym_request *areq = (void *)(__force long)resp->opaque;
+ struct qat_instance_backlog *backlog = areq->alg_req.backlog;
areq->cb(resp);
+
+ qat_alg_send_backlog(backlog);
}
#define PKE_RSA_EP_512 0x1c161b21
@@ -642,7 +650,8 @@ static int qat_rsa_enc(struct akcipher_request *req)
struct qat_asym_request *qat_req =
PTR_ALIGN(akcipher_request_ctx(req), 64);
struct icp_qat_fw_pke_request *msg = &qat_req->req;
- int ret, ctr = 0;
+ u8 *vaddr;
+ int ret;
if (unlikely(!ctx->n || !ctx->e))
return -EINVAL;
@@ -651,6 +660,10 @@ static int qat_rsa_enc(struct akcipher_request *req)
req->dst_len = ctx->key_sz;
return -EOVERFLOW;
}
+
+ if (req->src_len > ctx->key_sz)
+ return -EINVAL;
+
memset(msg, '\0', sizeof(*msg));
ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
ICP_QAT_FW_COMN_REQ_FLAG_SET);
@@ -679,40 +692,39 @@ static int qat_rsa_enc(struct akcipher_request *req)
*/
if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
qat_req->src_align = NULL;
- qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src),
- req->src_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
- return ret;
-
+ vaddr = sg_virt(req->src);
} else {
int shift = ctx->key_sz - req->src_len;
- qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
- &qat_req->in.rsa.enc.m,
- GFP_KERNEL);
+ qat_req->src_align = kzalloc(ctx->key_sz, GFP_KERNEL);
if (unlikely(!qat_req->src_align))
return ret;
scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
0, req->src_len, 0);
+ vaddr = qat_req->src_align;
}
- if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
- qat_req->dst_align = NULL;
- qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst),
- req->dst_len,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
- goto unmap_src;
+ qat_req->in.rsa.enc.m = dma_map_single(dev, vaddr, ctx->key_sz,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
+ goto unmap_src;
+ if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
+ qat_req->dst_align = NULL;
+ vaddr = sg_virt(req->dst);
} else {
- qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
- &qat_req->out.rsa.enc.c,
- GFP_KERNEL);
+ qat_req->dst_align = kzalloc(ctx->key_sz, GFP_KERNEL);
if (unlikely(!qat_req->dst_align))
goto unmap_src;
-
+ vaddr = qat_req->dst_align;
}
+
+ qat_req->out.rsa.enc.c = dma_map_single(dev, vaddr, ctx->key_sz,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
+ goto unmap_dst;
+
qat_req->in.rsa.in_tab[3] = 0;
qat_req->out.rsa.out_tab[1] = 0;
qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
@@ -732,13 +744,14 @@ static int qat_rsa_enc(struct akcipher_request *req)
msg->pke_mid.opaque = (u64)(__force long)qat_req;
msg->input_param_count = 3;
msg->output_param_count = 1;
- do {
- ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg);
- } while (ret == -EBUSY && ctr++ < 100);
- if (!ret)
- return -EINPROGRESS;
+ ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
+ if (ret == -ENOSPC)
+ goto unmap_all;
+ return ret;
+
+unmap_all:
if (!dma_mapping_error(dev, qat_req->phy_out))
dma_unmap_single(dev, qat_req->phy_out,
sizeof(struct qat_rsa_output_params),
@@ -749,21 +762,15 @@ unmap_in_params:
sizeof(struct qat_rsa_input_params),
DMA_TO_DEVICE);
unmap_dst:
- if (qat_req->dst_align)
- dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
- qat_req->out.rsa.enc.c);
- else
- if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
- dma_unmap_single(dev, qat_req->out.rsa.enc.c,
- ctx->key_sz, DMA_FROM_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
+ dma_unmap_single(dev, qat_req->out.rsa.enc.c,
+ ctx->key_sz, DMA_FROM_DEVICE);
+ kfree_sensitive(qat_req->dst_align);
unmap_src:
- if (qat_req->src_align)
- dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
- qat_req->in.rsa.enc.m);
- else
- if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
- dma_unmap_single(dev, qat_req->in.rsa.enc.m,
- ctx->key_sz, DMA_TO_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
+ dma_unmap_single(dev, qat_req->in.rsa.enc.m, ctx->key_sz,
+ DMA_TO_DEVICE);
+ kfree_sensitive(qat_req->src_align);
return ret;
}
@@ -776,7 +783,8 @@ static int qat_rsa_dec(struct akcipher_request *req)
struct qat_asym_request *qat_req =
PTR_ALIGN(akcipher_request_ctx(req), 64);
struct icp_qat_fw_pke_request *msg = &qat_req->req;
- int ret, ctr = 0;
+ u8 *vaddr;
+ int ret;
if (unlikely(!ctx->n || !ctx->d))
return -EINVAL;
@@ -785,6 +793,10 @@ static int qat_rsa_dec(struct akcipher_request *req)
req->dst_len = ctx->key_sz;
return -EOVERFLOW;
}
+
+ if (req->src_len > ctx->key_sz)
+ return -EINVAL;
+
memset(msg, '\0', sizeof(*msg));
ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
ICP_QAT_FW_COMN_REQ_FLAG_SET);
@@ -823,40 +835,37 @@ static int qat_rsa_dec(struct akcipher_request *req)
*/
if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
qat_req->src_align = NULL;
- qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src),
- req->dst_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
- return ret;
-
+ vaddr = sg_virt(req->src);
} else {
int shift = ctx->key_sz - req->src_len;
- qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
- &qat_req->in.rsa.dec.c,
- GFP_KERNEL);
+ qat_req->src_align = kzalloc(ctx->key_sz, GFP_KERNEL);
if (unlikely(!qat_req->src_align))
return ret;
scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
0, req->src_len, 0);
+ vaddr = qat_req->src_align;
}
- if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
- qat_req->dst_align = NULL;
- qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst),
- req->dst_len,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
- goto unmap_src;
+ qat_req->in.rsa.dec.c = dma_map_single(dev, vaddr, ctx->key_sz,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
+ goto unmap_src;
+ if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
+ qat_req->dst_align = NULL;
+ vaddr = sg_virt(req->dst);
} else {
- qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
- &qat_req->out.rsa.dec.m,
- GFP_KERNEL);
+ qat_req->dst_align = kzalloc(ctx->key_sz, GFP_KERNEL);
if (unlikely(!qat_req->dst_align))
goto unmap_src;
-
+ vaddr = qat_req->dst_align;
}
+ qat_req->out.rsa.dec.m = dma_map_single(dev, vaddr, ctx->key_sz,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
+ goto unmap_dst;
if (ctx->crt_mode)
qat_req->in.rsa.in_tab[6] = 0;
@@ -884,13 +893,14 @@ static int qat_rsa_dec(struct akcipher_request *req)
msg->input_param_count = 3;
msg->output_param_count = 1;
- do {
- ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg);
- } while (ret == -EBUSY && ctr++ < 100);
- if (!ret)
- return -EINPROGRESS;
+ ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
+ if (ret == -ENOSPC)
+ goto unmap_all;
+
+ return ret;
+unmap_all:
if (!dma_mapping_error(dev, qat_req->phy_out))
dma_unmap_single(dev, qat_req->phy_out,
sizeof(struct qat_rsa_output_params),
@@ -901,21 +911,15 @@ unmap_in_params:
sizeof(struct qat_rsa_input_params),
DMA_TO_DEVICE);
unmap_dst:
- if (qat_req->dst_align)
- dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
- qat_req->out.rsa.dec.m);
- else
- if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
- dma_unmap_single(dev, qat_req->out.rsa.dec.m,
- ctx->key_sz, DMA_FROM_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
+ dma_unmap_single(dev, qat_req->out.rsa.dec.m,
+ ctx->key_sz, DMA_FROM_DEVICE);
+ kfree_sensitive(qat_req->dst_align);
unmap_src:
- if (qat_req->src_align)
- dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
- qat_req->in.rsa.dec.c);
- else
- if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
- dma_unmap_single(dev, qat_req->in.rsa.dec.c,
- ctx->key_sz, DMA_TO_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
+ dma_unmap_single(dev, qat_req->in.rsa.dec.c, ctx->key_sz,
+ DMA_TO_DEVICE);
+ kfree_sensitive(qat_req->src_align);
return ret;
}
@@ -1233,18 +1237,8 @@ static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct device *dev = &GET_DEV(ctx->inst->accel_dev);
- if (ctx->n)
- dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
- if (ctx->e)
- dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
- if (ctx->d) {
- memset(ctx->d, '\0', ctx->key_sz);
- dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
- }
+ qat_rsa_clear_ctx(dev, ctx);
qat_crypto_put_instance(ctx->inst);
- ctx->n = NULL;
- ctx->e = NULL;
- ctx->d = NULL;
}
static struct akcipher_alg rsa = {
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
index 3efbb3883601..994e43fab0a4 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.c
+++ b/drivers/crypto/qat/qat_common/qat_crypto.c
@@ -136,13 +136,6 @@ int qat_crypto_dev_config(struct adf_accel_dev *accel_dev)
if (ret)
goto err;
- /* Temporarily set the number of crypto instances to zero to avoid
- * registering the crypto algorithms.
- * This will be removed when the algorithms will support the
- * CRYPTO_TFM_REQ_MAY_BACKLOG flag
- */
- instances = 0;
-
for (i = 0; i < instances; i++) {
val = i;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
@@ -328,6 +321,9 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
&inst->pke_rx);
if (ret)
goto err;
+
+ INIT_LIST_HEAD(&inst->backlog.list);
+ spin_lock_init(&inst->backlog.lock);
}
return 0;
err:
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
index b6a4c95ae003..245b6d9a3650 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.h
+++ b/drivers/crypto/qat/qat_common/qat_crypto.h
@@ -9,6 +9,19 @@
#include "adf_accel_devices.h"
#include "icp_qat_fw_la.h"
+struct qat_instance_backlog {
+ struct list_head list;
+ spinlock_t lock; /* protects backlog list */
+};
+
+struct qat_alg_req {
+ u32 *fw_req;
+ struct adf_etr_ring_data *tx_ring;
+ struct crypto_async_request *base;
+ struct list_head list;
+ struct qat_instance_backlog *backlog;
+};
+
struct qat_crypto_instance {
struct adf_etr_ring_data *sym_tx;
struct adf_etr_ring_data *sym_rx;
@@ -19,8 +32,29 @@ struct qat_crypto_instance {
unsigned long state;
int id;
atomic_t refctr;
+ struct qat_instance_backlog backlog;
};
+#define QAT_MAX_BUFF_DESC 4
+
+struct qat_alg_buf {
+ u32 len;
+ u32 resrvd;
+ u64 addr;
+} __packed;
+
+struct qat_alg_buf_list {
+ u64 resrvd;
+ u32 num_bufs;
+ u32 num_mapped_bufs;
+ struct qat_alg_buf bufers[];
+} __packed;
+
+struct qat_alg_fixed_buf_list {
+ struct qat_alg_buf_list sgl_hdr;
+ struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC];
+} __packed __aligned(64);
+
struct qat_crypto_request_buffs {
struct qat_alg_buf_list *bl;
dma_addr_t blp;
@@ -28,6 +62,10 @@ struct qat_crypto_request_buffs {
dma_addr_t bloutp;
size_t sz;
size_t sz_out;
+ bool sgl_src_valid;
+ bool sgl_dst_valid;
+ struct qat_alg_fixed_buf_list sgl_src;
+ struct qat_alg_fixed_buf_list sgl_dst;
};
struct qat_crypto_request;
@@ -53,6 +91,7 @@ struct qat_crypto_request {
u8 iv[AES_BLOCK_SIZE];
};
bool encryption;
+ struct qat_alg_req alg_req;
};
static inline bool adf_hw_dev_has_crypto(struct adf_accel_dev *accel_dev)
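
The pre-allocated sgl_src/sgl_dst tables added here let qat_alg_sgl_to_bufl() skip the GFP_ATOMIC allocation whenever a request carries at most QAT_MAX_BUFF_DESC (4) scatterlist entries. A minimal sketch of that selection pattern, shown for the source side only and using an illustrative helper name:

#include <linux/slab.h>
#include <linux/string.h>

/* Illustration only: choose the embedded descriptor table when it fits. */
static struct qat_alg_buf_list *
example_pick_src_bufl(struct qat_crypto_request_buffs *buf, int n, size_t sz,
                      int node)
{
        if (n > QAT_MAX_BUFF_DESC)
                return kzalloc_node(sz, GFP_ATOMIC, node); /* heap fallback */

        /* Small request: reuse the 64-byte aligned table inside the request. */
        memset(&buf->sgl_src.sgl_hdr, 0, sizeof(struct qat_alg_buf_list));
        buf->sgl_src_valid = true;
        return &buf->sgl_src.sgl_hdr;
}
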