#
CONFIG_RTE_LIBRTE_PMD_QAT=y
CONFIG_RTE_LIBRTE_PMD_QAT_SYM=n
+CONFIG_RTE_LIBRTE_PMD_QAT_ASYM=n
#
# Max. number of QuickAssist devices, which can be detected and attached
#
Also, offset of data to authenticate "op.sym.auth.data.offset"
must be such that it points at the start of the COUNT bytes.
+Asymmetric Crypto Service on QAT
+--------------------------------
+
+The QAT Asym PMD has support for:
+
+Limitations
+~~~~~~~~~~~
.. _building_qat:
* symmetric cryptography
* data compression
+* asymmetric cryptography
These services are provided to DPDK applications via PMDs which register to
implement the corresponding cryptodev and compressdev APIs. The PMDs use
# library symmetric crypto source files
ifeq ($(CONFIG_RTE_LIBRTE_CRYPTODEV),y)
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT_ASYM),y)
+ LDLIBS += -lrte_cryptodev
+ LDLIBS += -lcrypto
+ CFLAGS += -DBUILD_QAT_ASYM
+ SRCS-y += qat_asym.c
+ SRCS-y += qat_asym_pmd.c
+ build_qat = yes
+endif
ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT_SYM),y)
LDLIBS += -lrte_cryptodev
LDLIBS += -lcrypto
* - runtime data
*/
struct qat_sym_dev_private;
+struct qat_asym_dev_private;
struct qat_comp_dev_private;
struct qat_pci_device {
struct qat_sym_dev_private *sym_dev;
/**< link back to cryptodev private data */
struct rte_device sym_rte_dev;
- /**< This represents the crypto subset of this pci device.
+ /**< This represents the crypto sym subset of this pci device.
+ * Register with this rather than with the one in
+ * pci_dev so that its driver can have a crypto-specific name
+ */
+
+ /* Data relating to asymmetric crypto service */
+ struct qat_asym_dev_private *asym_dev;
+ /**< link back to cryptodev private data */
+ struct rte_device asym_rte_dev;
+ /**< This represents the crypto asym subset of this pci device.
* Register with this rather than with the one in
* pci_dev so that its driver can have a crypto-specific name
*/
#include "qat_device.h"
#include "qat_qp.h"
#include "qat_sym.h"
+#include "qat_asym.h"
#include "qat_comp.h"
#include "adf_transport_access_macros.h"
QAT_LOG(ERR, "QAT PMD Cannot get op_cookie");
goto create_err;
}
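+		/* Clear the cookie so every per-op field starts zeroed. */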
+ memset(qp->op_cookies[i], 0, qat_qp_conf->cookie_size);
}
qp->qat_dev_gen = qat_dev->qat_dev_gen;
else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
qat_comp_process_response(ops, resp_msg,
&tmp_qp->stats.dequeue_err_count);
+ else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC) {
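+			/* Asym responses are only processed when the PMD is
+			 * built with BUILD_QAT_ASYM defined.
+			 */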
+#ifdef BUILD_QAT_ASYM
+ qat_asym_process_response(ops, resp_msg,
+ tmp_qp->op_cookies[head / rx_queue->msg_size]);
+#endif
+ }
head = adf_modulo(head + rx_queue->msg_size,
rx_queue->modulo_mask);
# Add our sources files to the list
qat_sources += files('qat_sym_pmd.c',
'qat_sym.c',
- 'qat_sym_session.c')
+ 'qat_sym_session.c',
+ 'qat_asym_pmd.c',
+ 'qat_asym.c')
qat_ext_deps += dep
qat_cflags += '-DBUILD_QAT_SYM'
+ qat_cflags += '-DBUILD_QAT_ASYM'
endif
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <stdarg.h>
+
+#include "qat_asym.h"
+#include "icp_qat_fw_pke.h"
+#include "icp_qat_fw.h"
+#include "qat_pke_functionality_arrays.h"
+
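+/* Helper that pairs an array with its element count
+ * (as taken by qat_asym_get_sz_and_func_id()).
+ */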
+#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
+
+static int __rte_unused qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
+ size_t arr_sz, size_t *size, uint32_t *func_id)
+{
+ size_t i;
+
+ for (i = 0; i < arr_sz; i++) {
+ if (*size <= arr[i][0]) {
+ *size = arr[i][0];
+ *func_id = arr[i][1];
+ return 0;
+ }
+ }
+ return -1;
+}
+
+static void qat_asym_build_req_tmpl(void *sess_private_data,
+ struct rte_crypto_asym_xform __rte_unused *xform)
+{
+ struct icp_qat_fw_pke_request *qat_req;
+ struct qat_asym_session *session = sess_private_data;
+
+ qat_req = &session->req_tmpl;
+ memset(qat_req, 0, sizeof(*qat_req));
+ qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+
+ qat_req->pke_hdr.hdr_flags =
+ ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
+ (ICP_QAT_FW_COMN_REQ_FLAG_SET);
+ qat_req->pke_hdr.comn_req_flags = 0;
+ qat_req->pke_hdr.resrvd1 = 0;
+ qat_req->pke_hdr.resrvd2 = 0;
+ qat_req->pke_hdr.kpt_mask = 0;
+ qat_req->pke_hdr.kpt_rn_mask = 0;
+ qat_req->pke_hdr.cd_pars.content_desc_addr = 0;
+ qat_req->pke_hdr.cd_pars.content_desc_resrvd = 0;
+ qat_req->resrvd1 = 0;
+ qat_req->resrvd2 = 0;
+ qat_req->next_req_adr = 0;
+}
+
+static size_t __rte_unused max_of(int n, ...)
+{
+ va_list args;
+ size_t len = 0, num;
+ int i;
+
+ va_start(args, n);
+ len = va_arg(args, size_t);
+
+ for (i = 0; i < n - 1; i++) {
+ num = va_arg(args, size_t);
+ if (num > len)
+ len = num;
+ }
+ va_end(args);
+
+ return len;
+}
+
+static void __rte_unused qat_clear_arrays(struct qat_asym_op_cookie *cookie,
+ int in_count, int out_count, int in_size, int out_size)
+{
+ int i;
+
+ for (i = 0; i < in_count; i++)
+ memset(cookie->input_array[i], 0x0, in_size);
+ for (i = 0; i < out_count; i++)
+ memset(cookie->output_array[i], 0x0, out_size);
+}
+
+static int __rte_unused qat_asym_check_nonzero(rte_crypto_param n)
+{
+ if (n.length < 8) {
+		/* Not a valid case for any cryptographic function except for
+		 * the DH generator, which can often be only one byte long
+		 */
+ size_t i;
+
+ if (n.data[n.length - 1] == 0x0) {
+ for (i = 0; i < n.length - 1; i++)
+ if (n.data[i] != 0x0)
+ break;
+ if (i == n.length - 1)
+ return QAT_ASYM_ERROR_INVALID_PARAM;
+ }
+	} else if (*(uint64_t *)&n.data[n.length - 8] == 0) {
+ /* Very likely it is zeroed modulus */
+ size_t i;
+
+ for (i = 0; i < n.length - 8; i++)
+ if (n.data[i] != 0x0)
+ break;
+ if (i == n.length - 8)
+ return QAT_ASYM_ERROR_INVALID_PARAM;
+ }
+
+ return 0;
+}
+
+int
+qat_asym_build_request(void *in_op,
+ uint8_t *out_msg,
+ void *op_cookie,
+ __rte_unused enum qat_device_gen qat_dev_gen)
+{
+ struct qat_asym_session *ctx;
+ struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
+ struct rte_crypto_asym_op __rte_unused *asym_op = op->asym;
+ struct icp_qat_fw_pke_request *qat_req =
+ (struct icp_qat_fw_pke_request *)out_msg;
+ struct qat_asym_op_cookie *cookie =
+ (struct qat_asym_op_cookie *)op_cookie;
+
+ ctx = (struct qat_asym_session *)get_asym_session_private_data(
+ op->asym->session, cryptodev_qat_asym_driver_id);
+ rte_mov64((uint8_t *)qat_req, (const uint8_t *)&(ctx->req_tmpl));
+ qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op;
+
+ qat_req->pke_mid.src_data_addr = cookie->input_addr;
+ qat_req->pke_mid.dest_data_addr = cookie->output_addr;
+
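+	/* No asymmetric algorithm is handled yet, so every request is
+	 * flagged as invalid below.
+	 */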
+ goto error;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+ sizeof(struct icp_qat_fw_pke_request));
+#endif
+ return 0;
+error:
+ qat_req->output_param_count = 0;
+ qat_req->input_param_count = 0;
+ qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
+ cookie->error |= QAT_ASYM_ERROR_INVALID_PARAM;
+
+ return 0;
+}
+
+void
+qat_asym_process_response(void **op, uint8_t *resp,
+ void *op_cookie)
+{
+ struct icp_qat_fw_pke_resp *resp_msg =
+ (struct icp_qat_fw_pke_resp *)resp;
+ struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+ (resp_msg->opaque);
+ struct qat_asym_op_cookie *cookie = op_cookie;
+
+ *op = rx_op;
+ rx_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ if (cookie->error) {
+ cookie->error = 0;
+ rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ QAT_DP_LOG(ERR, "Cookie status returned error");
+ } else {
+ if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+ resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
+ rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ QAT_DP_LOG(ERR, "Asymmetric response status returned error");
+ }
+ if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
+ rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ QAT_DP_LOG(ERR, "Asymmetric common status returned error");
+ }
+ }
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg,
+ sizeof(struct icp_qat_fw_pke_resp));
+#endif
+}
+
+int
+qat_asym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_asym_xform *xform,
+ struct rte_cryptodev_asym_session *sess,
+ struct rte_mempool *mempool)
+{
+ int err;
+ void *sess_private_data;
+
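+	/* No asymmetric xforms are supported yet, so session creation is
+	 * unconditionally rejected at this point.
+	 */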
+ err = -EINVAL;
+ goto error;
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ QAT_LOG(ERR,
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ qat_asym_build_req_tmpl(sess_private_data, xform);
+
+ set_asym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+error:
+ return err;
+}
+
+unsigned int qat_asym_session_get_private_size(
+ struct rte_cryptodev *dev __rte_unused)
+{
+ return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8);
+}
+
+void
+qat_asym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_asym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_asym_session_private_data(sess, index);
+ struct qat_asym_session *s = (struct qat_asym_session *)sess_priv;
+
+ if (sess_priv) {
+ memset(s, 0, qat_asym_session_get_private_size(dev));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+ set_asym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _QAT_ASYM_H_
+#define _QAT_ASYM_H_
+
+#include <rte_cryptodev_pmd.h>
+#include <rte_crypto_asym.h>
+#include "icp_qat_fw_pke.h"
+#include "qat_common.h"
+#include "qat_asym_pmd.h"
+#include "icp_qat_fw.h"
+
+typedef uint64_t large_int_ptr;
+#define MAX_PKE_PARAMS 8
+#define QAT_PKE_MAX_LN_SIZE 512
+#define _PKE_ALIGN_ __attribute__((__aligned__(8)))
+
+#define QAT_ASYM_ERROR_INVALID_PARAM 0x01
+
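+/* Per-operation cookie providing DMA-able storage for the PKE parameter
+ * pointer tables and the input/output data buffers; the IOVAs of these
+ * fields are precomputed at queue pair setup.
+ */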
+struct qat_asym_op_cookie {
+ size_t alg_size;
+ uint64_t error;
+ rte_iova_t input_addr;
+ rte_iova_t output_addr;
+ large_int_ptr input_params_ptrs[MAX_PKE_PARAMS] _PKE_ALIGN_;
+ large_int_ptr output_params_ptrs[MAX_PKE_PARAMS] _PKE_ALIGN_;
+ union {
+ uint8_t input_array[MAX_PKE_PARAMS][QAT_PKE_MAX_LN_SIZE];
+ uint8_t input_buffer[MAX_PKE_PARAMS * QAT_PKE_MAX_LN_SIZE];
+ } _PKE_ALIGN_;
+ uint8_t output_array[MAX_PKE_PARAMS][QAT_PKE_MAX_LN_SIZE] _PKE_ALIGN_;
+} _PKE_ALIGN_;
+
+enum qat_asym_alg {
+ QAT_PKE_RSA,
+ QAT_PKE_DH,
+ QAT_PKE_DSA,
+ QAT_PKE_MODEXP,
+ QAT_PKE_MODINV,
+};
+
+struct qat_asym_session {
+ enum qat_asym_alg alg;
+ struct icp_qat_fw_pke_request req_tmpl;
+ uint64_t flags;
+};
+
+int
+qat_asym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_asym_xform *xform,
+ struct rte_cryptodev_asym_session *sess,
+ struct rte_mempool *mempool);
+
+unsigned int
+qat_asym_session_get_private_size(struct rte_cryptodev *dev);
+
+void
+qat_asym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_asym_session *sess);
+
+/*
+ * Build a PKE request to be sent to the firmware, partially reusing the
+ * template request generated during session creation.
+ *
+ * @param in_op Pointer to the crypto operation, for every
+ * service it points to service specific struct.
+ * @param out_msg Message to be returned to enqueue function
+ * @param op_cookie Cookie pointer that holds private metadata
+ * @param qat_dev_gen Generation of QAT hardware
+ *
+ * @return
+ *   This function always returns zero
+ *   for reasons of backward compatibility.
+ * - 0: Always returned
+ *
+ */
+int
+qat_asym_build_request(void *in_op, uint8_t *out_msg,
+ void *op_cookie, enum qat_device_gen qat_dev_gen);
+
+/*
+ * Process PKE response received from outgoing queue of QAT
+ *
+ * @param op on return, holds a pointer to the rte_crypto_op
+ * that the response message refers to
+ * @param resp icp_qat_fw_pke_resp message received from
+ * outgoing fw message queue
+ * @param op_cookie Cookie pointer that holds private metadata
+ *
+ */
+void
+qat_asym_process_response(void __rte_unused **op, uint8_t *resp,
+ void *op_cookie);
+
+#endif /* _QAT_ASYM_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _QAT_ASYM_CAPABILITIES_H_
+#define _QAT_ASYM_CAPABILITIES_H_
+
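+/* Empty for now; asymmetric algorithm capability entries are expected to
+ * be listed here once the corresponding algorithms are implemented.
+ */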
+#define QAT_BASE_GEN1_ASYM_CAPABILITIES
+
+#endif /* _QAT_ASYM_CAPABILITIES_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <rte_cryptodev_pmd.h>
+
+#include "qat_logs.h"
+
+#include "qat_asym.h"
+#include "qat_asym_pmd.h"
+#include "qat_sym_capabilities.h"
+#include "qat_asym_capabilities.h"
+
+uint8_t cryptodev_qat_asym_driver_id;
+
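+/* For now the capability array holds only the end-of-list marker, since
+ * QAT_BASE_GEN1_ASYM_CAPABILITIES is still empty.
+ */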
+static const struct rte_cryptodev_capabilities qat_gen1_asym_capabilities[] = {
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static int qat_asym_qp_release(struct rte_cryptodev *dev,
+ uint16_t queue_pair_id);
+
+static int qat_asym_dev_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+static int qat_asym_dev_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+static void qat_asym_dev_stop(__rte_unused struct rte_cryptodev *dev)
+{
+
+}
+
+static int qat_asym_dev_close(struct rte_cryptodev *dev)
+{
+ int i, ret;
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ ret = qat_asym_qp_release(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qat_asym_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ struct qat_asym_dev_private *internals = dev->data->dev_private;
+ const struct qat_qp_hw_data *asym_hw_qps =
+ qat_gen_config[internals->qat_dev->qat_dev_gen]
+ .qp_hw_data[QAT_SERVICE_ASYMMETRIC];
+
+ if (info != NULL) {
+ info->max_nb_queue_pairs = qat_qps_per_service(asym_hw_qps,
+ QAT_SERVICE_ASYMMETRIC);
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = internals->qat_dev_capabilities;
+ info->driver_id = cryptodev_qat_asym_driver_id;
+ /* No limit of number of sessions */
+ info->sym.max_nb_sessions = 0;
+ }
+}
+
+static void qat_asym_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ struct qat_common_stats qat_stats = {0};
+ struct qat_asym_dev_private *qat_priv;
+
+ if (stats == NULL || dev == NULL) {
+ QAT_LOG(ERR, "invalid ptr: stats %p, dev %p", stats, dev);
+ return;
+ }
+ qat_priv = dev->data->dev_private;
+
+ qat_stats_get(qat_priv->qat_dev, &qat_stats, QAT_SERVICE_ASYMMETRIC);
+ stats->enqueued_count = qat_stats.enqueued_count;
+ stats->dequeued_count = qat_stats.dequeued_count;
+ stats->enqueue_err_count = qat_stats.enqueue_err_count;
+ stats->dequeue_err_count = qat_stats.dequeue_err_count;
+}
+
+static void qat_asym_stats_reset(struct rte_cryptodev *dev)
+{
+ struct qat_asym_dev_private *qat_priv;
+
+ if (dev == NULL) {
+ QAT_LOG(ERR, "invalid asymmetric cryptodev ptr %p", dev);
+ return;
+ }
+ qat_priv = dev->data->dev_private;
+
+ qat_stats_reset(qat_priv->qat_dev, QAT_SERVICE_ASYMMETRIC);
+}
+
+static int qat_asym_qp_release(struct rte_cryptodev *dev,
+ uint16_t queue_pair_id)
+{
+ struct qat_asym_dev_private *qat_private = dev->data->dev_private;
+
+ QAT_LOG(DEBUG, "Release asym qp %u on device %d",
+ queue_pair_id, dev->data->dev_id);
+
+ qat_private->qat_dev->qps_in_use[QAT_SERVICE_ASYMMETRIC][queue_pair_id]
+ = NULL;
+
+ return qat_qp_release((struct qat_qp **)
+ &(dev->data->queue_pairs[queue_pair_id]));
+}
+
+static int qat_asym_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id)
+{
+ struct qat_qp_config qat_qp_conf;
+ struct qat_qp *qp;
+ int ret = 0;
+ uint32_t i;
+
+ struct qat_qp **qp_addr =
+ (struct qat_qp **)&(dev->data->queue_pairs[qp_id]);
+ struct qat_asym_dev_private *qat_private = dev->data->dev_private;
+ const struct qat_qp_hw_data *asym_hw_qps =
+ qat_gen_config[qat_private->qat_dev->qat_dev_gen]
+ .qp_hw_data[QAT_SERVICE_ASYMMETRIC];
+ const struct qat_qp_hw_data *qp_hw_data = asym_hw_qps + qp_id;
+
+ /* If qp is already in use free ring memory and qp metadata. */
+ if (*qp_addr != NULL) {
+ ret = qat_asym_qp_release(dev, qp_id);
+ if (ret < 0)
+ return ret;
+ }
+ if (qp_id >= qat_qps_per_service(asym_hw_qps, QAT_SERVICE_ASYMMETRIC)) {
+ QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
+ return -EINVAL;
+ }
+
+ qat_qp_conf.hw = qp_hw_data;
+ qat_qp_conf.build_request = qat_asym_build_request;
+ qat_qp_conf.cookie_size = sizeof(struct qat_asym_op_cookie);
+ qat_qp_conf.nb_descriptors = qp_conf->nb_descriptors;
+ qat_qp_conf.socket_id = socket_id;
+ qat_qp_conf.service_str = "asym";
+
+ ret = qat_qp_setup(qat_private->qat_dev, qp_addr, qp_id, &qat_qp_conf);
+ if (ret != 0)
+ return ret;
+
+ /* store a link to the qp in the qat_pci_device */
+ qat_private->qat_dev->qps_in_use[QAT_SERVICE_ASYMMETRIC][qp_id]
+ = *qp_addr;
+
+ qp = (struct qat_qp *)*qp_addr;
+
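+	/* Precompute the IOVA of each cookie's parameter pointer tables and
+	 * input/output arrays so they can be placed directly in PKE requests.
+	 */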
+ for (i = 0; i < qp->nb_descriptors; i++) {
+ int j;
+
+		struct qat_asym_op_cookie *cookie =
+				qp->op_cookies[i];
+ cookie->input_addr = rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_asym_op_cookie,
+ input_params_ptrs);
+
+ cookie->output_addr = rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_asym_op_cookie,
+ output_params_ptrs);
+
+ for (j = 0; j < 8; j++) {
+ cookie->input_params_ptrs[j] =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_asym_op_cookie,
+ input_array[j]);
+ cookie->output_params_ptrs[j] =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_asym_op_cookie,
+ output_array[j]);
+ }
+ }
+
+ return ret;
+}
+
+static struct rte_cryptodev_ops crypto_qat_ops = {
+
+ /* Device related operations */
+ .dev_configure = qat_asym_dev_config,
+ .dev_start = qat_asym_dev_start,
+ .dev_stop = qat_asym_dev_stop,
+ .dev_close = qat_asym_dev_close,
+ .dev_infos_get = qat_asym_dev_info_get,
+
+ .stats_get = qat_asym_stats_get,
+ .stats_reset = qat_asym_stats_reset,
+ .queue_pair_setup = qat_asym_qp_setup,
+ .queue_pair_release = qat_asym_qp_release,
+ .queue_pair_count = NULL,
+
+ /* Crypto related operations */
+ .asym_session_get_size = qat_asym_session_get_private_size,
+ .asym_session_configure = qat_asym_session_configure,
+ .asym_session_clear = qat_asym_session_clear
+};
+
+uint16_t qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+}
+
+uint16_t qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+}
+
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
+ */
+static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
+static const struct rte_driver cryptodev_qat_asym_driver = {
+ .name = qat_asym_drv_name,
+ .alias = qat_asym_drv_name
+};
+
+int
+qat_asym_dev_create(struct qat_pci_device *qat_pci_dev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = qat_pci_dev->pci_dev->device.numa_node,
+ .private_data_size = sizeof(struct qat_asym_dev_private)
+ };
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ struct rte_cryptodev *cryptodev;
+ struct qat_asym_dev_private *internals;
+
+ snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+ qat_pci_dev->name, "asym");
+	QAT_LOG(DEBUG, "Creating QAT ASYM device %s", name);
+
+ /* Populate subset device to use in cryptodev device creation */
+ qat_pci_dev->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
+ qat_pci_dev->asym_rte_dev.numa_node =
+ qat_pci_dev->pci_dev->device.numa_node;
+ qat_pci_dev->asym_rte_dev.devargs = NULL;
+
+ cryptodev = rte_cryptodev_pmd_create(name,
+ &(qat_pci_dev->asym_rte_dev), &init_params);
+
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ qat_pci_dev->asym_rte_dev.name = cryptodev->data->name;
+ cryptodev->driver_id = cryptodev_qat_asym_driver_id;
+ cryptodev->dev_ops = &crypto_qat_ops;
+
+ cryptodev->enqueue_burst = qat_asym_pmd_enqueue_op_burst;
+ cryptodev->dequeue_burst = qat_asym_pmd_dequeue_op_burst;
+
+ cryptodev->feature_flags = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED;
+ internals = cryptodev->data->dev_private;
+ internals->qat_dev = qat_pci_dev;
+ qat_pci_dev->asym_dev = internals;
+
+ internals->asym_dev_id = cryptodev->data->dev_id;
+ internals->qat_dev_capabilities = qat_gen1_asym_capabilities;
+
+ QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
+ cryptodev->data->name, internals->asym_dev_id);
+ return 0;
+}
+
+int
+qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+ struct rte_cryptodev *cryptodev;
+
+ if (qat_pci_dev == NULL)
+ return -ENODEV;
+ if (qat_pci_dev->asym_dev == NULL)
+ return 0;
+
+ /* free crypto device */
+ cryptodev = rte_cryptodev_pmd_get_dev(
+ qat_pci_dev->asym_dev->asym_dev_id);
+ rte_cryptodev_pmd_destroy(cryptodev);
+ qat_pci_dev->asym_rte_dev.name = NULL;
+ qat_pci_dev->asym_dev = NULL;
+
+ return 0;
+}
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+ cryptodev_qat_asym_driver,
+ cryptodev_qat_asym_driver_id);
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+
+#ifndef _QAT_ASYM_PMD_H_
+#define _QAT_ASYM_PMD_H_
+
+#include <rte_cryptodev.h>
+#include "qat_device.h"
+
+/** Intel(R) QAT Asymmetric Crypto PMD driver name */
+#define CRYPTODEV_NAME_QAT_ASYM_PMD crypto_qat_asym
+
+
+extern uint8_t cryptodev_qat_asym_driver_id;
+
+/** private data structure for a QAT device.
+ * This QAT device is a device offering only asymmetric crypto service,
+ * there can be one of these on each qat_pci_device (VF).
+ */
+struct qat_asym_dev_private {
+ struct qat_pci_device *qat_dev;
+ /**< The qat pci device hosting the service */
+ uint8_t asym_dev_id;
+ /**< Device instance for this rte_cryptodev */
+ const struct rte_cryptodev_capabilities *qat_dev_capabilities;
+ /* QAT device asymmetric crypto capabilities */
+};
+
+uint16_t
+qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+
+uint16_t
+qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+
+int qat_asym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_asym_xform *xform,
+ struct rte_cryptodev_asym_session *sess,
+ struct rte_mempool *mempool);
+
+int
+qat_asym_dev_create(struct qat_pci_device *qat_pci_dev);
+
+int
+qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev);
+
+#endif /* _QAT_ASYM_PMD_H_ */
return 0;
}
-
static struct cryptodev_driver qat_crypto_drv;
RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
cryptodev_qat_sym_driver,
/** private data structure for a QAT device.
* This QAT device is a device offering only symmetric crypto service,
- * there can be one of these on each qat_pci_device (VF),
- * in future there may also be private data structures for other services.
+ * there can be one of these on each qat_pci_device (VF).
*/
struct qat_sym_dev_private {
struct qat_pci_device *qat_dev;
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += -lrte_pmd_null_crypto
ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT),y)
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_QAT_SYM) += -lrte_pmd_qat -lcrypto
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_QAT_ASYM) += -lrte_pmd_qat -lcrypto
endif # CONFIG_RTE_LIBRTE_PMD_QAT
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += -lrte_pmd_snow3g
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += -L$(LIBSSO_SNOW3G_PATH)/build -lsso_snow3g