From: Arek Kusztal
Date: Fri, 23 Dec 2016 12:51:09 +0000 (+0000)
Subject: crypto/qat: add SGL capability
X-Git-Tag: spdx-start~4716
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=e09231eaa2af;p=dpdk.git

crypto/qat: add SGL capability

This commit adds scatter-gather list capability to the Intel QuickAssist
Technology driver.

Signed-off-by: Arek Kusztal
Acked-by: Fiona Trahe
---

diff --git a/doc/guides/cryptodevs/qat.rst b/doc/guides/cryptodevs/qat.rst
index ab3972664a..9ecd19b211 100644
--- a/doc/guides/cryptodevs/qat.rst
+++ b/doc/guides/cryptodevs/qat.rst
@@ -74,7 +74,6 @@ Hash algorithms:
 Limitations
 -----------
 
-* Chained mbufs are not supported.
 * Hash only is not supported except SNOW 3G UIA2 and KASUMI F9.
 * Only supports the session-oriented API implementation (session-less APIs are not supported).
 * SNOW 3G (UEA2) and KASUMI (F8) supported only if cipher length, cipher offset fields are byte-aligned.
diff --git a/doc/guides/rel_notes/release_17_02.rst b/doc/guides/rel_notes/release_17_02.rst
index c621c07dd1..dfc1b96500 100644
--- a/doc/guides/rel_notes/release_17_02.rst
+++ b/doc/guides/rel_notes/release_17_02.rst
@@ -153,6 +153,7 @@ New Features
   The QAT PMD was updated with additional support for:
 
   * DES algorithm.
+  * Scatter-gather list (SGL) support.
 
 * **Added Elastic Flow Distributor library (rte_efd).**
diff --git a/drivers/crypto/qat/qat_adf/qat_algs.h b/drivers/crypto/qat/qat_adf/qat_algs.h
index 5409e1ec20..b9e3fd6c7b 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs.h
+++ b/drivers/crypto/qat/qat_adf/qat_algs.h
@@ -47,6 +47,7 @@
 #ifndef _ICP_QAT_ALGS_H_
 #define _ICP_QAT_ALGS_H_
 #include
+#include
 #include "icp_qat_hw.h"
 #include "icp_qat_fw.h"
 #include "icp_qat_fw_la.h"
@@ -79,13 +80,25 @@ struct qat_alg_buf {
 	uint64_t addr;
 } __rte_packed;
 
+/*
+ * Maximum number of SGL entries
+ */
+#define QAT_SGL_MAX_NUMBER	16
+
 struct qat_alg_buf_list {
 	uint64_t resrvd;
 	uint32_t num_bufs;
 	uint32_t num_mapped_bufs;
-	struct qat_alg_buf bufers[];
+	struct qat_alg_buf bufers[QAT_SGL_MAX_NUMBER];
 } __rte_packed __rte_cache_aligned;
 
+struct qat_crypto_op_cookie {
+	struct qat_alg_buf_list qat_sgl_list_src;
+	struct qat_alg_buf_list qat_sgl_list_dst;
+	phys_addr_t qat_sgl_src_phys_addr;
+	phys_addr_t qat_sgl_dst_phys_addr;
+};
+
 /* Common content descriptor */
 struct qat_alg_cd {
 	struct icp_qat_hw_cipher_algo_blk cipher;
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index fa78c60f35..025341df07 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -523,7 +523,8 @@ static inline uint32_t adf_modulo(uint32_t data, uint32_t shift);
 
 static inline int
-qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg);
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
+		struct qat_crypto_op_cookie *qat_op_cookie);
 
 void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
 		void *session)
@@ -900,9 +901,16 @@ qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
 	}
 
 	while (nb_ops_sent != nb_ops_possible) {
-		ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail);
+		ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail,
+				tmp_qp->op_cookies[tail / queue->msg_size]);
 		if (ret != 0) {
 			tmp_qp->stats.enqueue_err_count++;
+			/*
+			 * This message cannot be enqueued,
+			 * decrease number of ops that were not sent
+			 */
+			rte_atomic16_sub(&tmp_qp->inflights16,
+					nb_ops_possible - nb_ops_sent);
 			if (nb_ops_sent == 0)
 				return 0;
 			goto kick_tail;
@@ -941,7 +949,7 @@ qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
 			rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
-					sizeof(struct icp_qat_fw_comn_resp));
+				sizeof(struct icp_qat_fw_comn_resp));
 #endif
 			if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
 					ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
@@ -972,8 +980,57 @@ qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
 }
 
 static inline int
-qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
+qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
+		struct qat_alg_buf_list *list, uint32_t data_len)
+{
+	int nr = 1;
+
+	uint32_t buf_len = rte_pktmbuf_mtophys(buf) -
+			buff_start + rte_pktmbuf_data_len(buf);
+
+	list->bufers[0].addr = buff_start;
+	list->bufers[0].resrvd = 0;
+	list->bufers[0].len = buf_len;
+
+	if (data_len <= buf_len) {
+		list->num_bufs = nr;
+		list->bufers[0].len = data_len;
+		return 0;
+	}
+
+	buf = buf->next;
+	while (buf) {
+		if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
+			PMD_DRV_LOG(ERR, "QAT PMD exceeded size of QAT SGL"
+					" entry(%u)",
+					QAT_SGL_MAX_NUMBER);
+			return -EINVAL;
+		}
+
+		list->bufers[nr].len = rte_pktmbuf_data_len(buf);
+		list->bufers[nr].resrvd = 0;
+		list->bufers[nr].addr = rte_pktmbuf_mtophys(buf);
+
+		buf_len += list->bufers[nr].len;
+		buf = buf->next;
+
+		if (buf_len > data_len) {
+			list->bufers[nr].len -=
+				buf_len - data_len;
+			buf = NULL;
+		}
+		++nr;
+	}
+	list->num_bufs = nr;
+
+	return 0;
+}
+
+static inline int
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
+		struct qat_crypto_op_cookie *qat_op_cookie)
 {
+	int ret = 0;
 	struct qat_session *ctx;
 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
 	struct icp_qat_fw_la_auth_req_params *auth_param;
@@ -983,6 +1040,7 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
 	uint32_t auth_len = 0, auth_ofs = 0;
 	uint32_t min_ofs = 0;
 	uint64_t src_buf_start = 0, dst_buf_start = 0;
+	uint8_t do_sgl = 0;
 
 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
@@ -1100,10 +1158,16 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
 	}
 
+	if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
+		do_sgl = 1;
+
 	/* adjust for chain case */
 	if (do_cipher && do_auth)
 		min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
 
+	if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
+		min_ofs = 0;
+
 	if (unlikely(op->sym->m_dst != NULL)) {
 		/* Out-of-place operation (OOP)
 		 * Don't align DMA start. DMA the minimum data-set
@@ -1113,6 +1177,7 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
 			rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs);
 		dst_buf_start =
 			rte_pktmbuf_mtophys_offset(op->sym->m_dst, min_ofs);
+
 	} else {
 		/* In-place operation
 		 * Start DMA at nearest aligned address below min_ofs
@@ -1158,8 +1223,43 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
 		(cipher_param->cipher_offset + cipher_param->cipher_length)
 		: (auth_param->auth_off + auth_param->auth_len);
 
-	qat_req->comn_mid.src_data_addr = src_buf_start;
-	qat_req->comn_mid.dest_data_addr = dst_buf_start;
+	if (do_sgl) {
+
+		ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
+				QAT_COMN_PTR_TYPE_SGL);
+		ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
+				&qat_op_cookie->qat_sgl_list_src,
+				qat_req->comn_mid.src_length);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array");
+			return ret;
+		}
+
+		if (likely(op->sym->m_dst == NULL))
+			qat_req->comn_mid.dest_data_addr =
+				qat_req->comn_mid.src_data_addr =
+				qat_op_cookie->qat_sgl_src_phys_addr;
+		else {
+			ret = qat_sgl_fill_array(op->sym->m_dst,
+					dst_buf_start,
+					&qat_op_cookie->qat_sgl_list_dst,
+					qat_req->comn_mid.dst_length);
+
+			if (ret) {
+				PMD_DRV_LOG(ERR, "QAT PMD Cannot "
+						"fill sgl array");
+				return ret;
+			}
+
+			qat_req->comn_mid.src_data_addr =
+				qat_op_cookie->qat_sgl_src_phys_addr;
+			qat_req->comn_mid.dest_data_addr =
+				qat_op_cookie->qat_sgl_dst_phys_addr;
+		}
+	} else {
+		qat_req->comn_mid.src_data_addr = src_buf_start;
+		qat_req->comn_mid.dest_data_addr = dst_buf_start;
+	}
 
 	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
 			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
@@ -1191,7 +1291,6 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
 		}
 	}
 
-
 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
 	rte_hexdump(stdout, "qat_req:", qat_req,
 			sizeof(struct icp_qat_fw_la_bulk_req));
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 6b84488106..5766817b56 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -69,6 +69,9 @@ struct qat_qp {
 	struct qat_queue	tx_q;
 	struct qat_queue	rx_q;
 	struct rte_cryptodev_stats stats;
+	struct rte_mempool *op_cookie_pool;
+	void **op_cookies;
+	uint32_t nb_descriptors;
 } __rte_cache_aligned;
 
 /** private data structure for each QAT device */
diff --git a/drivers/crypto/qat/qat_qp.c b/drivers/crypto/qat/qat_qp.c
index 2e7188bda2..b85b34dc1d 100644
--- a/drivers/crypto/qat/qat_qp.c
+++ b/drivers/crypto/qat/qat_qp.c
@@ -41,6 +41,7 @@
 
 #include "qat_logs.h"
 #include "qat_crypto.h"
+#include "qat_algs.h"
 #include "adf_transport_access_macros.h"
 
 #define ADF_MAX_SYM_DESC	4096
@@ -136,6 +137,8 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
 {
 	struct qat_qp *qp;
 	int ret;
+	char op_cookie_pool_name[RTE_RING_NAMESIZE];
+	uint32_t i;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -166,7 +169,6 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
 				queue_pair_id);
 		return -EINVAL;
 	}
-
 	/* Allocate the queue pair data structure. */
 	qp = rte_zmalloc("qat PMD qp metadata",
 			sizeof(*qp), RTE_CACHE_LINE_SIZE);
@@ -174,6 +176,11 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
 		PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
 		return -ENOMEM;
 	}
+	qp->nb_descriptors = qp_conf->nb_descriptors;
+	qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
+			qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
+			RTE_CACHE_LINE_SIZE);
+
 	qp->mmap_bar_addr = dev->pci_dev->mem_resource[0].addr;
 	rte_atomic16_init(&qp->inflights16);
 
@@ -191,8 +198,47 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
 		qat_queue_delete(&(qp->tx_q));
 		goto create_err;
 	}
+
 	adf_configure_queues(qp);
 	adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
+	snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu",
+		dev->driver->pci_drv.driver.name, dev->data->dev_id,
+		queue_pair_id);
+
+	qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
+	if (qp->op_cookie_pool == NULL)
+		qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
+				qp->nb_descriptors,
+				sizeof(struct qat_crypto_op_cookie), 64, 0,
+				NULL, NULL, NULL, NULL, socket_id,
+				0);
+	if (!qp->op_cookie_pool) {
+		PMD_DRV_LOG(ERR, "QAT PMD Cannot create"
+				" op mempool");
+		goto create_err;
+	}
+
+	for (i = 0; i < qp->nb_descriptors; i++) {
+		if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
+			PMD_DRV_LOG(ERR, "QAT PMD Cannot get op_cookie");
+			return -EFAULT;
+		}
+
+		struct qat_crypto_op_cookie *sql_cookie =
+				qp->op_cookies[i];
+
+		sql_cookie->qat_sgl_src_phys_addr =
+				rte_mempool_virt2phy(qp->op_cookie_pool,
+				sql_cookie) +
+				offsetof(struct qat_crypto_op_cookie,
+				qat_sgl_list_src);
+
+		sql_cookie->qat_sgl_dst_phys_addr =
+				rte_mempool_virt2phy(qp->op_cookie_pool,
+				sql_cookie) +
+				offsetof(struct qat_crypto_op_cookie,
+				qat_sgl_list_dst);
+	}
 
 	dev->data->queue_pairs[queue_pair_id] = qp;
 	return 0;
@@ -205,6 +251,7 @@ int qat_crypto_sym_qp_release(struct rte_cryptodev *dev,
 		uint16_t queue_pair_id)
 {
 	struct qat_qp *qp =
 			(struct qat_qp *)dev->data->queue_pairs[queue_pair_id];
+	uint32_t i;
 
 	PMD_INIT_FUNC_TRACE();
 	if (qp == NULL) {
@@ -221,6 +268,14 @@ int qat_crypto_sym_qp_release(struct rte_cryptodev *dev,
 	}
 
 	adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);
+
+	for (i = 0; i < qp->nb_descriptors; i++)
+		rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);
+
+	if (qp->op_cookie_pool)
+		rte_mempool_free(qp->op_cookie_pool);
+
+	rte_free(qp->op_cookies);
 	rte_free(qp);
 	dev->data->queue_pairs[queue_pair_id] = NULL;
 	return 0;
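
With this patch applied, the PMD accepts multi-segment (chained) mbufs: at enqueue time it
checks m_src->next (and m_dst->next for out-of-place operations), flattens the chain into the
per-operation cookie's qat_alg_buf_list and points the firmware request at that SGL. Chains
longer than QAT_SGL_MAX_NUMBER (16) segments are rejected with -EINVAL. The snippet below is a
minimal, hypothetical sketch of how an application could exercise the new path; it is not part
of the patch. The mempools "mbuf_pool" and "op_pool", the session "sess" and the dev_id/qp_id
values are placeholders assumed to have been set up with the usual rte_mempool/rte_cryptodev
calls, and the symmetric-op field layout follows the DPDK 17.02 API; IV setup and error
cleanup are omitted for brevity.

#include <errno.h>
#include <rte_mbuf.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>

/* Hypothetical helper: cipher 2048 bytes held in two chained mbuf segments. */
static int
enqueue_chained_cipher(uint8_t dev_id, uint16_t qp_id,
		struct rte_mempool *mbuf_pool, struct rte_mempool *op_pool,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_mbuf *seg0 = rte_pktmbuf_alloc(mbuf_pool);
	struct rte_mbuf *seg1 = rte_pktmbuf_alloc(mbuf_pool);
	struct rte_crypto_op *op;

	if (seg0 == NULL || seg1 == NULL)
		return -ENOMEM;

	/* Reserve 1024 bytes of plaintext in each segment (contents omitted). */
	if (rte_pktmbuf_append(seg0, 1024) == NULL ||
			rte_pktmbuf_append(seg1, 1024) == NULL)
		return -ENOMEM;

	/* Link the segments; seg0->next now points to seg1. */
	if (rte_pktmbuf_chain(seg0, seg1) != 0)
		return -EINVAL;

	op = rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
	if (op == NULL)
		return -ENOMEM;

	rte_crypto_op_attach_sym_session(op, sess);

	/* The chained mbuf is passed as-is; the PMD detects m_src->next
	 * and builds the firmware SGL in the per-op cookie. */
	op->sym->m_src = seg0;
	op->sym->cipher.data.offset = 0;
	op->sym->cipher.data.length = 2048;

	return rte_cryptodev_enqueue_burst(dev_id, qp_id, &op, 1) == 1 ?
			0 : -EAGAIN;
}

Aside from chaining the source mbuf, nothing changes for the caller; the SGL conversion is
handled entirely inside qat_write_hw_desc_entry() using the cookie pre-allocated per
descriptor at queue pair setup.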