common/qat: add scatter-gather header
author: Fiona Trahe <fiona.trahe@intel.com>
Mon, 23 Jul 2018 13:05:35 +0000 (14:05 +0100)
committer: Pablo de Lara <pablo.de.lara.guarch@intel.com>
Wed, 25 Jul 2018 06:19:54 +0000 (08:19 +0200)
This patch refactors the sgl struct so it includes a flexible
array of flat buffers as sym and compress PMDs can have
different size sgls.

Signed-off-by: Tomasz Jozwiak <tomaszx.jozwiak@intel.com>
Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>
drivers/common/qat/qat_common.c
drivers/common/qat/qat_common.h
drivers/crypto/qat/qat_sym.c
drivers/crypto/qat/qat_sym.h

index c206d3b..bc04c07 100644 (file)
@@ -8,40 +8,53 @@
 
 int
 qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buf_start,
-               struct qat_sgl *list, uint32_t data_len)
+               void *list_in, uint32_t data_len,
+               const uint16_t max_segs)
 {
        int nr = 1;
-
-       uint32_t buf_len = rte_pktmbuf_iova(buf) -
-                       buf_start + rte_pktmbuf_data_len(buf);
+       struct qat_sgl *list = (struct qat_sgl *)list_in;
+       /* buf_start allows the first buffer to start at an address before or
+        * after the mbuf data start. It is used either to align the DMA
+        * optimally to a 64-byte boundary or to start the DMA from an offset.
+        */
+       uint32_t buf_len;
+       uint32_t first_buf_len = rte_pktmbuf_data_len(buf) +
+                       (rte_pktmbuf_mtophys(buf) - buf_start);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+       uint8_t *virt_addr[max_segs];
+       virt_addr[0] = rte_pktmbuf_mtod(buf, uint8_t*) +
+                       (rte_pktmbuf_mtophys(buf) - buf_start);
+#endif
 
        list->buffers[0].addr = buf_start;
        list->buffers[0].resrvd = 0;
-       list->buffers[0].len = buf_len;
+       list->buffers[0].len = first_buf_len;
 
-       if (data_len <= buf_len) {
+       if (data_len <= first_buf_len) {
                list->num_bufs = nr;
                list->buffers[0].len = data_len;
-               return 0;
+               goto sgl_end;
        }
 
        buf = buf->next;
+       buf_len = first_buf_len;
        while (buf) {
-               if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
-                       QAT_LOG(ERR,
-                               "QAT PMD exceeded size of QAT SGL entry(%u)",
-                                       QAT_SGL_MAX_NUMBER);
+               if (unlikely(nr == max_segs)) {
+                       QAT_DP_LOG(ERR, "Exceeded max segments in QAT SGL (%u)",
+                                       max_segs);
                        return -EINVAL;
                }
 
                list->buffers[nr].len = rte_pktmbuf_data_len(buf);
                list->buffers[nr].resrvd = 0;
-               list->buffers[nr].addr = rte_pktmbuf_iova(buf);
-
+               list->buffers[nr].addr = rte_pktmbuf_mtophys(buf);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+               virt_addr[nr] = rte_pktmbuf_mtod(buf, uint8_t*);
+#endif
                buf_len += list->buffers[nr].len;
                buf = buf->next;
 
-               if (buf_len > data_len) {
+               if (buf_len >= data_len) {
                        list->buffers[nr].len -=
                                buf_len - data_len;
                        buf = NULL;
@@ -50,6 +63,22 @@ qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buf_start,
        }
        list->num_bufs = nr;
 
+sgl_end:
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+       {
+               uint16_t i;
+               QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
+               for (i = 0; i < list->num_bufs; i++) {
+                       QAT_DP_LOG(INFO,
+                               "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
+                               i, list->buffers[i].len,
+                               list->buffers[i].addr);
+                       QAT_DP_HEXDUMP_LOG(DEBUG, "qat SGL",
+                                       virt_addr[i], list->buffers[i].len);
+               }
+       }
+#endif
+
        return 0;
 }
 
index db85d54..b26aa26 100644 (file)
 
 /**< Intel(R) QAT device name for PCI registration */
 #define QAT_PCI_NAME   qat
-/*
- * Maximum number of SGL entries
- */
-#define QAT_SGL_MAX_NUMBER     16
-
 #define QAT_64_BTYE_ALIGN_MASK (~0x3f)
 
 /* Intel(R) QuickAssist Technology device generation is enumerated
@@ -31,6 +26,7 @@ enum qat_service_type {
        QAT_SERVICE_COMPRESSION,
        QAT_SERVICE_INVALID
 };
+
 #define QAT_MAX_SERVICES               (QAT_SERVICE_INVALID)
 
 /**< Common struct for scatter-gather list operations */
@@ -40,11 +36,17 @@ struct qat_flat_buf {
        uint64_t addr;
 } __rte_packed;
 
+#define qat_sgl_hdr  struct { \
+       uint64_t resrvd; \
+       uint32_t num_bufs; \
+       uint32_t num_mapped_bufs; \
+}
+
+__extension__
 struct qat_sgl {
-       uint64_t resrvd;
-       uint32_t num_bufs;
-       uint32_t num_mapped_bufs;
-       struct qat_flat_buf buffers[QAT_SGL_MAX_NUMBER];
+       qat_sgl_hdr;
+       /* flexible array of flat buffers */
+       struct qat_flat_buf buffers[0];
 } __rte_packed __rte_cache_aligned;
 
 /** Common, i.e. not service-specific, statistics */
@@ -64,7 +66,8 @@ struct qat_pci_device;
 
 int
 qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buf_start,
-               struct qat_sgl *list, uint32_t data_len);
+               void *list_in, uint32_t data_len,
+               const uint16_t max_segs);
 void
 qat_stats_get(struct qat_pci_device *dev,
                struct qat_common_stats *stats,
index 4ed7d95..8273968 100644 (file)
@@ -495,8 +495,9 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
                ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
                                QAT_COMN_PTR_TYPE_SGL);
                ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
-                               &cookie->qat_sgl_src,
-                               qat_req->comn_mid.src_length);
+                                       &cookie->qat_sgl_src,
+                                       qat_req->comn_mid.src_length,
+                                       QAT_SYM_SGL_MAX_NUMBER);
 
                if (unlikely(ret)) {
                        QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
@@ -509,9 +510,10 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
                                cookie->qat_sgl_src_phys_addr;
                else {
                        ret = qat_sgl_fill_array(op->sym->m_dst,
-                                       dst_buf_start,
-                                       &cookie->qat_sgl_dst,
-                                               qat_req->comn_mid.dst_length);
+                                                dst_buf_start,
+                                                &cookie->qat_sgl_dst,
+                                                qat_req->comn_mid.dst_length,
+                                                QAT_SYM_SGL_MAX_NUMBER);
 
                        if (unlikely(ret)) {
                                QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array");
index e4e1ae8..bc6426c 100644 (file)
  */
 #define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
 
+/*
+ * Maximum number of SGL entries
+ */
+#define QAT_SYM_SGL_MAX_NUMBER 16
+
 struct qat_sym_session;
 
+struct qat_sym_sgl {
+       qat_sgl_hdr;
+       struct qat_flat_buf buffers[QAT_SYM_SGL_MAX_NUMBER];
+} __rte_packed __rte_cache_aligned;
+
 struct qat_sym_op_cookie {
-       struct qat_sgl qat_sgl_src;
-       struct qat_sgl qat_sgl_dst;
+       struct qat_sym_sgl qat_sgl_src;
+       struct qat_sym_sgl qat_sgl_dst;
        phys_addr_t qat_sgl_src_phys_addr;
        phys_addr_t qat_sgl_dst_phys_addr;
 };