int
qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buf_start,
- struct qat_sgl *list, uint32_t data_len)
+ void *list_in, uint32_t data_len,
+ const uint16_t max_segs)
{
int nr = 1;
-
- uint32_t buf_len = rte_pktmbuf_iova(buf) -
- buf_start + rte_pktmbuf_data_len(buf);
+ struct qat_sgl *list = (struct qat_sgl *)list_in;
+ /* buf_start allows the first buffer to start at an address before or
+ * after the mbuf data start. It is used either to align the DMA
+ * optimally to 64 bytes or to start the DMA from an offset.
+ */
+ uint32_t buf_len;
+ uint32_t first_buf_len = rte_pktmbuf_data_len(buf) +
+ (rte_pktmbuf_iova(buf) - buf_start);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ uint8_t *virt_addr[max_segs];
+ virt_addr[0] = rte_pktmbuf_mtod(buf, uint8_t*) +
+ (rte_pktmbuf_iova(buf) - buf_start);
+#endif
list->buffers[0].addr = buf_start;
list->buffers[0].resrvd = 0;
- list->buffers[0].len = buf_len;
+ list->buffers[0].len = first_buf_len;
- if (data_len <= buf_len) {
+ if (data_len <= first_buf_len) {
list->num_bufs = nr;
list->buffers[0].len = data_len;
- return 0;
+ goto sgl_end;
}
buf = buf->next;
+ buf_len = first_buf_len;
while (buf) {
- if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
- QAT_LOG(ERR,
- "QAT PMD exceeded size of QAT SGL entry(%u)",
- QAT_SGL_MAX_NUMBER);
+ if (unlikely(nr == max_segs)) {
+ QAT_DP_LOG(ERR, "Exceeded max segments in QAT SGL (%u)",
+ max_segs);
return -EINVAL;
}
list->buffers[nr].len = rte_pktmbuf_data_len(buf);
list->buffers[nr].resrvd = 0;
- list->buffers[nr].addr = rte_pktmbuf_iova(buf);
-
+ list->buffers[nr].addr = rte_pktmbuf_iova(buf);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ virt_addr[nr] = rte_pktmbuf_mtod(buf, uint8_t*);
+#endif
buf_len += list->buffers[nr].len;
buf = buf->next;
- if (buf_len > data_len) {
+ if (buf_len >= data_len) {
list->buffers[nr].len -=
buf_len - data_len;
buf = NULL;
}
++nr;
}
list->num_bufs = nr;
+sgl_end:
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ {
+ uint16_t i;
+ QAT_DP_LOG(INFO, "SGL with %u buffers:", list->num_bufs);
+ for (i = 0; i < list->num_bufs; i++) {
+ QAT_DP_LOG(INFO,
+ "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
+ i, list->buffers[i].len,
+ list->buffers[i].addr);
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat SGL",
+ virt_addr[i], list->buffers[i].len);
+ }
+ }
+#endif
+
return 0;
}
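For reference, the caller drives the alignment behaviour described in the comment above by choosing buf_start itself. A minimal sketch, assuming the sized qat_sym_sgl introduced further down and the existing QAT_64_BTYE_ALIGN_MASK define; the helper name fill_aligned_sgl is hypothetical, not part of the patch:

	#include <rte_mbuf.h>

	/* Hypothetical helper: start the DMA at the nearest 64-byte
	 * boundary at or before the mbuf data start. The padding bytes in
	 * front of the data are absorbed into the first SGL entry, so
	 * data_len must be measured from buf_start. */
	static int
	fill_aligned_sgl(struct rte_mbuf *m, struct qat_sym_sgl *sgl,
			uint32_t data_len)
	{
		uint64_t iova = rte_pktmbuf_iova(m);
		uint64_t buf_start = iova & QAT_64_BTYE_ALIGN_MASK;

		return qat_sgl_fill_array(m, buf_start, sgl,
				data_len + (uint32_t)(iova - buf_start),
				QAT_SYM_SGL_MAX_NUMBER);
	}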
/**< Intel(R) QAT device name for PCI registration */
#define QAT_PCI_NAME qat
-/*
- * Maximum number of SGL entries
- */
-#define QAT_SGL_MAX_NUMBER 16
-
#define QAT_64_BTYE_ALIGN_MASK (~0x3f)
/* Intel(R) QuickAssist Technology device generation is enumerated
QAT_SERVICE_COMPRESSION,
QAT_SERVICE_INVALID
};
+
#define QAT_MAX_SERVICES (QAT_SERVICE_INVALID)
/**< Common struct for scatter-gather list operations */
uint64_t addr;
} __rte_packed;
+#define qat_sgl_hdr struct { \
+ uint64_t resrvd; \
+ uint32_t num_bufs; \
+ uint32_t num_mapped_bufs; \
+}
+
+__extension__
struct qat_sgl {
- uint64_t resrvd;
- uint32_t num_bufs;
- uint32_t num_mapped_bufs;
- struct qat_flat_buf buffers[QAT_SGL_MAX_NUMBER];
+ qat_sgl_hdr;
+ /* flexible array of flat buffers */
+ struct qat_flat_buf buffers[0];
} __rte_packed __rte_cache_aligned;
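Because every per-service SGL now starts with qat_sgl_hdr, generic code can inspect any of them through struct qat_sgl, which is what lets qat_sgl_fill_array accept a void *. A sketch of that generic access; sgl_total_len is an illustrative name, not from the patch:

	/* Illustrative only: walk a per-service SGL of any size through
	 * the generic zero-length-array view. */
	static uint32_t
	sgl_total_len(const void *list_in)
	{
		const struct qat_sgl *list = list_in;
		uint32_t i, total = 0;

		for (i = 0; i < list->num_bufs; i++)
			total += list->buffers[i].len;
		return total;
	}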
/** Common, i.e. not service-specific, statistics */
int
qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buf_start,
- struct qat_sgl *list, uint32_t data_len);
+ void *list_in, uint32_t data_len,
+ const uint16_t max_segs);
void
qat_stats_get(struct qat_pci_device *dev,
struct qat_common_stats *stats,
ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
QAT_COMN_PTR_TYPE_SGL);
ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
- &cookie->qat_sgl_src,
- qat_req->comn_mid.src_length);
+ &cookie->qat_sgl_src,
+ qat_req->comn_mid.src_length,
+ QAT_SYM_SGL_MAX_NUMBER);
if (unlikely(ret)) {
QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
cookie->qat_sgl_src_phys_addr;
else {
ret = qat_sgl_fill_array(op->sym->m_dst,
- dst_buf_start,
- &cookie->qat_sgl_dst,
- qat_req->comn_mid.dst_length);
+ dst_buf_start,
+ &cookie->qat_sgl_dst,
+ qat_req->comn_mid.dst_length,
+ QAT_SYM_SGL_MAX_NUMBER);
if (unlikely(ret)) {
QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array");
*/
#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
+/*
+ * Maximum number of SGL entries for the symmetric crypto service
+ */
+#define QAT_SYM_SGL_MAX_NUMBER 16
+
struct qat_sym_session;
+struct qat_sym_sgl {
+ qat_sgl_hdr;
+ struct qat_flat_buf buffers[QAT_SYM_SGL_MAX_NUMBER];
+} __rte_packed __rte_cache_aligned;
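The void * cast in qat_sgl_fill_array is only safe while the sized and generic structs keep the same header layout; a compile-time guard along these lines (purely illustrative, not in the patch) would catch any drift:

	#include <stddef.h>

	/* Illustrative guard: buffers[] in the sized SGL must sit at the
	 * same offset as in the generic struct qat_sgl. DPDK code could
	 * equally use RTE_BUILD_BUG_ON inside an init function. */
	_Static_assert(offsetof(struct qat_sym_sgl, buffers) ==
			offsetof(struct qat_sgl, buffers),
			"qat_sym_sgl must match the generic SGL header layout");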
+
struct qat_sym_op_cookie {
- struct qat_sgl qat_sgl_src;
- struct qat_sgl qat_sgl_dst;
+ struct qat_sym_sgl qat_sgl_src;
+ struct qat_sym_sgl qat_sgl_dst;
phys_addr_t qat_sgl_src_phys_addr;
phys_addr_t qat_sgl_dst_phys_addr;
};
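The two phys_addr_t fields cache where each SGL sits inside the cookie so the data path never recomputes it. A setup-time sketch, assuming the cookies are rte_mempool elements; the function name is hypothetical:

	#include <stddef.h>
	#include <rte_mempool.h>

	/* Hypothetical cookie init: derive each SGL's IOVA from the
	 * cookie's own IOVA plus the member offset within the struct. */
	static void
	qat_sym_cookie_init(struct qat_sym_op_cookie *cookie)
	{
		cookie->qat_sgl_src_phys_addr = rte_mempool_virt2iova(cookie) +
				offsetof(struct qat_sym_op_cookie, qat_sgl_src);
		cookie->qat_sgl_dst_phys_addr = rte_mempool_virt2iova(cookie) +
				offsetof(struct qat_sym_op_cookie, qat_sgl_dst);
	}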