SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat_device.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat_qp.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat_sym_session.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat_common.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += rte_qat_cryptodev.c
# export include files
endif
sources = files('qat_sym.c', 'qat_qp.c',
'qat_sym_session.c',
+ 'qat_common.c',
'rte_qat_cryptodev.c',
'qat_device.c')
includes += include_directories('qat_adf')
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include "qat_common.h"
+#include "qat_logs.h"
+
+/*
+ * Build a QAT scatter-gather list from a chained mbuf.
+ *
+ * @buf       head of the mbuf chain holding the data
+ * @buf_start IOVA at which the region of interest begins; must lie within
+ *            the first segment (at or after rte_pktmbuf_iova(buf)) —
+ *            NOTE(review): not validated here, callers must guarantee it
+ * @list      output SGL; entries [0..num_bufs) are filled in
+ * @data_len  total number of bytes to cover, starting at buf_start
+ *
+ * Returns 0 on success, -EINVAL if more than QAT_SGL_MAX_NUMBER
+ * entries would be required.
+ */
+int
+qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buf_start,
+		struct qat_sgl *list, uint32_t data_len)
+{
+	int nr = 1;
+
+	/* Usable bytes in the first segment: from buf_start to the end of
+	 * the segment's data (buf_start may start mid-segment).
+	 */
+	uint32_t buf_len = rte_pktmbuf_iova(buf) -
+			buf_start + rte_pktmbuf_data_len(buf);
+
+	list->buffers[0].addr = buf_start;
+	list->buffers[0].resrvd = 0;
+	list->buffers[0].len = buf_len;
+
+	/* Fast path: everything fits in the first segment. */
+	if (data_len <= buf_len) {
+		list->num_bufs = nr;
+		/* Clamp the entry to exactly the requested length. */
+		list->buffers[0].len = data_len;
+		return 0;
+	}
+
+	buf = buf->next;
+	while (buf) {
+		if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
+			PMD_DRV_LOG(ERR,
+				"QAT PMD exceeded size of QAT SGL entry(%u)",
+				QAT_SGL_MAX_NUMBER);
+			return -EINVAL;
+		}
+
+		list->buffers[nr].len = rte_pktmbuf_data_len(buf);
+		list->buffers[nr].resrvd = 0;
+		list->buffers[nr].addr = rte_pktmbuf_iova(buf);
+
+		buf_len += list->buffers[nr].len;
+		buf = buf->next;
+
+		/* Accumulated more than requested: trim the last entry to
+		 * end exactly at data_len and stop walking the chain.
+		 */
+		if (buf_len > data_len) {
+			list->buffers[nr].len -=
+				buf_len - data_len;
+			buf = NULL;
+		}
+		++nr;
+	}
+	/* NOTE(review): if the chain holds fewer than data_len bytes the
+	 * loop exits with a short SGL and still returns 0 — confirm callers
+	 * guarantee the chain covers data_len.
+	 */
+	list->num_bufs = nr;
+
+	return 0;
+}
#include <stdint.h>
+#include <rte_mbuf.h>
+
/**< Intel(R) QAT Symmetric Crypto PMD device name */
#define CRYPTODEV_NAME_QAT_SYM_PMD crypto_qat
struct qat_flat_buf buffers[QAT_SGL_MAX_NUMBER];
} __rte_packed __rte_cache_aligned;
-struct qat_sym_op_cookie {
- struct qat_sgl qat_sgl_src;
- struct qat_sgl qat_sgl_dst;
- phys_addr_t qat_sgl_src_phys_addr;
- phys_addr_t qat_sgl_dst_phys_addr;
-};
+/**
+ * Fill @list with SGL entries describing @data_len bytes of the mbuf
+ * chain @buf, starting at IOVA @buf_start.
+ *
+ * @return 0 on success, -EINVAL if the chain needs more than
+ *	QAT_SGL_MAX_NUMBER entries.
+ */
+int
+qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buf_start,
+		struct qat_sgl *list, uint32_t data_len);
#endif /* _QAT_COMMON_H_ */
return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
}
-static inline int
-qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
- struct qat_sgl *list, uint32_t data_len)
-{
- int nr = 1;
-
- uint32_t buf_len = rte_pktmbuf_iova(buf) -
- buff_start + rte_pktmbuf_data_len(buf);
-
- list->buffers[0].addr = buff_start;
- list->buffers[0].resrvd = 0;
- list->buffers[0].len = buf_len;
-
- if (data_len <= buf_len) {
- list->num_bufs = nr;
- list->buffers[0].len = data_len;
- return 0;
- }
-
- buf = buf->next;
- while (buf) {
- if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
- PMD_DRV_LOG(ERR, "QAT PMD exceeded size of QAT SGL"
- " entry(%u)",
- QAT_SGL_MAX_NUMBER);
- return -EINVAL;
- }
-
- list->buffers[nr].len = rte_pktmbuf_data_len(buf);
- list->buffers[nr].resrvd = 0;
- list->buffers[nr].addr = rte_pktmbuf_iova(buf);
-
- buf_len += list->buffers[nr].len;
- buf = buf->next;
-
- if (buf_len > data_len) {
- list->buffers[nr].len -=
- buf_len - data_len;
- buf = NULL;
- }
- ++nr;
- }
- list->num_bufs = nr;
-
- return 0;
-}
-
static inline void
set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
struct icp_qat_fw_la_cipher_req_params *cipher_param,
struct qat_sym_session;
+/* Per-operation cookie holding the source and destination SGLs used
+ * when an op's data is scattered across an mbuf chain.
+ */
+struct qat_sym_op_cookie {
+	struct qat_sgl qat_sgl_src;	/* SGL describing the source data */
+	struct qat_sgl qat_sgl_dst;	/* SGL describing the destination data */
+	/* presumably the IOVAs of the two SGL structures above, for
+	 * handing to the device — TODO(review): confirm against the code
+	 * that populates them
+	 */
+	phys_addr_t qat_sgl_src_phys_addr;
+	phys_addr_t qat_sgl_dst_phys_addr;
+};
+
int
qat_sym_build_request(void *in_op, uint8_t *out_msg,
void *op_cookie, enum qat_device_gen qat_dev_gen);