1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2018 Intel Corporation
8 #include <rte_cryptodev_pmd.h>
9 #include <rte_memzone.h>
11 #include "qat_common.h"
12 #include "qat_device.h"
13 #include "qat_crypto_capabilities.h"
16 * This macro rounds up a number to be a multiple of
17 * the alignment when the alignment is a power of 2.
/*
 * Round `num` up to the next multiple of `align`.
 * Valid only when `align` is a power of two (bit-mask trick).
 */
19 #define ALIGN_POW2_ROUNDUP(num, align) \
20 (((num) + (align) - 1) & ~((align) - 1))
/*
 * Mask that clears the low 6 bits, i.e. aligns a value DOWN to a
 * 64-byte boundary.
 * NOTE(review): "BTYE" is a typo for "BYTE"; kept as-is because
 * renaming a public macro would break code that already uses it.
 */
21 #define QAT_64_BTYE_ALIGN_MASK (~0x3f)
/* CSR write-coalescing thresholds (see the per-macro comments below). */
23 #define QAT_CSR_HEAD_WRITE_THRESH 32U
24 /* number of requests to accumulate before writing head CSR */
25 #define QAT_CSR_TAIL_WRITE_THRESH 32U
26 /* number of requests to accumulate before writing tail CSR */
27 #define QAT_CSR_TAIL_FORCE_WRITE_THRESH 256U
28 /* number of inflights below which no tail write coalescing should occur */
/*
 * Callback that serialises one crypto op into a device request
 * descriptor written at `req`.  `op_cookie` is per-descriptor scratch
 * space; `qat_dev_gen` selects generation-specific descriptor layout.
 * Returns 0 on success, negative on error — TODO confirm against the
 * implementations (e.g. qat_sym_build_request()).
 */
30 typedef int (*build_request_t)(void *op,
31 uint8_t *req, void *op_cookie,
32 enum qat_device_gen qat_dev_gen);
33 /**< Build a request from an op. */
/*
 * Callback that parses one response descriptor (`resp`) and stores the
 * associated completed op back through `ops`.
 */
35 typedef int (*process_response_t)(void **ops,
36 uint8_t *resp, void *op_cookie,
37 enum qat_device_gen qat_dev_gen);
38 /**< Process a response descriptor and return the associated op. */
/* Forward declaration only; the full definition lives elsewhere. */
40 struct qat_sym_session;
43 * Structure associated with each queue.
/* Name of the memzone backing this ring's descriptor memory. */
46 char memz_name[RTE_MEMZONE_NAMESIZE];
47 void *base_addr; /* Base address */
48 rte_iova_t base_phys_addr; /* Queue physical address */
49 uint32_t head; /* Shadow copy of the head */
50 uint32_t tail; /* Shadow copy of the tail */
/* Maximum number of requests allowed in flight on this queue. */
53 uint16_t max_inflights;
/* NOTE(review): bundle index on the device — confirm exact semantics. */
55 uint8_t hw_bundle_number;
56 uint8_t hw_queue_number;
57 /* HW queue aka ring offset on bundle */
/*
 * Head/tail values most recently written to the hardware CSRs; kept so
 * CSR writes can be coalesced (see QAT_CSR_*_WRITE_THRESH above).
 */
58 uint32_t csr_head; /* last written head value */
59 uint32_t csr_tail; /* last written tail value */
60 uint16_t nb_processed_responses;
61 /* number of responses processed since last CSR head write */
62 uint16_t nb_pending_requests;
63 /* number of requests pending since last CSR tail write */
/* Transmit (request) ring. */
69 struct qat_queue tx_q;
/* Receive (response) ring. */
70 struct qat_queue rx_q;
/* Per-queue-pair statistics reported via qat_sym_stats_get(). */
71 struct rte_cryptodev_stats stats;
/* Pool of per-descriptor cookies passed to the build/process callbacks. */
72 struct rte_mempool *op_cookie_pool;
/* Number of descriptors in each ring. */
74 uint32_t nb_descriptors;
/* Device generation; forwarded to build_request/process_response. */
75 enum qat_device_gen qat_dev_gen;
/* Service-specific request-build and response-process hooks. */
76 build_request_t build_request;
77 process_response_t process_response;
78 } __rte_cache_aligned;
/* build_request_t implementation for the symmetric-crypto service. */
82 qat_sym_build_request(void *in_op, uint8_t *out_msg,
83 void *op_cookie, enum qat_device_gen qat_dev_gen);
/* process_response_t implementation for the symmetric-crypto service. */
86 qat_sym_process_response(void **op, uint8_t *resp,
87 __rte_unused void *op_cookie, enum qat_device_gen qat_dev_gen);
/* Copy the device's accumulated crypto statistics into *stats. */
89 void qat_sym_stats_get(struct rte_cryptodev *dev,
90 struct rte_cryptodev_stats *stats);
/* Reset the device's accumulated crypto statistics to zero. */
91 void qat_sym_stats_reset(struct rte_cryptodev *dev);
/*
 * Set up queue pair `queue_pair_id` on `dev`; ring memory is allocated
 * on NUMA node `socket_id`.  Returns 0 on success, negative on failure
 * (convention — TODO confirm against the implementation).
 */
93 int qat_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
94 const struct rte_cryptodev_qp_conf *rx_conf, int socket_id,
95 struct rte_mempool *session_pool);
/* Release a queue pair previously created with qat_sym_qp_setup(). */
96 int qat_sym_qp_release(struct rte_cryptodev *dev,
97 uint16_t queue_pair_id);
/* Burst-enqueue symmetric crypto ops onto a queue pair (datapath). */
101 qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
/* Burst-dequeue completed symmetric crypto ops from a queue pair. */
105 qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
108 #endif /* _QAT_SYM_H_ */