1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
7 #include "qat_common.h"
8 #include "adf_transport_access_macros.h"
10 struct qat_pci_device;
12 #define QAT_CSR_HEAD_WRITE_THRESH 32U
13 /* number of requests to accumulate before writing head CSR */
15 #define QAT_QP_MIN_INFL_THRESHOLD 256
/* NOTE(review): presumably the lower bound applied to a qp's max_inflights
 * when sizing rings — confirm against the use site in qat_qp.c */
17 /* Default qp configuration for GEN4 devices: the same service type is
 * packed into each of the four 8-bit slots of the 32-bit config word,
 * i.e. every qp on the device defaults to the symmetric crypto service.
 * Each term is individually parenthesized for macro-expansion safety.
 */
18 #define QAT_GEN4_QP_DEFCON	((QAT_SERVICE_SYMMETRIC) | \
19 				((QAT_SERVICE_SYMMETRIC) << 8) | \
20 				((QAT_SERVICE_SYMMETRIC) << 16) | \
21 				((QAT_SERVICE_SYMMETRIC) << 24))
23 /* QAT GEN 4 specific macros */
24 #define QAT_GEN4_BUNDLE_NUM 4
/* NOTE(review): looks like one qp slot per bundle on GEN4 (vs. multiple on
 * earlier gens) — confirm against the GEN4 setup path in qat_qp.c */
25 #define QAT_GEN4_QPS_PER_BUNDLE_NUM 1
28 * Structure with data needed for creation of queue pair.
30 struct qat_qp_hw_data {
31 enum qat_service_type service_type;
32 uint8_t hw_bundle_num;
40 * Structure with data needed for creation of queue pair on gen4.
42 struct qat_qp_gen4_data {
43 struct qat_qp_hw_data qat_qp_hw_data;
49 * Structure with data needed for creation of queue pair.
51 struct qat_qp_config {
52 const struct qat_qp_hw_data *hw;
53 uint32_t nb_descriptors;
56 const char *service_str;
60 * Structure associated with each queue.
63 char memz_name[RTE_MEMZONE_NAMESIZE];
64 void *base_addr; /* Base address */
65 rte_iova_t base_phys_addr; /* Queue physical address */
66 uint32_t head; /* Shadow copy of the head */
67 uint32_t tail; /* Shadow copy of the tail */
72 uint8_t hw_bundle_number;
73 uint8_t hw_queue_number;
74 /* HW queue aka ring offset on bundle */
75 uint32_t csr_head; /* last written head value */
76 uint32_t csr_tail; /* last written tail value */
77 uint16_t nb_processed_responses;
78 /* number of responses processed since last CSR head write */
83 struct qat_queue tx_q;
84 struct qat_queue rx_q;
85 struct qat_common_stats stats;
86 struct rte_mempool *op_cookie_pool;
88 uint32_t nb_descriptors;
89 enum qat_device_gen qat_dev_gen;
90 enum qat_service_type service_type;
91 struct qat_pci_device *qat_dev;
92 /**< qat device this qp is on */
94 uint32_t dequeued __rte_aligned(4);
95 uint16_t max_inflights;
96 uint16_t min_enq_burst_threshold;
97 } __rte_cache_aligned;
/* Per-generation qp hardware-config lookup tables, indexed by
 * [service type][qp index]; presumably defined in qat_qp.c — confirm. */
99 extern const struct qat_qp_hw_data qat_gen1_qps[][ADF_MAX_QPS_ON_ANY_SERVICE];
100 extern const struct qat_qp_hw_data qat_gen3_qps[][ADF_MAX_QPS_ON_ANY_SERVICE];
103 qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
106 qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops);
109 qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);
112 qat_qp_release(enum qat_device_gen qat_dev_gen, struct qat_qp **qp_addr);
115 qat_qp_setup(struct qat_pci_device *qat_dev,
116 struct qat_qp **qp_addr, uint16_t queue_pair_id,
117 struct qat_qp_config *qat_qp_conf);
120 qat_qps_per_service(struct qat_pci_device *qat_dev,
121 enum qat_service_type service);
124 qat_cq_get_fw_version(struct qat_qp *qp);
126 /* Needed for the weak function */
128 qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
129 void *op_cookie __rte_unused,
130 uint64_t *dequeue_err_count __rte_unused);
133 qat_select_valid_queue(struct qat_pci_device *qat_dev, int qp_id,
134 enum qat_service_type service_type);
137 qat_read_qp_config(struct qat_pci_device *qat_dev);
139 #endif /* _QAT_QP_H_ */