1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2021 Intel Corporation
5 #include "qat_device.h"
7 #include "adf_transport_access_macros.h"
8 #include "qat_dev_gens.h"
/* Byte stride between consecutive arbiter register slots; used to locate
 * the ring-service-arbiter enable CSR of a given bundle. */
12 #define ADF_ARB_REG_SLOT 0x1000

/* Write 'value' to the ring-service-arbiter enable CSR of bundle 'index'.
 * NOTE(review): 'index' is expanded unparenthesized; all visible callers
 * pass a simple lvalue so this is benign, but (index) would be safer. */
14 #define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
15 ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
16 (ADF_ARB_REG_SLOT * index), value)
/* Static hardware queue-pair configuration templates for QAT GEN1
 * devices, indexed first by service type (asymmetric crypto, symmetric
 * crypto, compression) and then by queue-pair slot within that service. */
19 const struct qat_qp_hw_data qat_gen1_qps[QAT_MAX_SERVICES]
20 [ADF_MAX_QPS_ON_ANY_SERVICE] = {
21 /* queue pairs which provide an asymmetric crypto service */
22 [QAT_SERVICE_ASYMMETRIC] = {
24 .service_type = QAT_SERVICE_ASYMMETRIC,
32 .service_type = QAT_SERVICE_ASYMMETRIC,
40 /* queue pairs which provide a symmetric crypto service */
41 [QAT_SERVICE_SYMMETRIC] = {
43 .service_type = QAT_SERVICE_SYMMETRIC,
51 .service_type = QAT_SERVICE_SYMMETRIC,
59 /* queue pairs which provide a compression service */
60 [QAT_SERVICE_COMPRESSION] = {
62 .service_type = QAT_SERVICE_COMPRESSION,
69 .service_type = QAT_SERVICE_COMPRESSION,
/* Return the hardware QP template for (service_type, qp_id) from the
 * static qat_gen1_qps table.  The device pointer is unused on GEN1
 * because the table is identical for every GEN1 device.
 * NOTE(review): no bounds check on service_type/qp_id here — presumably
 * validated by callers; confirm. */
79 const struct qat_qp_hw_data *
80 qat_qp_get_hw_data_gen1(struct qat_pci_device *dev __rte_unused,
81 enum qat_service_type service_type, uint16_t qp_id)
83 return qat_gen1_qps[service_type] + qp_id;
/* Count the queue pairs usable for 'service' on this device: scan every
 * QP slot and count those whose template matches the service and has a
 * nonzero TX message size (a zero tx_msg_size marks an unpopulated slot). */
87 qat_qp_rings_per_service_gen1(struct qat_pci_device *qat_dev,
88 enum qat_service_type service)
92 for (i = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++) {
93 const struct qat_qp_hw_data *hw_qps =
94 qat_qp_get_hw_data(qat_dev, service, i);
98 if (hw_qps->service_type == service && hw_qps->tx_msg_size)
/* Program the ring base-address CSR for 'queue': build the register
 * value from the queue's physical base address and write it to the
 * (bundle, queue) ring-base register in the transport BAR. */
106 qat_qp_csr_build_ring_base_gen1(void *io_addr,
107 struct qat_queue *queue)
111 queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
113 WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
114 queue->hw_queue_number, queue_base);
/* Enable arbitration for txq's ring: read-modify-write the bundle's
 * ring-service-arbiter enable CSR, setting the bit for this hw queue.
 * The RMW is serialized with 'lock' because the CSR is shared by every
 * queue in the bundle. */
118 qat_qp_adf_arb_enable_gen1(const struct qat_queue *txq,
119 void *base_addr, rte_spinlock_t *lock)
121 uint32_t arb_csr_offset = 0, value;
123 rte_spinlock_lock(lock);
124 arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
126 txq->hw_bundle_number);
127 value = ADF_CSR_RD(base_addr,
129 value |= (0x01 << txq->hw_queue_number);
130 ADF_CSR_WR(base_addr, arb_csr_offset, value);
131 rte_spinlock_unlock(lock);
/* Disable arbitration for txq's ring: read-modify-write the bundle's
 * ring-service-arbiter enable CSR, clearing the bit for this hw queue.
 * Mirror of qat_qp_adf_arb_enable_gen1(); same lock protects the shared
 * per-bundle CSR. */
135 qat_qp_adf_arb_disable_gen1(const struct qat_queue *txq,
136 void *base_addr, rte_spinlock_t *lock)
138 uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
139 (ADF_ARB_REG_SLOT * txq->hw_bundle_number);
142 rte_spinlock_lock(lock);
143 value = ADF_CSR_RD(base_addr, arb_csr_offset);
144 value &= ~(0x01 << txq->hw_queue_number);
145 ADF_CSR_WR(base_addr, arb_csr_offset, value);
146 rte_spinlock_unlock(lock);
/* Write the ring-config CSRs for both rings of a queue pair: a plain
 * ring config sized from the TX queue, and a response-ring config for
 * the RX queue carrying the near-full (512) and near-empty (0)
 * watermark settings. */
150 qat_qp_adf_configure_queues_gen1(struct qat_qp *qp)
152 uint32_t q_tx_config, q_resp_config;
153 struct qat_queue *q_tx = &qp->tx_q, *q_rx = &qp->rx_q;
155 q_tx_config = BUILD_RING_CONFIG(q_tx->queue_size);
156 q_resp_config = BUILD_RESP_RING_CONFIG(q_rx->queue_size,
157 ADF_RING_NEAR_WATERMARK_512,
158 ADF_RING_NEAR_WATERMARK_0);
159 WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr,
160 q_tx->hw_bundle_number, q_tx->hw_queue_number,
162 WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr,
163 q_rx->hw_bundle_number, q_rx->hw_queue_number,
/* Publish the software tail pointer of queue 'q' to its hardware ring
 * tail CSR (notifies the device of newly enqueued descriptors). */
168 qat_qp_csr_write_tail_gen1(struct qat_qp *qp, struct qat_queue *q)
170 WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
171 q->hw_queue_number, q->tail);
/* Write 'new_head' to the hardware ring head CSR of queue 'q'
 * (acknowledges responses consumed by software). */
175 qat_qp_csr_write_head_gen1(struct qat_qp *qp, struct qat_queue *q,
178 WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
179 q->hw_queue_number, new_head);
/* One-shot CSR initialization for a queue pair: program the ring base
 * addresses of both TX and RX rings, write the ring-config registers,
 * then enable arbitration on the TX ring under the device-wide
 * arbiter lock. */
183 qat_qp_csr_setup_gen1(struct qat_pci_device *qat_dev,
184 void *io_addr, struct qat_qp *qp)
186 qat_qp_csr_build_ring_base_gen1(io_addr, &qp->tx_q);
187 qat_qp_csr_build_ring_base_gen1(io_addr, &qp->rx_q);
188 qat_qp_adf_configure_queues_gen1(qp);
189 qat_qp_adf_arb_enable_gen1(&qp->tx_q, qp->mmap_bar_addr,
190 &qat_dev->arb_csr_lock);
/* GEN1 implementation of the per-generation queue-pair ops table;
 * registered for QAT_GEN1 at load time (see RTE_INIT below). */
193 static struct qat_qp_hw_spec_funcs qat_qp_hw_spec_gen1 = {
194 .qat_qp_rings_per_service = qat_qp_rings_per_service_gen1,
195 .qat_qp_build_ring_base = qat_qp_csr_build_ring_base_gen1,
196 .qat_qp_adf_arb_enable = qat_qp_adf_arb_enable_gen1,
197 .qat_qp_adf_arb_disable = qat_qp_adf_arb_disable_gen1,
198 .qat_qp_adf_configure_queues = qat_qp_adf_configure_queues_gen1,
199 .qat_qp_csr_write_tail = qat_qp_csr_write_tail_gen1,
200 .qat_qp_csr_write_head = qat_qp_csr_write_head_gen1,
201 .qat_qp_csr_setup = qat_qp_csr_setup_gen1,
202 .qat_qp_get_hw_data = qat_qp_get_hw_data_gen1,
/* Ring-pair reset is not supported on base (GEN1) hardware; this stub
 * exists so the generic code can call through the ops table uniformly. */
206 qat_reset_ring_pairs_gen1(struct qat_pci_device *qat_pci_dev __rte_unused)
209 * Ring pairs reset not supported on base, continue
/* GEN1 exposes the ring/transport registers in PCI BAR 0; return that
 * memory resource. */
214 const struct rte_mem_resource *
215 qat_dev_get_transport_bar_gen1(struct rte_pci_device *pci_dev)
217 return &pci_dev->mem_resource[0];
/* GEN1 stub for retrieving the misc BAR; both parameters are unused,
 * so no misc BAR is provided on this generation.
 * NOTE(review): return value not visible here — presumably an error or
 * "not supported" code; confirm against the ops-table contract. */
221 qat_dev_get_misc_bar_gen1(struct rte_mem_resource **mem_resource __rte_unused,
222 struct rte_pci_device *pci_dev __rte_unused)
/* Configuration read stub for GEN1: base generations carry no device
 * configuration, but the ops pointer is still populated so generic code
 * can distinguish a deliberately-empty implementation from a higher
 * generation that faultily left the pointer NULL. */
228 qat_dev_read_config_gen1(struct qat_pci_device *qat_dev __rte_unused)
231 * Base generations do not have configuration,
232 * but set this pointer anyway that we can
233 * distinguish higher generations faulty set to NULL
/* Size of generation-specific private data appended to the device
 * structure.  NOTE(review): body not shown here — expected to be 0 for
 * GEN1 (no extra data); confirm. */
239 qat_dev_get_extra_size_gen1(void)
/* GEN1 implementation of the per-generation device ops table;
 * registered for QAT_GEN1 at load time (see RTE_INIT below). */
244 static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_gen1 = {
245 .qat_dev_reset_ring_pairs = qat_reset_ring_pairs_gen1,
246 .qat_dev_get_transport_bar = qat_dev_get_transport_bar_gen1,
247 .qat_dev_get_misc_bar = qat_dev_get_misc_bar_gen1,
248 .qat_dev_read_config = qat_dev_read_config_gen1,
249 .qat_dev_get_extra_size = qat_dev_get_extra_size_gen1,
/* Shared-library constructor: register the GEN1 queue-pair and device
 * ops tables and record the generation in the global config, before
 * main() / EAL init runs. */
252 RTE_INIT(qat_dev_gen_gen1_init)
254 qat_qp_hw_spec[QAT_GEN1] = &qat_qp_hw_spec_gen1;
255 qat_dev_hw_spec[QAT_GEN1] = &qat_dev_hw_spec_gen1;
256 qat_gen_config[QAT_GEN1].dev_gen = QAT_GEN1;