common/qat: fix queue pair config overrun
drivers/common/qat/dev/qat_dev_gen1.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 Intel Corporation
 */

#include "qat_device.h"
#include "qat_qp.h"
#include "adf_transport_access_macros.h"
#include "qat_dev_gens.h"

#include <stdint.h>

#define ADF_ARB_REG_SLOT                        0x1000

/* Arbiter enable CSR for bundle @index; each bundle's register slot
 * is ADF_ARB_REG_SLOT apart from the previous one.
 */
#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
        ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
        (ADF_ARB_REG_SLOT * (index)), value)

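/*
 * Fixed ring assignments for the GEN1 queue pairs below: each queue
 * pair couples a request (tx) ring with the response (rx) ring eight
 * rings above it, e.g. tx ring 2 pairs with rx ring 10.
 */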
__extension__
const struct qat_qp_hw_data qat_gen1_qps[QAT_MAX_SERVICES]
                                         [ADF_MAX_QPS_ON_ANY_SERVICE] = {
        /* queue pairs which provide an asymmetric crypto service */
        [QAT_SERVICE_ASYMMETRIC] = {
                {
                        .service_type = QAT_SERVICE_ASYMMETRIC,
                        .hw_bundle_num = 0,
                        .tx_ring_num = 0,
                        .rx_ring_num = 8,
                        .tx_msg_size = 64,
                        .rx_msg_size = 32,
                }, {
                        .service_type = QAT_SERVICE_ASYMMETRIC,
                        .hw_bundle_num = 0,
                        .tx_ring_num = 1,
                        .rx_ring_num = 9,
                        .tx_msg_size = 64,
                        .rx_msg_size = 32,
                }
        },
        /* queue pairs which provide a symmetric crypto service */
        [QAT_SERVICE_SYMMETRIC] = {
                {
                        .service_type = QAT_SERVICE_SYMMETRIC,
                        .hw_bundle_num = 0,
                        .tx_ring_num = 2,
                        .rx_ring_num = 10,
                        .tx_msg_size = 128,
                        .rx_msg_size = 32,
                },
                {
                        .service_type = QAT_SERVICE_SYMMETRIC,
                        .hw_bundle_num = 0,
                        .tx_ring_num = 3,
                        .rx_ring_num = 11,
                        .tx_msg_size = 128,
                        .rx_msg_size = 32,
                }
        },
        /* queue pairs which provide a compression service */
        [QAT_SERVICE_COMPRESSION] = {
                {
                        .service_type = QAT_SERVICE_COMPRESSION,
                        .hw_bundle_num = 0,
                        .tx_ring_num = 6,
                        .rx_ring_num = 14,
                        .tx_msg_size = 128,
                        .rx_msg_size = 32,
                }, {
                        .service_type = QAT_SERVICE_COMPRESSION,
                        .hw_bundle_num = 0,
                        .tx_ring_num = 7,
                        .rx_ring_num = 15,
                        .tx_msg_size = 128,
                        .rx_msg_size = 32,
                }
        }
};

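/* Look up the fixed hw config for the given service/qp_id pair. */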
const struct qat_qp_hw_data *
qat_qp_get_hw_data_gen1(struct qat_pci_device *dev __rte_unused,
                enum qat_service_type service_type, uint16_t qp_id)
{
        /* Bound-check qp_id so a bad index cannot overrun the table */
        if (qp_id >= ADF_MAX_QPS_ON_ANY_SERVICE)
                return NULL;
        return qat_gen1_qps[service_type] + qp_id;
}

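/*
 * Count the queue pairs in the fixed table that can serve @service;
 * entries with a zero tx_msg_size are treated as unused.
 */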
int
qat_qp_rings_per_service_gen1(struct qat_pci_device *qat_dev,
                enum qat_service_type service)
{
        int i = 0, count = 0;

        for (i = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++) {
                const struct qat_qp_hw_data *hw_qps =
                                qat_qp_get_hw_data(qat_dev, service, i);

                if (hw_qps == NULL)
                        continue;
                if (hw_qps->service_type == service && hw_qps->tx_msg_size)
                        count++;
        }

        return count;
}

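/*
 * Program a ring's base-address CSR, derived from the ring's
 * physical address and its size.
 */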
void
qat_qp_csr_build_ring_base_gen1(void *io_addr,
                        struct qat_queue *queue)
{
        uint64_t queue_base;

        queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
                        queue->queue_size);
        WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
                queue->hw_queue_number, queue_base);
}

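/*
 * Set this tx ring's bit in its bundle's arbiter enable CSR. The
 * read-modify-write runs under the shared arbiter lock so a
 * concurrent update to another ring in the same bundle is not lost.
 */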
void
qat_qp_adf_arb_enable_gen1(const struct qat_queue *txq,
                        void *base_addr, rte_spinlock_t *lock)
{
        uint32_t arb_csr_offset = 0, value;

        rte_spinlock_lock(lock);
        arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
                        (ADF_ARB_REG_SLOT *
                        txq->hw_bundle_number);
        value = ADF_CSR_RD(base_addr,
                        arb_csr_offset);
        value |= (0x01 << txq->hw_queue_number);
        ADF_CSR_WR(base_addr, arb_csr_offset, value);
        rte_spinlock_unlock(lock);
}

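/* Clear the same arbiter-enable bit, again under the shared lock. */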
void
qat_qp_adf_arb_disable_gen1(const struct qat_queue *txq,
                        void *base_addr, rte_spinlock_t *lock)
{
        uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
                                (ADF_ARB_REG_SLOT * txq->hw_bundle_number);
        uint32_t value;

        rte_spinlock_lock(lock);
        value = ADF_CSR_RD(base_addr, arb_csr_offset);
        value &= ~(0x01 << txq->hw_queue_number);
        ADF_CSR_WR(base_addr, arb_csr_offset, value);
        rte_spinlock_unlock(lock);
}

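/*
 * Write the ring-config CSRs for both rings of a queue pair; only the
 * response ring is given near-full/near-empty watermark settings.
 */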
void
qat_qp_adf_configure_queues_gen1(struct qat_qp *qp)
{
        uint32_t q_tx_config, q_resp_config;
        struct qat_queue *q_tx = &qp->tx_q, *q_rx = &qp->rx_q;

        q_tx_config = BUILD_RING_CONFIG(q_tx->queue_size);
        q_resp_config = BUILD_RESP_RING_CONFIG(q_rx->queue_size,
                        ADF_RING_NEAR_WATERMARK_512,
                        ADF_RING_NEAR_WATERMARK_0);
        WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr,
                q_tx->hw_bundle_number, q_tx->hw_queue_number,
                q_tx_config);
        WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr,
                q_rx->hw_bundle_number, q_rx->hw_queue_number,
                q_resp_config);
}

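/* Publish a ring's new tail pointer to hardware. */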
void
qat_qp_csr_write_tail_gen1(struct qat_qp *qp, struct qat_queue *q)
{
        WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
                q->hw_queue_number, q->tail);
}

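/* Write a ring's new head pointer, acknowledging consumed entries. */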
void
qat_qp_csr_write_head_gen1(struct qat_qp *qp, struct qat_queue *q,
                        uint32_t new_head)
{
        WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
                        q->hw_queue_number, new_head);
}

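/*
 * Full CSR setup for a queue pair: program both ring bases, configure
 * the rings, then enable arbitration on the tx ring.
 */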
void
qat_qp_csr_setup_gen1(struct qat_pci_device *qat_dev,
                        void *io_addr, struct qat_qp *qp)
{
        qat_qp_csr_build_ring_base_gen1(io_addr, &qp->tx_q);
        qat_qp_csr_build_ring_base_gen1(io_addr, &qp->rx_q);
        qat_qp_adf_configure_queues_gen1(qp);
        qat_qp_adf_arb_enable_gen1(&qp->tx_q, qp->mmap_bar_addr,
                                        &qat_dev->arb_csr_lock);
}

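/* GEN1 queue-pair ops, registered in the constructor below. */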
static struct qat_qp_hw_spec_funcs qat_qp_hw_spec_gen1 = {
        .qat_qp_rings_per_service = qat_qp_rings_per_service_gen1,
        .qat_qp_build_ring_base = qat_qp_csr_build_ring_base_gen1,
        .qat_qp_adf_arb_enable = qat_qp_adf_arb_enable_gen1,
        .qat_qp_adf_arb_disable = qat_qp_adf_arb_disable_gen1,
        .qat_qp_adf_configure_queues = qat_qp_adf_configure_queues_gen1,
        .qat_qp_csr_write_tail = qat_qp_csr_write_tail_gen1,
        .qat_qp_csr_write_head = qat_qp_csr_write_head_gen1,
        .qat_qp_csr_setup = qat_qp_csr_setup_gen1,
        .qat_qp_get_hw_data = qat_qp_get_hw_data_gen1,
};

int
qat_reset_ring_pairs_gen1(struct qat_pci_device *qat_pci_dev __rte_unused)
{
        /*
         * Ring-pair reset is not supported on base (GEN1) hardware,
         * so there is nothing to do; report success and continue.
         */
        return 0;
}

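/* On GEN1 devices the transport (ring) CSRs live in BAR 0. */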
const struct rte_mem_resource *
qat_dev_get_transport_bar_gen1(struct rte_pci_device *pci_dev)
{
        return &pci_dev->mem_resource[0];
}

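/* GEN1 has no separate misc BAR; signal that with -1. */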
int
qat_dev_get_misc_bar_gen1(struct rte_mem_resource **mem_resource __rte_unused,
                struct rte_pci_device *pci_dev __rte_unused)
{
        return -1;
}

int
qat_dev_read_config_gen1(struct qat_pci_device *qat_dev __rte_unused)
{
        /*
         * Base generations have no device configuration to read.
         * Implement the op anyway, so that a NULL pointer still
         * flags a higher generation that faultily left it unset.
         */
        return 0;
}

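/* No generation-specific extra data is needed on GEN1. */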
int
qat_dev_get_extra_size_gen1(void)
{
        return 0;
}

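/* GEN1 device-level ops, registered in the constructor below. */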
static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_gen1 = {
        .qat_dev_reset_ring_pairs = qat_reset_ring_pairs_gen1,
        .qat_dev_get_transport_bar = qat_dev_get_transport_bar_gen1,
        .qat_dev_get_misc_bar = qat_dev_get_misc_bar_gen1,
        .qat_dev_read_config = qat_dev_read_config_gen1,
        .qat_dev_get_extra_size = qat_dev_get_extra_size_gen1,
};

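/*
 * Constructor: plug the GEN1 op tables into the generation-indexed
 * arrays so common code can dispatch on QAT_GEN1 devices.
 */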
RTE_INIT(qat_dev_gen_gen1_init)
{
        qat_qp_hw_spec[QAT_GEN1] = &qat_qp_hw_spec_gen1;
        qat_dev_hw_spec[QAT_GEN1] = &qat_dev_hw_spec_gen1;
        qat_gen_config[QAT_GEN1].dev_gen = QAT_GEN1;
}