4d8860fc36da344bea63899bc2af8904b8108ba7
[dpdk.git] / drivers / event / octeontx2 / otx2_evdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2019 Marvell International Ltd.
3  */
4
5 #include <inttypes.h>
6
7 #include <rte_bus_pci.h>
8 #include <rte_common.h>
9 #include <rte_eal.h>
10 #include <rte_eventdev_pmd_pci.h>
11 #include <rte_kvargs.h>
12 #include <rte_mbuf_pool_ops.h>
13 #include <rte_pci.h>
14
15 #include "otx2_evdev_stats.h"
16 #include "otx2_evdev.h"
17 #include "otx2_irq.h"
18 #include "otx2_tim_evdev.h"
19
20 static inline int
21 sso_get_msix_offsets(const struct rte_eventdev *event_dev)
22 {
23         struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
24         uint8_t nb_ports = dev->nb_event_ports * (dev->dual_ws ? 2 : 1);
25         struct otx2_mbox *mbox = dev->mbox;
26         struct msix_offset_rsp *msix_rsp;
27         int i, rc;
28
29         /* Get SSO and SSOW MSIX vector offsets */
30         otx2_mbox_alloc_msg_msix_offset(mbox);
31         rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);
32
33         for (i = 0; i < nb_ports; i++)
34                 dev->ssow_msixoff[i] = msix_rsp->ssow_msixoff[i];
35
36         for (i = 0; i < dev->nb_event_queues; i++)
37                 dev->sso_msixoff[i] = msix_rsp->sso_msixoff[i];
38
39         return rc;
40 }
41
/* Select and publish the fastpath enqueue/dequeue/Tx-adapter handlers that
 * match this device's configured Rx/Tx offload flags and work-slot mode
 * (single vs. dual GWS). Each lookup table below is populated by expanding
 * the R()/T() function lists and is indexed by one boolean per offload
 * flag, so the chosen handler is specialized at setup time rather than
 * branching per packet.
 */
void
sso_fastpath_fns_set(struct rte_eventdev *event_dev)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	/* Single WS modes */
	const event_dequeue_t ssogws_deq[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
		[f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_burst_t ssogws_deq_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
		[f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_t ssogws_deq_timeout[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
		[f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_timeout_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_burst_t
		ssogws_deq_timeout_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
		[f5][f4][f3][f2][f1][f0] =				\
			otx2_ssogws_deq_timeout_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_t ssogws_deq_seg[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
		[f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_seg_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_burst_t ssogws_deq_seg_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
		[f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_seg_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_t ssogws_deq_seg_timeout[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
		[f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_seg_timeout_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_burst_t
		ssogws_deq_seg_timeout_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
		[f5][f4][f3][f2][f1][f0] =				\
				otx2_ssogws_deq_seg_timeout_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};


	/* Dual WS modes */
	const event_dequeue_t ssogws_dual_deq[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
		[f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_burst_t ssogws_dual_deq_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
		[f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_t ssogws_dual_deq_timeout[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
		[f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_timeout_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_burst_t
		ssogws_dual_deq_timeout_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
	[f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_timeout_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_t ssogws_dual_deq_seg[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
		[f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_seg_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_burst_t
		ssogws_dual_deq_seg_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
		[f5][f4][f3][f2][f1][f0] =				\
				otx2_ssogws_dual_deq_seg_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_t ssogws_dual_deq_seg_timeout[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
		[f5][f4][f3][f2][f1][f0] =				\
				otx2_ssogws_dual_deq_seg_timeout_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_burst_t
		ssogws_dual_deq_seg_timeout_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
	[f5][f4][f3][f2][f1][f0] =					\
		otx2_ssogws_dual_deq_seg_timeout_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	/* Tx modes */
	const event_tx_adapter_enqueue ssogws_tx_adptr_enq[2][2][2][2][2][2] = {
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)			\
		[f5][f4][f3][f2][f1][f0] =  otx2_ssogws_tx_adptr_enq_ ## name,
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
	};

	const event_tx_adapter_enqueue
		ssogws_tx_adptr_enq_seg[2][2][2][2][2][2] = {
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)			\
		[f5][f4][f3][f2][f1][f0] =				\
			otx2_ssogws_tx_adptr_enq_seg_ ## name,
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
	};

	const event_tx_adapter_enqueue
		ssogws_dual_tx_adptr_enq[2][2][2][2][2][2] = {
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)			\
		[f5][f4][f3][f2][f1][f0] =				\
			otx2_ssogws_dual_tx_adptr_enq_ ## name,
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
	};

	const event_tx_adapter_enqueue
		ssogws_dual_tx_adptr_enq_seg[2][2][2][2][2][2] = {
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)			\
		[f5][f4][f3][f2][f1][f0] =				\
			otx2_ssogws_dual_tx_adptr_enq_seg_ ## name,
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
	};

	/* Default to the single work-slot handlers; overridden below for
	 * dual work-slot mode. Rx dequeue tables are indexed as
	 * [TSTAMP][MARK_UPDATE][VLAN_STRIP][CHECKSUM][PTYPE][RSS].
	 */
	event_dev->enqueue			= otx2_ssogws_enq;
	event_dev->enqueue_burst		= otx2_ssogws_enq_burst;
	event_dev->enqueue_new_burst		= otx2_ssogws_enq_new_burst;
	event_dev->enqueue_forward_burst	= otx2_ssogws_enq_fwd_burst;
	if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
		event_dev->dequeue		= ssogws_deq_seg
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
		event_dev->dequeue_burst	= ssogws_deq_seg_burst
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
		if (dev->is_timeout_deq) {
			event_dev->dequeue	= ssogws_deq_seg_timeout
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
			event_dev->dequeue_burst	=
				ssogws_deq_seg_timeout_burst
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
		}
	} else {
		event_dev->dequeue			= ssogws_deq
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
		event_dev->dequeue_burst		= ssogws_deq_burst
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
		if (dev->is_timeout_deq) {
			event_dev->dequeue		= ssogws_deq_timeout
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
			event_dev->dequeue_burst	=
				ssogws_deq_timeout_burst
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
		}
	}

	if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) {
		/* [TSO] [TSMP] [MBUF_NOFF] [VLAN] [OL3_OL4_CSUM] [L3_L4_CSUM] */
		event_dev->txa_enqueue = ssogws_tx_adptr_enq_seg
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
	} else {
		event_dev->txa_enqueue = ssogws_tx_adptr_enq
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
	}

	/* Dual work-slot mode: replace all handlers with dual-GWS variants. */
	if (dev->dual_ws) {
		event_dev->enqueue		= otx2_ssogws_dual_enq;
		event_dev->enqueue_burst	= otx2_ssogws_dual_enq_burst;
		event_dev->enqueue_new_burst	=
					otx2_ssogws_dual_enq_new_burst;
		event_dev->enqueue_forward_burst =
					otx2_ssogws_dual_enq_fwd_burst;

		if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
			event_dev->dequeue	= ssogws_dual_deq_seg
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_TSTAMP_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_CHECKSUM_F)]
				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
			event_dev->dequeue_burst = ssogws_dual_deq_seg_burst
				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_CHECKSUM_F)]
				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
			if (dev->is_timeout_deq) {
				event_dev->dequeue	=
					ssogws_dual_deq_seg_timeout
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_TSTAMP_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_CHECKSUM_F)]
					[!!(dev->rx_offloads &
							NIX_RX_OFFLOAD_PTYPE_F)]
					[!!(dev->rx_offloads &
							NIX_RX_OFFLOAD_RSS_F)];
				event_dev->dequeue_burst =
					ssogws_dual_deq_seg_timeout_burst
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_TSTAMP_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_CHECKSUM_F)]
					[!!(dev->rx_offloads &
							NIX_RX_OFFLOAD_PTYPE_F)]
					[!!(dev->rx_offloads &
							NIX_RX_OFFLOAD_RSS_F)];
			}
		} else {
			event_dev->dequeue		= ssogws_dual_deq
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_TSTAMP_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_CHECKSUM_F)]
				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
			event_dev->dequeue_burst	= ssogws_dual_deq_burst
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_TSTAMP_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_CHECKSUM_F)]
				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
			if (dev->is_timeout_deq) {
				event_dev->dequeue	=
					ssogws_dual_deq_timeout
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_TSTAMP_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_CHECKSUM_F)]
					[!!(dev->rx_offloads &
							NIX_RX_OFFLOAD_PTYPE_F)]
					[!!(dev->rx_offloads &
							NIX_RX_OFFLOAD_RSS_F)];
				event_dev->dequeue_burst =
					ssogws_dual_deq_timeout_burst
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_TSTAMP_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_CHECKSUM_F)]
					[!!(dev->rx_offloads &
							NIX_RX_OFFLOAD_PTYPE_F)]
					[!!(dev->rx_offloads &
							NIX_RX_OFFLOAD_RSS_F)];
			}
		}

		if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) {
		/* [TSO] [TSMP] [MBUF_NOFF] [VLAN] [OL3_OL4_CSUM] [L3_L4_CSUM] */
			event_dev->txa_enqueue = ssogws_dual_tx_adptr_enq_seg
				[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
				[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
				[!!(dev->tx_offloads &
						NIX_TX_OFFLOAD_MBUF_NOFF_F)]
				[!!(dev->tx_offloads &
						NIX_TX_OFFLOAD_VLAN_QINQ_F)]
				[!!(dev->tx_offloads &
						NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
				[!!(dev->tx_offloads &
						NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
		} else {
			event_dev->txa_enqueue = ssogws_dual_tx_adptr_enq
				[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
				[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
				[!!(dev->tx_offloads &
						NIX_TX_OFFLOAD_MBUF_NOFF_F)]
				[!!(dev->tx_offloads &
						NIX_TX_OFFLOAD_VLAN_QINQ_F)]
				[!!(dev->tx_offloads &
						NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
				[!!(dev->tx_offloads &
						NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
		}
	}
	/* Full barrier so the handler pointer updates above are visible to
	 * other cores before fastpath use.
	 */
	rte_mb();
}
439
440 static void
441 otx2_sso_info_get(struct rte_eventdev *event_dev,
442                   struct rte_event_dev_info *dev_info)
443 {
444         struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
445
446         dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX2_PMD);
447         dev_info->min_dequeue_timeout_ns = dev->min_dequeue_timeout_ns;
448         dev_info->max_dequeue_timeout_ns = dev->max_dequeue_timeout_ns;
449         dev_info->max_event_queues = dev->max_event_queues;
450         dev_info->max_event_queue_flows = (1ULL << 20);
451         dev_info->max_event_queue_priority_levels = 8;
452         dev_info->max_event_priority_levels = 1;
453         dev_info->max_event_ports = dev->max_event_ports;
454         dev_info->max_event_port_dequeue_depth = 1;
455         dev_info->max_event_port_enqueue_depth = 1;
456         dev_info->max_num_events =  dev->max_num_events;
457         dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
458                                         RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
459                                         RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
460                                         RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
461                                         RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
462                                         RTE_EVENT_DEV_CAP_NONSEQ_MODE;
463 }
464
465 static void
466 sso_port_link_modify(struct otx2_ssogws *ws, uint8_t queue, uint8_t enable)
467 {
468         uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);
469         uint64_t val;
470
471         val = queue;
472         val |= 0ULL << 12; /* SET 0 */
473         val |= 0x8000800080000000; /* Dont modify rest of the masks */
474         val |= (uint64_t)enable << 14;   /* Enable/Disable Membership. */
475
476         otx2_write64(val, base + SSOW_LF_GWS_GRPMSK_CHG);
477 }
478
479 static int
480 otx2_sso_port_link(struct rte_eventdev *event_dev, void *port,
481                    const uint8_t queues[], const uint8_t priorities[],
482                    uint16_t nb_links)
483 {
484         struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
485         uint8_t port_id = 0;
486         uint16_t link;
487
488         RTE_SET_USED(priorities);
489         for (link = 0; link < nb_links; link++) {
490                 if (dev->dual_ws) {
491                         struct otx2_ssogws_dual *ws = port;
492
493                         port_id = ws->port;
494                         sso_port_link_modify((struct otx2_ssogws *)
495                                         &ws->ws_state[0], queues[link], true);
496                         sso_port_link_modify((struct otx2_ssogws *)
497                                         &ws->ws_state[1], queues[link], true);
498                 } else {
499                         struct otx2_ssogws *ws = port;
500
501                         port_id = ws->port;
502                         sso_port_link_modify(ws, queues[link], true);
503                 }
504         }
505         sso_func_trace("Port=%d nb_links=%d", port_id, nb_links);
506
507         return (int)nb_links;
508 }
509
510 static int
511 otx2_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
512                      uint8_t queues[], uint16_t nb_unlinks)
513 {
514         struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
515         uint8_t port_id = 0;
516         uint16_t unlink;
517
518         for (unlink = 0; unlink < nb_unlinks; unlink++) {
519                 if (dev->dual_ws) {
520                         struct otx2_ssogws_dual *ws = port;
521
522                         port_id = ws->port;
523                         sso_port_link_modify((struct otx2_ssogws *)
524                                         &ws->ws_state[0], queues[unlink],
525                                         false);
526                         sso_port_link_modify((struct otx2_ssogws *)
527                                         &ws->ws_state[1], queues[unlink],
528                                         false);
529                 } else {
530                         struct otx2_ssogws *ws = port;
531
532                         port_id = ws->port;
533                         sso_port_link_modify(ws, queues[unlink], false);
534                 }
535         }
536         sso_func_trace("Port=%d nb_unlinks=%d", port_id, nb_unlinks);
537
538         return (int)nb_unlinks;
539 }
540
541 static int
542 sso_hw_lf_cfg(struct otx2_mbox *mbox, enum otx2_sso_lf_type type,
543               uint16_t nb_lf, uint8_t attach)
544 {
545         if (attach) {
546                 struct rsrc_attach_req *req;
547
548                 req = otx2_mbox_alloc_msg_attach_resources(mbox);
549                 switch (type) {
550                 case SSO_LF_GGRP:
551                         req->sso = nb_lf;
552                         break;
553                 case SSO_LF_GWS:
554                         req->ssow = nb_lf;
555                         break;
556                 default:
557                         return -EINVAL;
558                 }
559                 req->modify = true;
560                 if (otx2_mbox_process(mbox) < 0)
561                         return -EIO;
562         } else {
563                 struct rsrc_detach_req *req;
564
565                 req = otx2_mbox_alloc_msg_detach_resources(mbox);
566                 switch (type) {
567                 case SSO_LF_GGRP:
568                         req->sso = true;
569                         break;
570                 case SSO_LF_GWS:
571                         req->ssow = true;
572                         break;
573                 default:
574                         return -EINVAL;
575                 }
576                 req->partial = true;
577                 if (otx2_mbox_process(mbox) < 0)
578                         return -EIO;
579         }
580
581         return 0;
582 }
583
/* Issue an SSO/SSOW LF alloc or free request to the AF over the mailbox.
 *
 * @param dev    SSO evdev private data; a GGRP alloc response updates its
 *               XAQ geometry fields (buf size, WQEs per XAQ, IUEs).
 * @param mbox   Mailbox used to reach the AF.
 * @param type   SSO_LF_GGRP (event queues) or SSO_LF_GWS (workslots).
 * @param nb_lf  Number of LFs to allocate or free.
 * @param alloc  Non-zero to allocate, zero to free.
 * @return 0 on success, -EINVAL on unknown type, or the negative mailbox
 *         error code.
 */
static int
sso_lf_cfg(struct otx2_sso_evdev *dev, struct otx2_mbox *mbox,
           enum otx2_sso_lf_type type, uint16_t nb_lf, uint8_t alloc)
{
        void *rsp;
        int rc;

        if (alloc) {
                switch (type) {
                case SSO_LF_GGRP:
                        {
                        struct sso_lf_alloc_req *req_ggrp;
                        req_ggrp = otx2_mbox_alloc_msg_sso_lf_alloc(mbox);
                        req_ggrp->hwgrps = nb_lf;
                        }
                        break;
                case SSO_LF_GWS:
                        {
                        struct ssow_lf_alloc_req *req_hws;
                        req_hws = otx2_mbox_alloc_msg_ssow_lf_alloc(mbox);
                        req_hws->hws = nb_lf;
                        }
                        break;
                default:
                        return -EINVAL;
                }
        } else {
                switch (type) {
                case SSO_LF_GGRP:
                        {
                        struct sso_lf_free_req *req_ggrp;
                        req_ggrp = otx2_mbox_alloc_msg_sso_lf_free(mbox);
                        req_ggrp->hwgrps = nb_lf;
                        }
                        break;
                case SSO_LF_GWS:
                        {
                        struct ssow_lf_free_req *req_hws;
                        req_hws = otx2_mbox_alloc_msg_ssow_lf_free(mbox);
                        req_hws->hws = nb_lf;
                        }
                        break;
                default:
                        return -EINVAL;
                }
        }

        /* ~0 timeout: wait indefinitely for the AF to answer. */
        rc = otx2_mbox_process_msg_tmo(mbox, (void **)&rsp, ~0);
        if (rc < 0)
                return rc;

        if (alloc && type == SSO_LF_GGRP) {
                struct sso_lf_alloc_rsp *rsp_ggrp = rsp;

                /* Cache XAQ geometry reported by the AF; used later by
                 * sso_xaq_allocate() to size the XAQ pool.
                 */
                dev->xaq_buf_size = rsp_ggrp->xaq_buf_size;
                dev->xae_waes = rsp_ggrp->xaq_wq_entries;
                dev->iue = rsp_ggrp->in_unit_entries;
        }

        return 0;
}
645
/* Release an event port allocated by sso_configure_ports()/
 * sso_configure_dual_ports(); rte_free(NULL) is a no-op.
 */
static void
otx2_sso_port_release(void *port)
{
        rte_free(port);
}
651
/* Event queues map to HW GGRP LFs with no per-queue software allocation,
 * so there is nothing to release here.
 */
static void
otx2_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
{
        RTE_SET_USED(event_dev);
        RTE_SET_USED(queue_id);
}
658
659 static void
660 sso_clr_links(const struct rte_eventdev *event_dev)
661 {
662         struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
663         int i, j;
664
665         for (i = 0; i < dev->nb_event_ports; i++) {
666                 if (dev->dual_ws) {
667                         struct otx2_ssogws_dual *ws;
668
669                         ws = event_dev->data->ports[i];
670                         for (j = 0; j < dev->nb_event_queues; j++) {
671                                 sso_port_link_modify((struct otx2_ssogws *)
672                                                 &ws->ws_state[0], j, false);
673                                 sso_port_link_modify((struct otx2_ssogws *)
674                                                 &ws->ws_state[1], j, false);
675                         }
676                 } else {
677                         struct otx2_ssogws *ws;
678
679                         ws = event_dev->data->ports[i];
680                         for (j = 0; j < dev->nb_event_queues; j++)
681                                 sso_port_link_modify(ws, j, false);
682                 }
683         }
684 }
685
686 static void
687 sso_set_port_ops(struct otx2_ssogws *ws, uintptr_t base)
688 {
689         ws->tag_op              = base + SSOW_LF_GWS_TAG;
690         ws->wqp_op              = base + SSOW_LF_GWS_WQP;
691         ws->getwrk_op           = base + SSOW_LF_GWS_OP_GET_WORK;
692         ws->swtp_op             = base + SSOW_LF_GWS_SWTP;
693         ws->swtag_norm_op       = base + SSOW_LF_GWS_OP_SWTAG_NORM;
694         ws->swtag_desched_op    = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
695 }
696
/* Allocate and initialise dual-workslot event ports: two GWS LFs are
 * attached per event port and each half is mapped through BAR2.
 *
 * @return 0 on success, -ENODEV on LF attach/init failure, -ENOMEM when
 *         a port allocation fails (attached LFs are rolled back).
 */
static int
sso_configure_dual_ports(const struct rte_eventdev *event_dev)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        struct otx2_mbox *mbox = dev->mbox;
        uint8_t vws = 0;
        uint8_t nb_lf;
        int i, rc;

        otx2_sso_dbg("Configuring event ports %d", dev->nb_event_ports);

        /* Two GWS LFs per dual-workslot port. */
        nb_lf = dev->nb_event_ports * 2;
        /* Ask AF to attach required LFs. */
        rc = sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, true);
        if (rc < 0) {
                otx2_err("Failed to attach SSO GWS LF");
                return -ENODEV;
        }

        if (sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, true) < 0) {
                /* Init failed: detach the LFs we just attached. */
                sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
                otx2_err("Failed to init SSO GWS LF");
                return -ENODEV;
        }

        for (i = 0; i < dev->nb_event_ports; i++) {
                struct otx2_ssogws_dual *ws;
                uintptr_t base;

                /* Free memory prior to re-allocation if needed */
                if (event_dev->data->ports[i] != NULL) {
                        ws = event_dev->data->ports[i];
                        rte_free(ws);
                        ws = NULL;
                }

                /* Allocate event port memory */
                ws = rte_zmalloc_socket("otx2_sso_ws",
                                        sizeof(struct otx2_ssogws_dual),
                                        RTE_CACHE_LINE_SIZE,
                                        event_dev->data->socket_id);
                if (ws == NULL) {
                        otx2_err("Failed to alloc memory for port=%d", i);
                        rc = -ENOMEM;
                        break;
                }

                ws->port = i;
                /* Each GWS LF occupies a 4K window inside the SSOW BAR2
                 * region; vws indexes consecutive LFs (two per port).
                 */
                base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | vws << 12);
                sso_set_port_ops((struct otx2_ssogws *)&ws->ws_state[0], base);
                vws++;

                base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | vws << 12);
                sso_set_port_ops((struct otx2_ssogws *)&ws->ws_state[1], base);
                vws++;

                event_dev->data->ports[i] = ws;
        }

        /* Roll back the LFs if any port allocation failed. */
        if (rc < 0) {
                sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, false);
                sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
        }

        return rc;
}
763
/* Allocate and initialise single-workslot event ports: one GWS LF per
 * event port, mapped through BAR2.
 *
 * @return 0 on success, -ENODEV on LF attach/init failure, -ENOMEM when
 *         a port allocation fails (attached LFs are rolled back).
 */
static int
sso_configure_ports(const struct rte_eventdev *event_dev)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        struct otx2_mbox *mbox = dev->mbox;
        uint8_t nb_lf;
        int i, rc;

        otx2_sso_dbg("Configuring event ports %d", dev->nb_event_ports);

        nb_lf = dev->nb_event_ports;
        /* Ask AF to attach required LFs. */
        rc = sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, true);
        if (rc < 0) {
                otx2_err("Failed to attach SSO GWS LF");
                return -ENODEV;
        }

        if (sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, true) < 0) {
                /* Init failed: detach the LFs we just attached. */
                sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
                otx2_err("Failed to init SSO GWS LF");
                return -ENODEV;
        }

        for (i = 0; i < nb_lf; i++) {
                struct otx2_ssogws *ws;
                uintptr_t base;

                /* Free memory prior to re-allocation if needed */
                if (event_dev->data->ports[i] != NULL) {
                        ws = event_dev->data->ports[i];
                        rte_free(ws);
                        ws = NULL;
                }

                /* Allocate event port memory */
                ws = rte_zmalloc_socket("otx2_sso_ws",
                                        sizeof(struct otx2_ssogws),
                                        RTE_CACHE_LINE_SIZE,
                                        event_dev->data->socket_id);
                if (ws == NULL) {
                        otx2_err("Failed to alloc memory for port=%d", i);
                        rc = -ENOMEM;
                        break;
                }

                ws->port = i;
                /* Each GWS LF occupies a 4K window inside the SSOW BAR2
                 * region, indexed by LF number.
                 */
                base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | i << 12);
                sso_set_port_ops(ws, base);

                event_dev->data->ports[i] = ws;
        }

        /* Roll back the LFs if any port allocation failed. */
        if (rc < 0) {
                sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, false);
                sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
        }

        return rc;
}
824
825 static int
826 sso_configure_queues(const struct rte_eventdev *event_dev)
827 {
828         struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
829         struct otx2_mbox *mbox = dev->mbox;
830         uint8_t nb_lf;
831         int rc;
832
833         otx2_sso_dbg("Configuring event queues %d", dev->nb_event_queues);
834
835         nb_lf = dev->nb_event_queues;
836         /* Ask AF to attach required LFs. */
837         rc = sso_hw_lf_cfg(mbox, SSO_LF_GGRP, nb_lf, true);
838         if (rc < 0) {
839                 otx2_err("Failed to attach SSO GGRP LF");
840                 return -ENODEV;
841         }
842
843         if (sso_lf_cfg(dev, mbox, SSO_LF_GGRP, nb_lf, true) < 0) {
844                 sso_hw_lf_cfg(mbox, SSO_LF_GGRP, nb_lf, false);
845                 otx2_err("Failed to init SSO GGRP LF");
846                 return -ENODEV;
847         }
848
849         return rc;
850 }
851
852 static int
853 sso_xaq_allocate(struct otx2_sso_evdev *dev)
854 {
855         const struct rte_memzone *mz;
856         struct npa_aura_s *aura;
857         static int reconfig_cnt;
858         char pool_name[RTE_MEMZONE_NAMESIZE];
859         uint32_t xaq_cnt;
860         int rc;
861
862         if (dev->xaq_pool)
863                 rte_mempool_free(dev->xaq_pool);
864
865         /*
866          * Allocate memory for Add work backpressure.
867          */
868         mz = rte_memzone_lookup(OTX2_SSO_FC_NAME);
869         if (mz == NULL)
870                 mz = rte_memzone_reserve_aligned(OTX2_SSO_FC_NAME,
871                                                  OTX2_ALIGN +
872                                                  sizeof(struct npa_aura_s),
873                                                  rte_socket_id(),
874                                                  RTE_MEMZONE_IOVA_CONTIG,
875                                                  OTX2_ALIGN);
876         if (mz == NULL) {
877                 otx2_err("Failed to allocate mem for fcmem");
878                 return -ENOMEM;
879         }
880
881         dev->fc_iova = mz->iova;
882         dev->fc_mem = mz->addr;
883
884         aura = (struct npa_aura_s *)((uintptr_t)dev->fc_mem + OTX2_ALIGN);
885         memset(aura, 0, sizeof(struct npa_aura_s));
886
887         aura->fc_ena = 1;
888         aura->fc_addr = dev->fc_iova;
889         aura->fc_hyst_bits = 0; /* Store count on all updates */
890
891         /* Taken from HRM 14.3.3(4) */
892         xaq_cnt = dev->nb_event_queues * OTX2_SSO_XAQ_CACHE_CNT;
893         if (dev->xae_cnt)
894                 xaq_cnt += dev->xae_cnt / dev->xae_waes;
895         else if (dev->adptr_xae_cnt)
896                 xaq_cnt += (dev->adptr_xae_cnt / dev->xae_waes) +
897                         (OTX2_SSO_XAQ_SLACK * dev->nb_event_queues);
898         else
899                 xaq_cnt += (dev->iue / dev->xae_waes) +
900                         (OTX2_SSO_XAQ_SLACK * dev->nb_event_queues);
901
902         otx2_sso_dbg("Configuring %d xaq buffers", xaq_cnt);
903         /* Setup XAQ based on number of nb queues. */
904         snprintf(pool_name, 30, "otx2_xaq_buf_pool_%d", reconfig_cnt);
905         dev->xaq_pool = (void *)rte_mempool_create_empty(pool_name,
906                         xaq_cnt, dev->xaq_buf_size, 0, 0,
907                         rte_socket_id(), 0);
908
909         if (dev->xaq_pool == NULL) {
910                 otx2_err("Unable to create empty mempool.");
911                 rte_memzone_free(mz);
912                 return -ENOMEM;
913         }
914
915         rc = rte_mempool_set_ops_byname(dev->xaq_pool,
916                                         rte_mbuf_platform_mempool_ops(), aura);
917         if (rc != 0) {
918                 otx2_err("Unable to set xaqpool ops.");
919                 goto alloc_fail;
920         }
921
922         rc = rte_mempool_populate_default(dev->xaq_pool);
923         if (rc < 0) {
924                 otx2_err("Unable to set populate xaqpool.");
925                 goto alloc_fail;
926         }
927         reconfig_cnt++;
928         /* When SW does addwork (enqueue) check if there is space in XAQ by
929          * comparing fc_addr above against the xaq_lmt calculated below.
930          * There should be a minimum headroom (OTX2_SSO_XAQ_SLACK / 2) for SSO
931          * to request XAQ to cache them even before enqueue is called.
932          */
933         dev->xaq_lmt = xaq_cnt - (OTX2_SSO_XAQ_SLACK / 2 *
934                                   dev->nb_event_queues);
935         dev->nb_xaq_cfg = xaq_cnt;
936
937         return 0;
938 alloc_fail:
939         rte_mempool_free(dev->xaq_pool);
940         rte_memzone_free(mz);
941         return rc;
942 }
943
944 static int
945 sso_ggrp_alloc_xaq(struct otx2_sso_evdev *dev)
946 {
947         struct otx2_mbox *mbox = dev->mbox;
948         struct sso_hw_setconfig *req;
949
950         otx2_sso_dbg("Configuring XAQ for GGRPs");
951         req = otx2_mbox_alloc_msg_sso_hw_setconfig(mbox);
952         req->npa_pf_func = otx2_npa_pf_func_get();
953         req->npa_aura_id = npa_lf_aura_handle_to_aura(dev->xaq_pool->pool_id);
954         req->hwgrps = dev->nb_event_queues;
955
956         return otx2_mbox_process(mbox);
957 }
958
959 static void
960 sso_lf_teardown(struct otx2_sso_evdev *dev,
961                 enum otx2_sso_lf_type lf_type)
962 {
963         uint8_t nb_lf;
964
965         switch (lf_type) {
966         case SSO_LF_GGRP:
967                 nb_lf = dev->nb_event_queues;
968                 break;
969         case SSO_LF_GWS:
970                 nb_lf = dev->nb_event_ports;
971                 nb_lf *= dev->dual_ws ? 2 : 1;
972                 break;
973         default:
974                 return;
975         }
976
977         sso_lf_cfg(dev, dev->mbox, lf_type, nb_lf, false);
978         sso_hw_lf_cfg(dev->mbox, lf_type, nb_lf, false);
979 }
980
/* eventdev dev_configure(): validate the requested configuration, tear
 * down any previous setup, then configure workslots (ports), HW groups
 * (queues), the XAQ pool and IRQs, in that order.
 *
 * @return 0 on success, -EINVAL for unsupported configuration,
 *         -ENODEV/-ENOMEM on resource setup failure (partial setup is
 *         unwound before returning).
 */
static int
otx2_sso_configure(const struct rte_eventdev *event_dev)
{
        struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        uint32_t deq_tmo_ns;
        int rc;

        sso_func_trace();
        deq_tmo_ns = conf->dequeue_timeout_ns;

        /* 0 means "use the device minimum". */
        if (deq_tmo_ns == 0)
                deq_tmo_ns = dev->min_dequeue_timeout_ns;

        if (deq_tmo_ns < dev->min_dequeue_timeout_ns ||
            deq_tmo_ns > dev->max_dequeue_timeout_ns) {
                otx2_err("Unsupported dequeue timeout requested");
                return -EINVAL;
        }

        if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
                dev->is_timeout_deq = 1;

        dev->deq_tmo_ns = deq_tmo_ns;

        if (conf->nb_event_ports > dev->max_event_ports ||
            conf->nb_event_queues > dev->max_event_queues) {
                otx2_err("Unsupported event queues/ports requested");
                return -EINVAL;
        }

        /* HW workslots dequeue/enqueue one event at a time. */
        if (conf->nb_event_port_dequeue_depth > 1) {
                otx2_err("Unsupported event port deq depth requested");
                return -EINVAL;
        }

        if (conf->nb_event_port_enqueue_depth > 1) {
                otx2_err("Unsupported event port enq depth requested");
                return -EINVAL;
        }

        /* Re-configuration: undo the previous IRQ/LF setup first. */
        if (dev->configured)
                sso_unregister_irqs(event_dev);

        if (dev->nb_event_queues) {
                /* Finit any previous queues. */
                sso_lf_teardown(dev, SSO_LF_GGRP);
        }
        if (dev->nb_event_ports) {
                /* Finit any previous ports. */
                sso_lf_teardown(dev, SSO_LF_GWS);
        }

        dev->nb_event_queues = conf->nb_event_queues;
        dev->nb_event_ports = conf->nb_event_ports;

        if (dev->dual_ws)
                rc = sso_configure_dual_ports(event_dev);
        else
                rc = sso_configure_ports(event_dev);

        if (rc < 0) {
                otx2_err("Failed to configure event ports");
                return -ENODEV;
        }

        if (sso_configure_queues(event_dev) < 0) {
                otx2_err("Failed to configure event queues");
                rc = -ENODEV;
                goto teardown_hws;
        }

        if (sso_xaq_allocate(dev) < 0) {
                rc = -ENOMEM;
                goto teardown_hwggrp;
        }

        /* Clear any prior port-queue mapping. */
        sso_clr_links(event_dev);
        rc = sso_ggrp_alloc_xaq(dev);
        if (rc < 0) {
                otx2_err("Failed to alloc xaq to ggrp %d", rc);
                goto teardown_hwggrp;
        }

        rc = sso_get_msix_offsets(event_dev);
        if (rc < 0) {
                otx2_err("Failed to get msix offsets %d", rc);
                goto teardown_hwggrp;
        }

        rc = sso_register_irqs(event_dev);
        if (rc < 0) {
                otx2_err("Failed to register irq %d", rc);
                goto teardown_hwggrp;
        }

        dev->configured = 1;
        rte_mb();

        return 0;
/* Unwind in reverse order of setup. */
teardown_hwggrp:
        sso_lf_teardown(dev, SSO_LF_GGRP);
teardown_hws:
        sso_lf_teardown(dev, SSO_LF_GWS);
        dev->nb_event_queues = 0;
        dev->nb_event_ports = 0;
        dev->configured = 0;
        return rc;
}
1091
1092 static void
1093 otx2_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
1094                         struct rte_event_queue_conf *queue_conf)
1095 {
1096         RTE_SET_USED(event_dev);
1097         RTE_SET_USED(queue_id);
1098
1099         queue_conf->nb_atomic_flows = (1ULL << 20);
1100         queue_conf->nb_atomic_order_sequences = (1ULL << 20);
1101         queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
1102         queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
1103 }
1104
1105 static int
1106 otx2_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
1107                      const struct rte_event_queue_conf *queue_conf)
1108 {
1109         struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1110         struct otx2_mbox *mbox = dev->mbox;
1111         struct sso_grp_priority *req;
1112         int rc;
1113
1114         sso_func_trace("Queue=%d prio=%d", queue_id, queue_conf->priority);
1115
1116         req = otx2_mbox_alloc_msg_sso_grp_set_priority(dev->mbox);
1117         req->grp = queue_id;
1118         req->weight = 0xFF;
1119         req->affinity = 0xFF;
1120         /* Normalize <0-255> to <0-7> */
1121         req->priority = queue_conf->priority / 32;
1122
1123         rc = otx2_mbox_process(mbox);
1124         if (rc < 0) {
1125                 otx2_err("Failed to set priority queue=%d", queue_id);
1126                 return rc;
1127         }
1128
1129         return 0;
1130 }
1131
1132 static void
1133 otx2_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
1134                        struct rte_event_port_conf *port_conf)
1135 {
1136         struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1137
1138         RTE_SET_USED(port_id);
1139         port_conf->new_event_threshold = dev->max_num_events;
1140         port_conf->dequeue_depth = 1;
1141         port_conf->enqueue_depth = 1;
1142 }
1143
/* Finalise an event port: record every GGRP's BAR2 base in the workslot,
 * hook up the flow-control memory / XAQ limit, and program the get_work
 * timeout into the workslot's NW_TIM register.
 *
 * @return 0 on success, -EINVAL if the port memory or a group base
 *         address is missing.
 */
static int
otx2_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
                    const struct rte_event_port_conf *port_conf)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        uintptr_t grps_base[OTX2_SSO_MAX_VHGRP] = {0};
        uint64_t val;
        uint16_t q;

        sso_func_trace("Port=%d", port_id);
        RTE_SET_USED(port_conf);

        if (event_dev->data->ports[port_id] == NULL) {
                otx2_err("Invalid port Id %d", port_id);
                return -EINVAL;
        }

        /* Each GGRP LF occupies a 4K window inside the SSO BAR2 region. */
        for (q = 0; q < dev->nb_event_queues; q++) {
                grps_base[q] = dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20 | q << 12);
                if (grps_base[q] == 0) {
                        otx2_err("Failed to get grp[%d] base addr", q);
                        return -EINVAL;
                }
        }

        /* Set get_work timeout for HWS */
        val = NSEC2USEC(dev->deq_tmo_ns) - 1;

        if (dev->dual_ws) {
                struct otx2_ssogws_dual *ws = event_dev->data->ports[port_id];

                rte_memcpy(ws->grps_base, grps_base,
                           sizeof(uintptr_t) * OTX2_SSO_MAX_VHGRP);
                ws->fc_mem = dev->fc_mem;
                ws->xaq_lmt = dev->xaq_lmt;
                ws->tstamp = dev->tstamp;
                /* Program the timeout into both halves of the dual port. */
                otx2_write64(val, OTX2_SSOW_GET_BASE_ADDR(
                             ws->ws_state[0].getwrk_op) + SSOW_LF_GWS_NW_TIM);
                otx2_write64(val, OTX2_SSOW_GET_BASE_ADDR(
                             ws->ws_state[1].getwrk_op) + SSOW_LF_GWS_NW_TIM);
        } else {
                struct otx2_ssogws *ws = event_dev->data->ports[port_id];
                uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);

                rte_memcpy(ws->grps_base, grps_base,
                           sizeof(uintptr_t) * OTX2_SSO_MAX_VHGRP);
                ws->fc_mem = dev->fc_mem;
                ws->xaq_lmt = dev->xaq_lmt;
                ws->tstamp = dev->tstamp;
                otx2_write64(val, base + SSOW_LF_GWS_NW_TIM);
        }

        otx2_sso_dbg("Port=%d ws=%p", port_id, event_dev->data->ports[port_id]);

        return 0;
}
1200
1201 static int
1202 otx2_sso_timeout_ticks(struct rte_eventdev *event_dev, uint64_t ns,
1203                        uint64_t *tmo_ticks)
1204 {
1205         RTE_SET_USED(event_dev);
1206         *tmo_ticks = NSEC2TICK(ns, rte_get_timer_hz());
1207
1208         return 0;
1209 }
1210
1211 static void
1212 ssogws_dump(struct otx2_ssogws *ws, FILE *f)
1213 {
1214         uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);
1215
1216         fprintf(f, "SSOW_LF_GWS Base addr   0x%" PRIx64 "\n", (uint64_t)base);
1217         fprintf(f, "SSOW_LF_GWS_LINKS       0x%" PRIx64 "\n",
1218                 otx2_read64(base + SSOW_LF_GWS_LINKS));
1219         fprintf(f, "SSOW_LF_GWS_PENDWQP     0x%" PRIx64 "\n",
1220                 otx2_read64(base + SSOW_LF_GWS_PENDWQP));
1221         fprintf(f, "SSOW_LF_GWS_PENDSTATE   0x%" PRIx64 "\n",
1222                 otx2_read64(base + SSOW_LF_GWS_PENDSTATE));
1223         fprintf(f, "SSOW_LF_GWS_NW_TIM      0x%" PRIx64 "\n",
1224                 otx2_read64(base + SSOW_LF_GWS_NW_TIM));
1225         fprintf(f, "SSOW_LF_GWS_TAG         0x%" PRIx64 "\n",
1226                 otx2_read64(base + SSOW_LF_GWS_TAG));
1227         fprintf(f, "SSOW_LF_GWS_WQP         0x%" PRIx64 "\n",
1228                 otx2_read64(base + SSOW_LF_GWS_TAG));
1229         fprintf(f, "SSOW_LF_GWS_SWTP        0x%" PRIx64 "\n",
1230                 otx2_read64(base + SSOW_LF_GWS_SWTP));
1231         fprintf(f, "SSOW_LF_GWS_PENDTAG     0x%" PRIx64 "\n",
1232                 otx2_read64(base + SSOW_LF_GWS_PENDTAG));
1233 }
1234
1235 static void
1236 ssoggrp_dump(uintptr_t base, FILE *f)
1237 {
1238         fprintf(f, "SSO_LF_GGRP Base addr   0x%" PRIx64 "\n", (uint64_t)base);
1239         fprintf(f, "SSO_LF_GGRP_QCTL        0x%" PRIx64 "\n",
1240                 otx2_read64(base + SSO_LF_GGRP_QCTL));
1241         fprintf(f, "SSO_LF_GGRP_XAQ_CNT     0x%" PRIx64 "\n",
1242                 otx2_read64(base + SSO_LF_GGRP_XAQ_CNT));
1243         fprintf(f, "SSO_LF_GGRP_INT_THR     0x%" PRIx64 "\n",
1244                 otx2_read64(base + SSO_LF_GGRP_INT_THR));
1245         fprintf(f, "SSO_LF_GGRP_INT_CNT     0x%" PRIX64 "\n",
1246                 otx2_read64(base + SSO_LF_GGRP_INT_CNT));
1247         fprintf(f, "SSO_LF_GGRP_AQ_CNT      0x%" PRIX64 "\n",
1248                 otx2_read64(base + SSO_LF_GGRP_AQ_CNT));
1249         fprintf(f, "SSO_LF_GGRP_AQ_THR      0x%" PRIX64 "\n",
1250                 otx2_read64(base + SSO_LF_GGRP_AQ_THR));
1251         fprintf(f, "SSO_LF_GGRP_MISC_CNT    0x%" PRIx64 "\n",
1252                 otx2_read64(base + SSO_LF_GGRP_MISC_CNT));
1253 }
1254
/* eventdev dump(): print all workslot and HW group registers to @f.
 * Group bases are taken from port 0's cached grps_base[] table.
 */
static void
otx2_sso_dump(struct rte_eventdev *event_dev, FILE *f)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        uint8_t queue;
        uint8_t port;

        fprintf(f, "[%s] SSO running in [%s] mode\n", __func__, dev->dual_ws ?
                "dual_ws" : "single_ws");
        /* Dump SSOW registers */
        for (port = 0; port < dev->nb_event_ports; port++) {
                if (dev->dual_ws) {
                        struct otx2_ssogws_dual *ws =
                                event_dev->data->ports[port];

                        /* Dual mode: dump both virtual workslots. */
                        fprintf(f, "[%s] SSO dual workslot[%d] vws[%d] dump\n",
                                __func__, port, 0);
                        ssogws_dump((struct otx2_ssogws *)&ws->ws_state[0], f);
                        fprintf(f, "[%s]SSO dual workslot[%d] vws[%d] dump\n",
                                __func__, port, 1);
                        ssogws_dump((struct otx2_ssogws *)&ws->ws_state[1], f);
                } else {
                        fprintf(f, "[%s]SSO single workslot[%d] dump\n",
                                __func__, port);
                        ssogws_dump(event_dev->data->ports[port], f);
                }
        }

        /* Dump SSO registers */
        for (queue = 0; queue < dev->nb_event_queues; queue++) {
                fprintf(f, "[%s]SSO group[%d] dump\n", __func__, queue);
                if (dev->dual_ws) {
                        struct otx2_ssogws_dual *ws = event_dev->data->ports[0];
                        ssoggrp_dump(ws->grps_base[queue], f);
                } else {
                        struct otx2_ssogws *ws = event_dev->data->ports[0];
                        ssoggrp_dump(ws->grps_base[queue], f);
                }
        }
}
1295
1296 static void
1297 otx2_handle_event(void *arg, struct rte_event event)
1298 {
1299         struct rte_eventdev *event_dev = arg;
1300
1301         if (event_dev->dev_ops->dev_stop_flush != NULL)
1302                 event_dev->dev_ops->dev_stop_flush(event_dev->data->dev_id,
1303                                 event, event_dev->data->dev_stop_flush_arg);
1304 }
1305
1306 static void
1307 sso_qos_cfg(struct rte_eventdev *event_dev)
1308 {
1309         struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1310         struct sso_grp_qos_cfg *req;
1311         uint16_t i;
1312
1313         for (i = 0; i < dev->qos_queue_cnt; i++) {
1314                 uint8_t xaq_prcnt = dev->qos_parse_data[i].xaq_prcnt;
1315                 uint8_t iaq_prcnt = dev->qos_parse_data[i].iaq_prcnt;
1316                 uint8_t taq_prcnt = dev->qos_parse_data[i].taq_prcnt;
1317
1318                 if (dev->qos_parse_data[i].queue >= dev->nb_event_queues)
1319                         continue;
1320
1321                 req = otx2_mbox_alloc_msg_sso_grp_qos_config(dev->mbox);
1322                 req->xaq_limit = (dev->nb_xaq_cfg *
1323                                   (xaq_prcnt ? xaq_prcnt : 100)) / 100;
1324                 req->taq_thr = (SSO_HWGRP_IAQ_MAX_THR_MASK *
1325                                 (iaq_prcnt ? iaq_prcnt : 100)) / 100;
1326                 req->iaq_thr = (SSO_HWGRP_TAQ_MAX_THR_MASK *
1327                                 (taq_prcnt ? taq_prcnt : 100)) / 100;
1328         }
1329
1330         if (dev->qos_queue_cnt)
1331                 otx2_mbox_process(dev->mbox);
1332 }
1333
/* Quiesce or (re)arm the whole device: reset every workslot, drain all
 * pending events through workslot 0, then write @enable to each group's
 * QCTL register (0 disables, 1 enables) and invalidate the GWS cache.
 *
 * @param enable  1 to enable the GGRPs after flushing, 0 to leave them
 *                disabled (stop path).
 */
static void
sso_cleanup(struct rte_eventdev *event_dev, uint8_t enable)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        uint16_t i;

        for (i = 0; i < dev->nb_event_ports; i++) {
                if (dev->dual_ws) {
                        struct otx2_ssogws_dual *ws;

                        ws = event_dev->data->ports[i];
                        ssogws_reset((struct otx2_ssogws *)&ws->ws_state[0]);
                        ssogws_reset((struct otx2_ssogws *)&ws->ws_state[1]);
                        /* Forget any in-flight software state. */
                        ws->swtag_req = 0;
                        ws->vws = 0;
                        ws->ws_state[0].cur_grp = 0;
                        ws->ws_state[0].cur_tt = SSO_SYNC_EMPTY;
                        ws->ws_state[1].cur_grp = 0;
                        ws->ws_state[1].cur_tt = SSO_SYNC_EMPTY;
                } else {
                        struct otx2_ssogws *ws;

                        ws = event_dev->data->ports[i];
                        ssogws_reset(ws);
                        ws->swtag_req = 0;
                        ws->cur_grp = 0;
                        ws->cur_tt = SSO_SYNC_EMPTY;
                }
        }

        /* Make the resets visible before flushing through HWS0. */
        rte_mb();
        if (dev->dual_ws) {
                struct otx2_ssogws_dual *ws = event_dev->data->ports[0];
                struct otx2_ssogws temp_ws;

                /* Borrow HWS0's first half as a temporary single-mode
                 * workslot for the flush helper.
                 */
                memcpy(&temp_ws, &ws->ws_state[0],
                       sizeof(struct otx2_ssogws_state));
                for (i = 0; i < dev->nb_event_queues; i++) {
                        /* Consume all the events through HWS0 */
                        ssogws_flush_events(&temp_ws, i, ws->grps_base[i],
                                            otx2_handle_event, event_dev);
                        /* Enable/Disable SSO GGRP */
                        otx2_write64(enable, ws->grps_base[i] +
                                     SSO_LF_GGRP_QCTL);
                }
                ws->ws_state[0].cur_grp = 0;
                ws->ws_state[0].cur_tt = SSO_SYNC_EMPTY;
        } else {
                struct otx2_ssogws *ws = event_dev->data->ports[0];

                for (i = 0; i < dev->nb_event_queues; i++) {
                        /* Consume all the events through HWS0 */
                        ssogws_flush_events(ws, i, ws->grps_base[i],
                                            otx2_handle_event, event_dev);
                        /* Enable/Disable SSO GGRP */
                        otx2_write64(enable, ws->grps_base[i] +
                                     SSO_LF_GGRP_QCTL);
                }
                ws->cur_grp = 0;
                ws->cur_tt = SSO_SYNC_EMPTY;
        }

        /* reset SSO GWS cache */
        otx2_mbox_alloc_msg_sso_ws_cache_inv(dev->mbox);
        otx2_mbox_process(dev->mbox);
}
1400
1401 int
1402 sso_xae_reconfigure(struct rte_eventdev *event_dev)
1403 {
1404         struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1405         struct rte_mempool *prev_xaq_pool;
1406         int rc = 0;
1407
1408         if (event_dev->data->dev_started)
1409                 sso_cleanup(event_dev, 0);
1410
1411         prev_xaq_pool = dev->xaq_pool;
1412         dev->xaq_pool = NULL;
1413         rc = sso_xaq_allocate(dev);
1414         if (rc < 0) {
1415                 otx2_err("Failed to alloc xaq pool %d", rc);
1416                 rte_mempool_free(prev_xaq_pool);
1417                 return rc;
1418         }
1419         rc = sso_ggrp_alloc_xaq(dev);
1420         if (rc < 0) {
1421                 otx2_err("Failed to alloc xaq to ggrp %d", rc);
1422                 rte_mempool_free(prev_xaq_pool);
1423                 return rc;
1424         }
1425
1426         rte_mempool_free(prev_xaq_pool);
1427         rte_mb();
1428         if (event_dev->data->dev_started)
1429                 sso_cleanup(event_dev, 1);
1430
1431         return 0;
1432 }
1433
/* eventdev dev_start callback: bring the SSO into a running state.
 * The call order below is fixed: QoS must be programmed before the
 * group queues are enabled and events start flowing.
 */
static int
otx2_sso_start(struct rte_eventdev *event_dev)
{
	sso_func_trace();
	/* Program per-queue QoS (XAQ/TAQ/IAQ percentages) from devargs. */
	sso_qos_cfg(event_dev);
	/* Flush stale events and enable all GGRP queues (QCTL = 1). */
	sso_cleanup(event_dev, 1);
	/* Install fastpath enqueue/dequeue handlers for this config. */
	sso_fastpath_fns_set(event_dev);

	return 0;
}
1444
/* eventdev dev_stop callback: drain and disable the SSO. */
static void
otx2_sso_stop(struct rte_eventdev *event_dev)
{
	sso_func_trace();
	/* Consume in-flight events and disable all GGRP queues (QCTL = 0). */
	sso_cleanup(event_dev, 0);
	rte_mb();	/* ensure the disable is globally visible on return */
}
1452
1453 static int
1454 otx2_sso_close(struct rte_eventdev *event_dev)
1455 {
1456         struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1457         uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
1458         uint16_t i;
1459
1460         if (!dev->configured)
1461                 return 0;
1462
1463         sso_unregister_irqs(event_dev);
1464
1465         for (i = 0; i < dev->nb_event_queues; i++)
1466                 all_queues[i] = i;
1467
1468         for (i = 0; i < dev->nb_event_ports; i++)
1469                 otx2_sso_port_unlink(event_dev, event_dev->data->ports[i],
1470                                      all_queues, dev->nb_event_queues);
1471
1472         sso_lf_teardown(dev, SSO_LF_GGRP);
1473         sso_lf_teardown(dev, SSO_LF_GWS);
1474         dev->nb_event_ports = 0;
1475         dev->nb_event_queues = 0;
1476         rte_mempool_free(dev->xaq_pool);
1477         rte_memzone_free(rte_memzone_lookup(OTX2_SSO_FC_NAME));
1478
1479         return 0;
1480 }
1481
/* Initialize and register event driver with DPDK Application */
static struct rte_eventdev_ops otx2_sso_ops = {
	/* Device/queue/port configuration */
	.dev_infos_get    = otx2_sso_info_get,
	.dev_configure    = otx2_sso_configure,
	.queue_def_conf   = otx2_sso_queue_def_conf,
	.queue_setup      = otx2_sso_queue_setup,
	.queue_release    = otx2_sso_queue_release,
	.port_def_conf    = otx2_sso_port_def_conf,
	.port_setup       = otx2_sso_port_setup,
	.port_release     = otx2_sso_port_release,
	.port_link        = otx2_sso_port_link,
	.port_unlink      = otx2_sso_port_unlink,
	.timeout_ticks    = otx2_sso_timeout_ticks,

	/* Ethernet Rx adapter */
	.eth_rx_adapter_caps_get  = otx2_sso_rx_adapter_caps_get,
	.eth_rx_adapter_queue_add = otx2_sso_rx_adapter_queue_add,
	.eth_rx_adapter_queue_del = otx2_sso_rx_adapter_queue_del,
	.eth_rx_adapter_start = otx2_sso_rx_adapter_start,
	.eth_rx_adapter_stop = otx2_sso_rx_adapter_stop,

	/* Ethernet Tx adapter */
	.eth_tx_adapter_caps_get = otx2_sso_tx_adapter_caps_get,
	.eth_tx_adapter_queue_add = otx2_sso_tx_adapter_queue_add,
	.eth_tx_adapter_queue_del = otx2_sso_tx_adapter_queue_del,

	/* Event timer adapter (TIM) */
	.timer_adapter_caps_get = otx2_tim_caps_get,

	/* Extended statistics */
	.xstats_get       = otx2_sso_xstats_get,
	.xstats_reset     = otx2_sso_xstats_reset,
	.xstats_get_names = otx2_sso_xstats_get_names,

	/* Device lifecycle and diagnostics */
	.dump             = otx2_sso_dump,
	.dev_start        = otx2_sso_start,
	.dev_stop         = otx2_sso_stop,
	.dev_close        = otx2_sso_close,
	.dev_selftest     = otx2_sso_selftest,
};
1518
/* Devargs keys accepted by this PMD (see RTE_PMD_REGISTER_PARAM_STRING
 * at the bottom of this file for the expected value formats).
 */
#define OTX2_SSO_XAE_CNT        "xae_cnt"	/* override XAQ entry count */
#define OTX2_SSO_SINGLE_WS      "single_ws"	/* force single workslot mode */
#define OTX2_SSO_GGRP_QOS       "qos"		/* per-queue QoS percentages */
#define OTX2_SSO_SELFTEST       "selftest"	/* run selftest during init */
1523
1524 static void
1525 parse_queue_param(char *value, void *opaque)
1526 {
1527         struct otx2_sso_qos queue_qos = {0};
1528         uint8_t *val = (uint8_t *)&queue_qos;
1529         struct otx2_sso_evdev *dev = opaque;
1530         char *tok = strtok(value, "-");
1531         struct otx2_sso_qos *old_ptr;
1532
1533         if (!strlen(value))
1534                 return;
1535
1536         while (tok != NULL) {
1537                 *val = atoi(tok);
1538                 tok = strtok(NULL, "-");
1539                 val++;
1540         }
1541
1542         if (val != (&queue_qos.iaq_prcnt + 1)) {
1543                 otx2_err("Invalid QoS parameter expected [Qx-XAQ-TAQ-IAQ]");
1544                 return;
1545         }
1546
1547         dev->qos_queue_cnt++;
1548         old_ptr = dev->qos_parse_data;
1549         dev->qos_parse_data = rte_realloc(dev->qos_parse_data,
1550                                           sizeof(struct otx2_sso_qos) *
1551                                           dev->qos_queue_cnt, 0);
1552         if (dev->qos_parse_data == NULL) {
1553                 dev->qos_parse_data = old_ptr;
1554                 dev->qos_queue_cnt--;
1555                 return;
1556         }
1557         dev->qos_parse_data[dev->qos_queue_cnt - 1] = queue_qos;
1558 }
1559
/* Split a "[Qx-XAQ-TAQ-IAQ][Qz-XAQ-TAQ-IAQ]..." devarg string into its
 * bracketed groups and hand each group's contents to parse_queue_param().
 * Works on a private copy since parse_queue_param() tokenizes in place.
 */
static void
parse_qos_list(const char *value, void *opaque)
{
	char *s = strdup(value);
	char *start = NULL;
	char *end = NULL;
	char *f = s;

	/* Fix: strdup() can return NULL on OOM; the old code dereferenced
	 * it unconditionally in the loop condition below.
	 */
	if (s == NULL)
		return;

	while (*s) {
		if (*s == '[')
			start = s;
		else if (*s == ']')
			end = s;

		/* A complete [..] pair: terminate it and parse the inside. */
		if (start && start < end) {
			*end = 0;
			parse_queue_param(start + 1, opaque);
			s = end;
			start = end;
		}
		s++;
	}

	free(f);
}
1585
/* kvargs handler for the OTX2_SSO_GGRP_QOS key.
 *
 * The value is a dict of the form [Qx-XAQ-TAQ-IAQ][Qz-XAQ-TAQ-IAQ];
 * '-' separates the fields because ',' is reserved by the kvargs
 * syntax. Every field is a percentage, 0 meaning "use the default".
 */
static int
parse_sso_kvargs_dict(const char *key, const char *value, void *opaque)
{
	RTE_SET_USED(key);

	parse_qos_list(value, opaque);
	return 0;
}
1599
1600 static void
1601 sso_parse_devargs(struct otx2_sso_evdev *dev, struct rte_devargs *devargs)
1602 {
1603         struct rte_kvargs *kvlist;
1604         uint8_t single_ws = 0;
1605
1606         if (devargs == NULL)
1607                 return;
1608         kvlist = rte_kvargs_parse(devargs->args, NULL);
1609         if (kvlist == NULL)
1610                 return;
1611
1612         rte_kvargs_process(kvlist, OTX2_SSO_SELFTEST, &parse_kvargs_flag,
1613                            &dev->selftest);
1614         rte_kvargs_process(kvlist, OTX2_SSO_XAE_CNT, &parse_kvargs_value,
1615                            &dev->xae_cnt);
1616         rte_kvargs_process(kvlist, OTX2_SSO_SINGLE_WS, &parse_kvargs_flag,
1617                            &single_ws);
1618         rte_kvargs_process(kvlist, OTX2_SSO_GGRP_QOS, &parse_sso_kvargs_dict,
1619                            dev);
1620
1621         dev->dual_ws = !single_ws;
1622         rte_kvargs_free(kvlist);
1623 }
1624
1625 static int
1626 otx2_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
1627 {
1628         return rte_event_pmd_pci_probe(pci_drv, pci_dev,
1629                                        sizeof(struct otx2_sso_evdev),
1630                                        otx2_sso_init);
1631 }
1632
1633 static int
1634 otx2_sso_remove(struct rte_pci_device *pci_dev)
1635 {
1636         return rte_event_pmd_pci_remove(pci_dev, otx2_sso_fini);
1637 }
1638
/* PCI IDs handled by this driver: the OcteonTx2 RVU SSO/TIM PF. */
static const struct rte_pci_id pci_sso_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
			       PCI_DEVID_OCTEONTX2_RVU_SSO_TIM_PF)
	},
	{
		.vendor_id = 0,	/* sentinel */
	},
};
1648
/* PCI driver descriptor; requires BAR mapping and IOVA-as-VA mode. */
static struct rte_pci_driver pci_sso = {
	.id_table = pci_sso_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
	.probe = otx2_sso_probe,
	.remove = otx2_sso_remove,
};
1655
/* Device init callback invoked from otx2_sso_probe().
 *
 * Sets up the eventdev ops, initializes the base otx2 device, queries
 * the AF over the mailbox for available SSO/SSOW resources, attaches
 * the shared NPA LF and applies devargs. Returns 0 on success or a
 * negative error code; on failure everything acquired so far is
 * unwound via the goto chain at the bottom.
 */
int
otx2_sso_init(struct rte_eventdev *event_dev)
{
	struct free_rsrcs_rsp *rsrc_cnt;
	struct rte_pci_device *pci_dev;
	struct otx2_sso_evdev *dev;
	int rc;

	event_dev->dev_ops = &otx2_sso_ops;
	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		sso_fastpath_fns_set(event_dev);
		return 0;
	}

	dev = sso_pmd_priv(event_dev);

	pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);

	/* Initialize the base otx2_dev object */
	rc = otx2_dev_init(pci_dev, dev);
	if (rc < 0) {
		otx2_err("Failed to initialize otx2_dev rc=%d", rc);
		goto error;
	}

	/* Get SSO and SSOW MSIX rsrc cnt */
	otx2_mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
	rc = otx2_mbox_process_msg(dev->mbox, (void *)&rsrc_cnt);
	if (rc < 0) {
		otx2_err("Unable to get free rsrc count");
		goto otx2_dev_uninit;
	}
	otx2_sso_dbg("SSO %d SSOW %d NPA %d provisioned", rsrc_cnt->sso,
		     rsrc_cnt->ssow, rsrc_cnt->npa);

	/* Cap queues/ports at both the provisioned count and the HW max. */
	dev->max_event_ports = RTE_MIN(rsrc_cnt->ssow, OTX2_SSO_MAX_VHWS);
	dev->max_event_queues = RTE_MIN(rsrc_cnt->sso, OTX2_SSO_MAX_VHGRP);
	/* Grab the NPA LF if required */
	rc = otx2_npa_lf_init(pci_dev, dev);
	if (rc < 0) {
		otx2_err("Unable to init NPA lf. It might not be provisioned");
		goto otx2_dev_uninit;
	}

	dev->drv_inited = true;
	dev->is_timeout_deq = 0;
	/* Dequeue timeout range 1..0x3FF us — presumably a 10-bit HW
	 * field; confirm against the SSO register spec.
	 */
	dev->min_dequeue_timeout_ns = USEC2NSEC(1);
	dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);
	dev->max_num_events = -1;
	dev->nb_event_queues = 0;
	dev->nb_event_ports = 0;

	if (!dev->max_event_ports || !dev->max_event_queues) {
		otx2_err("Not enough eventdev resource queues=%d ports=%d",
			 dev->max_event_queues, dev->max_event_ports);
		rc = -ENODEV;
		goto otx2_npa_lf_uninit;
	}

	dev->dual_ws = 1;
	sso_parse_devargs(dev, pci_dev->device.devargs);
	if (dev->dual_ws) {
		otx2_sso_dbg("Using dual workslot mode");
		/* Each port consumes two HWS in dual workslot mode. */
		dev->max_event_ports = dev->max_event_ports / 2;
	} else {
		otx2_sso_dbg("Using single workslot mode");
	}

	otx2_sso_pf_func_set(dev->pf_func);
	otx2_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
		     event_dev->data->name, dev->max_event_queues,
		     dev->max_event_ports);
	/* Optional selftest requested via the "selftest" devarg. */
	if (dev->selftest) {
		event_dev->dev->driver = &pci_sso.driver;
		event_dev->dev_ops->dev_selftest();
	}

	/* Bring up the co-located TIM (event timer) block. */
	otx2_tim_init(pci_dev, (struct otx2_dev *)dev);

	return 0;

otx2_npa_lf_uninit:
	otx2_npa_lf_fini();
otx2_dev_uninit:
	otx2_dev_fini(pci_dev, dev);
error:
	return rc;
}
1745
1746 int
1747 otx2_sso_fini(struct rte_eventdev *event_dev)
1748 {
1749         struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1750         struct rte_pci_device *pci_dev;
1751
1752         /* For secondary processes, nothing to be done */
1753         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1754                 return 0;
1755
1756         pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);
1757
1758         if (!dev->drv_inited)
1759                 goto dev_fini;
1760
1761         dev->drv_inited = false;
1762         otx2_npa_lf_fini();
1763
1764 dev_fini:
1765         if (otx2_npa_lf_active(dev)) {
1766                 otx2_info("Common resource in use by other devices");
1767                 return -EAGAIN;
1768         }
1769
1770         otx2_tim_fini();
1771         otx2_dev_fini(pci_dev, dev);
1772
1773         return 0;
1774 }
1775
/* Register the PMD with the EAL: PCI id table, kernel-module dependency
 * and the devargs string this driver understands.
 */
RTE_PMD_REGISTER_PCI(event_octeontx2, pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_octeontx2, pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_octeontx2, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_octeontx2, OTX2_SSO_XAE_CNT "=<int>"
			      OTX2_SSO_SINGLE_WS "=1"
			      OTX2_SSO_GGRP_QOS "=<string>"
			      OTX2_SSO_SELFTEST "=1");