drivers/event/octeontx2/otx2_evdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <inttypes.h>

#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_eal.h>
#include <rte_eventdev_pmd_pci.h>
#include <rte_kvargs.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_pci.h>

#include "otx2_evdev_stats.h"
#include "otx2_evdev.h"
#include "otx2_irq.h"
#include "otx2_tim_evdev.h"

static inline int
sso_get_msix_offsets(const struct rte_eventdev *event_dev)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        uint8_t nb_ports = dev->nb_event_ports * (dev->dual_ws ? 2 : 1);
        struct otx2_mbox *mbox = dev->mbox;
        struct msix_offset_rsp *msix_rsp;
        int i, rc;

        /* Get SSO and SSOW MSIX vector offsets */
        otx2_mbox_alloc_msg_msix_offset(mbox);
        rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);
        if (rc < 0) /* Don't touch the response on mailbox failure. */
                return rc;

        for (i = 0; i < nb_ports; i++)
                dev->ssow_msixoff[i] = msix_rsp->ssow_msixoff[i];

        for (i = 0; i < dev->nb_event_queues; i++)
                dev->sso_msixoff[i] = msix_rsp->sso_msixoff[i];

        return rc;
}

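/*
 * Fastpath selection: each table below is indexed by six boolean offload
 * flags, outermost to innermost [f5]..[f0], and is filled by expanding the
 * R()/T() macro lists from otx2_evdev.h. For illustration (hypothetical
 * entry name), an expansion such as R(rss, 0, 0, 0, 0, 0, 1, ...) would
 * contribute:
 *
 *   [0][0][0][0][0][1] = otx2_ssogws_deq_rss,
 *
 * so indexing the table with !!(offloads & FLAG) per dimension selects the
 * variant compiled for exactly that offload combination.
 */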
void
sso_fastpath_fns_set(struct rte_eventdev *event_dev)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        /* Single WS modes */
        const event_dequeue_t ssogws_deq[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
                [f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        const event_dequeue_burst_t ssogws_deq_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
                [f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        const event_dequeue_t ssogws_deq_timeout[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
                [f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_timeout_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        const event_dequeue_burst_t
                ssogws_deq_timeout_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
                [f5][f4][f3][f2][f1][f0] =                              \
                        otx2_ssogws_deq_timeout_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        const event_dequeue_t ssogws_deq_seg[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
                [f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_seg_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        const event_dequeue_burst_t ssogws_deq_seg_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
                [f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_seg_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        const event_dequeue_t ssogws_deq_seg_timeout[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
                [f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_seg_timeout_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        const event_dequeue_burst_t
                ssogws_deq_seg_timeout_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
                [f5][f4][f3][f2][f1][f0] =                              \
                                otx2_ssogws_deq_seg_timeout_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        /* Dual WS modes */
        const event_dequeue_t ssogws_dual_deq[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
                [f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        const event_dequeue_burst_t ssogws_dual_deq_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
                [f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        const event_dequeue_t ssogws_dual_deq_timeout[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
                [f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_timeout_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        const event_dequeue_burst_t
                ssogws_dual_deq_timeout_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
        [f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_timeout_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        const event_dequeue_t ssogws_dual_deq_seg[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
                [f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_seg_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        const event_dequeue_burst_t
                ssogws_dual_deq_seg_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
                [f5][f4][f3][f2][f1][f0] =                              \
                                otx2_ssogws_dual_deq_seg_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        const event_dequeue_t ssogws_dual_deq_seg_timeout[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
                [f5][f4][f3][f2][f1][f0] =                              \
                                otx2_ssogws_dual_deq_seg_timeout_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        const event_dequeue_burst_t
                ssogws_dual_deq_seg_timeout_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
        [f5][f4][f3][f2][f1][f0] =                                      \
                otx2_ssogws_dual_deq_seg_timeout_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        /* Tx modes */
        const event_tx_adapter_enqueue ssogws_tx_adptr_enq[2][2][2][2][2][2] = {
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                      \
                [f5][f4][f3][f2][f1][f0] = otx2_ssogws_tx_adptr_enq_ ## name,
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
        };

        const event_tx_adapter_enqueue
                ssogws_tx_adptr_enq_seg[2][2][2][2][2][2] = {
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                      \
                [f5][f4][f3][f2][f1][f0] =                              \
                        otx2_ssogws_tx_adptr_enq_seg_ ## name,
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
        };

        const event_tx_adapter_enqueue
                ssogws_dual_tx_adptr_enq[2][2][2][2][2][2] = {
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                      \
                [f5][f4][f3][f2][f1][f0] =                              \
                        otx2_ssogws_dual_tx_adptr_enq_ ## name,
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
        };

        const event_tx_adapter_enqueue
                ssogws_dual_tx_adptr_enq_seg[2][2][2][2][2][2] = {
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                      \
                [f5][f4][f3][f2][f1][f0] =                              \
                        otx2_ssogws_dual_tx_adptr_enq_seg_ ## name,
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
        };

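        /*
         * Pick the enqueue/dequeue/Tx variants that match the negotiated
         * offloads; the index order follows the tables above, outermost
         * [TSTAMP] down to innermost [RSS] for Rx.
         */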
        event_dev->enqueue                      = otx2_ssogws_enq;
        event_dev->enqueue_burst                = otx2_ssogws_enq_burst;
        event_dev->enqueue_new_burst            = otx2_ssogws_enq_new_burst;
        event_dev->enqueue_forward_burst        = otx2_ssogws_enq_fwd_burst;
        if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
                event_dev->dequeue              = ssogws_deq_seg
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
                event_dev->dequeue_burst        = ssogws_deq_seg_burst
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
                if (dev->is_timeout_deq) {
                        event_dev->dequeue      = ssogws_deq_seg_timeout
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
                        event_dev->dequeue_burst        =
                                ssogws_deq_seg_timeout_burst
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
                }
        } else {
                event_dev->dequeue                      = ssogws_deq
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
                event_dev->dequeue_burst                = ssogws_deq_burst
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
                if (dev->is_timeout_deq) {
                        event_dev->dequeue              = ssogws_deq_timeout
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
                        event_dev->dequeue_burst        =
                                ssogws_deq_timeout_burst
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
                }
        }

        if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) {
                /* [TSO] [TSTAMP] [MBUF_NOFF] [VLAN] [OL3_OL4_CSUM]
                 * [L3_L4_CSUM]
                 */
                event_dev->txa_enqueue = ssogws_tx_adptr_enq_seg
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
        } else {
                event_dev->txa_enqueue = ssogws_tx_adptr_enq
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
        }

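        /*
         * Dual-workslot mode pairs two GWS contexts (ws_state[0]/[1]) per
         * event port, so the dual_* enqueue/dequeue variants below override
         * the single-workslot handlers selected above.
         */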
        if (dev->dual_ws) {
                event_dev->enqueue              = otx2_ssogws_dual_enq;
                event_dev->enqueue_burst        = otx2_ssogws_dual_enq_burst;
                event_dev->enqueue_new_burst    =
                                        otx2_ssogws_dual_enq_new_burst;
                event_dev->enqueue_forward_burst =
                                        otx2_ssogws_dual_enq_fwd_burst;

                if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
                        event_dev->dequeue      = ssogws_dual_deq_seg
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_TSTAMP_F)]
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_CHECKSUM_F)]
                                [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
                                [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
                        event_dev->dequeue_burst = ssogws_dual_deq_seg_burst
                                [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_CHECKSUM_F)]
                                [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
                                [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
                        if (dev->is_timeout_deq) {
                                event_dev->dequeue      =
                                        ssogws_dual_deq_seg_timeout
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_TSTAMP_F)]
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_CHECKSUM_F)]
                                        [!!(dev->rx_offloads &
                                                        NIX_RX_OFFLOAD_PTYPE_F)]
                                        [!!(dev->rx_offloads &
                                                        NIX_RX_OFFLOAD_RSS_F)];
                                event_dev->dequeue_burst =
                                        ssogws_dual_deq_seg_timeout_burst
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_TSTAMP_F)]
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_CHECKSUM_F)]
                                        [!!(dev->rx_offloads &
                                                        NIX_RX_OFFLOAD_PTYPE_F)]
                                        [!!(dev->rx_offloads &
                                                        NIX_RX_OFFLOAD_RSS_F)];
                        }
                } else {
                        event_dev->dequeue              = ssogws_dual_deq
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_TSTAMP_F)]
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_CHECKSUM_F)]
                                [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
                                [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
                        event_dev->dequeue_burst        = ssogws_dual_deq_burst
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_TSTAMP_F)]
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_CHECKSUM_F)]
                                [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
                                [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
                        if (dev->is_timeout_deq) {
                                event_dev->dequeue      =
                                        ssogws_dual_deq_timeout
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_TSTAMP_F)]
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_CHECKSUM_F)]
                                        [!!(dev->rx_offloads &
                                                        NIX_RX_OFFLOAD_PTYPE_F)]
                                        [!!(dev->rx_offloads &
                                                        NIX_RX_OFFLOAD_RSS_F)];
                                event_dev->dequeue_burst =
                                        ssogws_dual_deq_timeout_burst
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_TSTAMP_F)]
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_CHECKSUM_F)]
                                        [!!(dev->rx_offloads &
                                                        NIX_RX_OFFLOAD_PTYPE_F)]
                                        [!!(dev->rx_offloads &
                                                        NIX_RX_OFFLOAD_RSS_F)];
                        }
                }

                if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) {
                        /* [TSO] [TSTAMP] [MBUF_NOFF] [VLAN] [OL3_OL4_CSUM]
                         * [L3_L4_CSUM]
                         */
                        event_dev->txa_enqueue = ssogws_dual_tx_adptr_enq_seg
                                [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
                                [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
                                [!!(dev->tx_offloads &
                                                NIX_TX_OFFLOAD_MBUF_NOFF_F)]
                                [!!(dev->tx_offloads &
                                                NIX_TX_OFFLOAD_VLAN_QINQ_F)]
                                [!!(dev->tx_offloads &
                                                NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
                                [!!(dev->tx_offloads &
                                                NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
                } else {
                        event_dev->txa_enqueue = ssogws_dual_tx_adptr_enq
                                [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
                                [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
                                [!!(dev->tx_offloads &
                                                NIX_TX_OFFLOAD_MBUF_NOFF_F)]
                                [!!(dev->tx_offloads &
                                                NIX_TX_OFFLOAD_VLAN_QINQ_F)]
                                [!!(dev->tx_offloads &
                                                NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
                                [!!(dev->tx_offloads &
                                                NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
                }
        }

        event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
        rte_mb();
}

static void
otx2_sso_info_get(struct rte_eventdev *event_dev,
                  struct rte_event_dev_info *dev_info)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);

        dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX2_PMD);
        dev_info->min_dequeue_timeout_ns = dev->min_dequeue_timeout_ns;
        dev_info->max_dequeue_timeout_ns = dev->max_dequeue_timeout_ns;
        dev_info->max_event_queues = dev->max_event_queues;
        dev_info->max_event_queue_flows = (1ULL << 20);
        dev_info->max_event_queue_priority_levels = 8;
        dev_info->max_event_priority_levels = 1;
        dev_info->max_event_ports = dev->max_event_ports;
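        /* A GWS port fetches and submits one event at a time, hence 1. */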
        dev_info->max_event_port_dequeue_depth = 1;
        dev_info->max_event_port_enqueue_depth = 1;
        dev_info->max_num_events = dev->max_num_events;
        dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
                                        RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
                                        RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
                                        RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
                                        RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
                                        RTE_EVENT_DEV_CAP_NONSEQ_MODE;
}

static void
sso_port_link_modify(struct otx2_ssogws *ws, uint8_t queue, uint8_t enable)
{
        uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);
        uint64_t val;

        val = queue;
        val |= 0ULL << 12; /* Select mask set 0 */
        val |= 0x8000800080000000; /* Don't modify the rest of the mask sets */
        val |= (uint64_t)enable << 14; /* Enable/disable membership */
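        /*
         * Illustration: queue = 3 with enable = 1 yields
         * val = 0x8000800080004003, i.e. group 3 of mask set 0 with
         * membership enabled and the other mask sets left untouched.
         */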

        otx2_write64(val, base + SSOW_LF_GWS_GRPMSK_CHG);
}

static int
otx2_sso_port_link(struct rte_eventdev *event_dev, void *port,
                   const uint8_t queues[], const uint8_t priorities[],
                   uint16_t nb_links)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        uint8_t port_id = 0;
        uint16_t link;

        RTE_SET_USED(priorities);
        for (link = 0; link < nb_links; link++) {
                if (dev->dual_ws) {
                        struct otx2_ssogws_dual *ws = port;

                        port_id = ws->port;
                        sso_port_link_modify((struct otx2_ssogws *)
                                        &ws->ws_state[0], queues[link], true);
                        sso_port_link_modify((struct otx2_ssogws *)
                                        &ws->ws_state[1], queues[link], true);
                } else {
                        struct otx2_ssogws *ws = port;

                        port_id = ws->port;
                        sso_port_link_modify(ws, queues[link], true);
                }
        }
        sso_func_trace("Port=%d nb_links=%d", port_id, nb_links);

        return (int)nb_links;
}

static int
otx2_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
                     uint8_t queues[], uint16_t nb_unlinks)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        uint8_t port_id = 0;
        uint16_t unlink;

        for (unlink = 0; unlink < nb_unlinks; unlink++) {
                if (dev->dual_ws) {
                        struct otx2_ssogws_dual *ws = port;

                        port_id = ws->port;
                        sso_port_link_modify((struct otx2_ssogws *)
                                        &ws->ws_state[0], queues[unlink],
                                        false);
                        sso_port_link_modify((struct otx2_ssogws *)
                                        &ws->ws_state[1], queues[unlink],
                                        false);
                } else {
                        struct otx2_ssogws *ws = port;

                        port_id = ws->port;
                        sso_port_link_modify(ws, queues[unlink], false);
                }
        }
        sso_func_trace("Port=%d nb_unlinks=%d", port_id, nb_unlinks);

        return (int)nb_unlinks;
}

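/*
 * SSO (GGRP) and SSOW (GWS) LFs are owned by the RVU admin function (AF):
 * they must be attached to this PF/VF via a mailbox request before they can
 * be allocated and configured, and detached again on teardown.
 */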
static int
sso_hw_lf_cfg(struct otx2_mbox *mbox, enum otx2_sso_lf_type type,
              uint16_t nb_lf, uint8_t attach)
{
        if (attach) {
                struct rsrc_attach_req *req;

                req = otx2_mbox_alloc_msg_attach_resources(mbox);
                switch (type) {
                case SSO_LF_GGRP:
                        req->sso = nb_lf;
                        break;
                case SSO_LF_GWS:
                        req->ssow = nb_lf;
                        break;
                default:
                        return -EINVAL;
                }
                req->modify = true;
                if (otx2_mbox_process(mbox) < 0)
                        return -EIO;
        } else {
                struct rsrc_detach_req *req;

                req = otx2_mbox_alloc_msg_detach_resources(mbox);
                switch (type) {
                case SSO_LF_GGRP:
                        req->sso = true;
                        break;
                case SSO_LF_GWS:
                        req->ssow = true;
                        break;
                default:
                        return -EINVAL;
                }
                req->partial = true;
                if (otx2_mbox_process(mbox) < 0)
                        return -EIO;
        }

        return 0;
}

static int
sso_lf_cfg(struct otx2_sso_evdev *dev, struct otx2_mbox *mbox,
           enum otx2_sso_lf_type type, uint16_t nb_lf, uint8_t alloc)
{
        void *rsp;
        int rc;

        if (alloc) {
                switch (type) {
                case SSO_LF_GGRP:
                        {
                        struct sso_lf_alloc_req *req_ggrp;
                        req_ggrp = otx2_mbox_alloc_msg_sso_lf_alloc(mbox);
                        req_ggrp->hwgrps = nb_lf;
                        }
                        break;
                case SSO_LF_GWS:
                        {
                        struct ssow_lf_alloc_req *req_hws;
                        req_hws = otx2_mbox_alloc_msg_ssow_lf_alloc(mbox);
                        req_hws->hws = nb_lf;
                        }
                        break;
                default:
                        return -EINVAL;
                }
        } else {
                switch (type) {
                case SSO_LF_GGRP:
                        {
                        struct sso_lf_free_req *req_ggrp;
                        req_ggrp = otx2_mbox_alloc_msg_sso_lf_free(mbox);
                        req_ggrp->hwgrps = nb_lf;
                        }
                        break;
                case SSO_LF_GWS:
                        {
                        struct ssow_lf_free_req *req_hws;
                        req_hws = otx2_mbox_alloc_msg_ssow_lf_free(mbox);
                        req_hws->hws = nb_lf;
                        }
                        break;
                default:
                        return -EINVAL;
                }
        }

        rc = otx2_mbox_process_msg_tmo(mbox, (void **)&rsp, ~0);
        if (rc < 0)
                return rc;

        if (alloc && type == SSO_LF_GGRP) {
                struct sso_lf_alloc_rsp *rsp_ggrp = rsp;

                dev->xaq_buf_size = rsp_ggrp->xaq_buf_size;
                dev->xae_waes = rsp_ggrp->xaq_wq_entries;
                dev->iue = rsp_ggrp->in_unit_entries;
        }

        return 0;
}

static void
otx2_sso_port_release(void *port)
{
        rte_free(port);
}

static void
otx2_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
{
        RTE_SET_USED(event_dev);
        RTE_SET_USED(queue_id);
}

static void
sso_clr_links(const struct rte_eventdev *event_dev)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        int i, j;

        for (i = 0; i < dev->nb_event_ports; i++) {
                if (dev->dual_ws) {
                        struct otx2_ssogws_dual *ws;

                        ws = event_dev->data->ports[i];
                        for (j = 0; j < dev->nb_event_queues; j++) {
                                sso_port_link_modify((struct otx2_ssogws *)
                                                &ws->ws_state[0], j, false);
                                sso_port_link_modify((struct otx2_ssogws *)
                                                &ws->ws_state[1], j, false);
                        }
                } else {
                        struct otx2_ssogws *ws;

                        ws = event_dev->data->ports[i];
                        for (j = 0; j < dev->nb_event_queues; j++)
                                sso_port_link_modify(ws, j, false);
                }
        }
}

static void
sso_set_port_ops(struct otx2_ssogws *ws, uintptr_t base)
{
        ws->tag_op              = base + SSOW_LF_GWS_TAG;
        ws->wqp_op              = base + SSOW_LF_GWS_WQP;
        ws->getwrk_op           = base + SSOW_LF_GWS_OP_GET_WORK;
        ws->swtp_op             = base + SSOW_LF_GWS_SWTP;
        ws->swtag_norm_op       = base + SSOW_LF_GWS_OP_SWTAG_NORM;
        ws->swtag_desched_op    = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
}

static int
sso_configure_dual_ports(const struct rte_eventdev *event_dev)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        struct otx2_mbox *mbox = dev->mbox;
        uint8_t vws = 0;
        uint8_t nb_lf;
        int i, rc;

        otx2_sso_dbg("Configuring event ports %d", dev->nb_event_ports);

        nb_lf = dev->nb_event_ports * 2;
        /* Ask AF to attach required LFs. */
        rc = sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, true);
        if (rc < 0) {
                otx2_err("Failed to attach SSO GWS LF");
                return -ENODEV;
        }

        if (sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, true) < 0) {
                sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
                otx2_err("Failed to init SSO GWS LF");
                return -ENODEV;
        }

        for (i = 0; i < dev->nb_event_ports; i++) {
                struct otx2_ssogws_dual *ws;
                uintptr_t base;

                /* Free memory prior to re-allocation if needed */
                if (event_dev->data->ports[i] != NULL) {
                        ws = event_dev->data->ports[i];
                        rte_free(ws);
                        ws = NULL;
                }

                /* Allocate event port memory */
                ws = rte_zmalloc_socket("otx2_sso_ws",
                                        sizeof(struct otx2_ssogws_dual),
                                        RTE_CACHE_LINE_SIZE,
                                        event_dev->data->socket_id);
                if (ws == NULL) {
                        otx2_err("Failed to alloc memory for port=%d", i);
                        rc = -ENOMEM;
                        break;
                }

                ws->port = i;
                base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | vws << 12);
                sso_set_port_ops((struct otx2_ssogws *)&ws->ws_state[0], base);
                vws++;

                base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | vws << 12);
                sso_set_port_ops((struct otx2_ssogws *)&ws->ws_state[1], base);
                vws++;

                event_dev->data->ports[i] = ws;
        }

        if (rc < 0) {
                sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, false);
                sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
        }

        return rc;
}

static int
sso_configure_ports(const struct rte_eventdev *event_dev)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        struct otx2_mbox *mbox = dev->mbox;
        uint8_t nb_lf;
        int i, rc;

        otx2_sso_dbg("Configuring event ports %d", dev->nb_event_ports);

        nb_lf = dev->nb_event_ports;
        /* Ask AF to attach required LFs. */
        rc = sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, true);
        if (rc < 0) {
                otx2_err("Failed to attach SSO GWS LF");
                return -ENODEV;
        }

        if (sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, true) < 0) {
                sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
                otx2_err("Failed to init SSO GWS LF");
                return -ENODEV;
        }

        for (i = 0; i < nb_lf; i++) {
                struct otx2_ssogws *ws;
                uintptr_t base;

                /* Free memory prior to re-allocation if needed */
                if (event_dev->data->ports[i] != NULL) {
                        ws = event_dev->data->ports[i];
                        rte_free(ws);
                        ws = NULL;
                }

                /* Allocate event port memory */
                ws = rte_zmalloc_socket("otx2_sso_ws",
                                        sizeof(struct otx2_ssogws),
                                        RTE_CACHE_LINE_SIZE,
                                        event_dev->data->socket_id);
                if (ws == NULL) {
                        otx2_err("Failed to alloc memory for port=%d", i);
                        rc = -ENOMEM;
                        break;
                }

                ws->port = i;
                base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | i << 12);
                sso_set_port_ops(ws, base);

                event_dev->data->ports[i] = ws;
        }

        if (rc < 0) {
                sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, false);
                sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
        }

        return rc;
}

static int
sso_configure_queues(const struct rte_eventdev *event_dev)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        struct otx2_mbox *mbox = dev->mbox;
        uint8_t nb_lf;
        int rc;

        otx2_sso_dbg("Configuring event queues %d", dev->nb_event_queues);

        nb_lf = dev->nb_event_queues;
        /* Ask AF to attach required LFs. */
        rc = sso_hw_lf_cfg(mbox, SSO_LF_GGRP, nb_lf, true);
        if (rc < 0) {
                otx2_err("Failed to attach SSO GGRP LF");
                return -ENODEV;
        }

        if (sso_lf_cfg(dev, mbox, SSO_LF_GGRP, nb_lf, true) < 0) {
                sso_hw_lf_cfg(mbox, SSO_LF_GGRP, nb_lf, false);
                otx2_err("Failed to init SSO GGRP LF");
                return -ENODEV;
        }

        return rc;
}

static int
sso_xaq_allocate(struct otx2_sso_evdev *dev)
{
        const struct rte_memzone *mz;
        struct npa_aura_s *aura;
        static int reconfig_cnt;
        char pool_name[RTE_MEMZONE_NAMESIZE];
        uint32_t xaq_cnt;
        int rc;

        if (dev->xaq_pool)
                rte_mempool_free(dev->xaq_pool);

        /*
         * Allocate memory for Add work backpressure.
         */
        mz = rte_memzone_lookup(OTX2_SSO_FC_NAME);
        if (mz == NULL)
                mz = rte_memzone_reserve_aligned(OTX2_SSO_FC_NAME,
                                                 OTX2_ALIGN +
                                                 sizeof(struct npa_aura_s),
                                                 rte_socket_id(),
                                                 RTE_MEMZONE_IOVA_CONTIG,
                                                 OTX2_ALIGN);
        if (mz == NULL) {
                otx2_err("Failed to allocate mem for fcmem");
                return -ENOMEM;
        }

        dev->fc_iova = mz->iova;
        dev->fc_mem = mz->addr;

        aura = (struct npa_aura_s *)((uintptr_t)dev->fc_mem + OTX2_ALIGN);
        memset(aura, 0, sizeof(struct npa_aura_s));

        aura->fc_ena = 1;
        aura->fc_addr = dev->fc_iova;
        aura->fc_hyst_bits = 0; /* Store count on all updates */

        /* Taken from HRM 14.3.3(4) */
        xaq_cnt = dev->nb_event_queues * OTX2_SSO_XAQ_CACHE_CNT;
        if (dev->xae_cnt)
                xaq_cnt += dev->xae_cnt / dev->xae_waes;
        else if (dev->adptr_xae_cnt)
                xaq_cnt += (dev->adptr_xae_cnt / dev->xae_waes) +
                        (OTX2_SSO_XAQ_SLACK * dev->nb_event_queues);
        else
                xaq_cnt += (dev->iue / dev->xae_waes) +
                        (OTX2_SSO_XAQ_SLACK * dev->nb_event_queues);
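        /*
         * Illustrative sizing (hypothetical numbers): with 2 event queues,
         * a devargs xae_cnt of 8192 and xae_waes of 32 events per XAQ, this
         * evaluates to 2 * OTX2_SSO_XAQ_CACHE_CNT + 8192 / 32 buffers.
         */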

        otx2_sso_dbg("Configuring %d xaq buffers", xaq_cnt);
        /* Set up the XAQ pool, sized from the number of event queues;
         * reconfig_cnt keeps the name unique across reconfigurations.
         */
        snprintf(pool_name, sizeof(pool_name), "otx2_xaq_buf_pool_%d",
                 reconfig_cnt);
        dev->xaq_pool = (void *)rte_mempool_create_empty(pool_name,
                        xaq_cnt, dev->xaq_buf_size, 0, 0,
                        rte_socket_id(), 0);

        if (dev->xaq_pool == NULL) {
                otx2_err("Unable to create empty mempool.");
                rte_memzone_free(mz);
                return -ENOMEM;
        }

        rc = rte_mempool_set_ops_byname(dev->xaq_pool,
                                        rte_mbuf_platform_mempool_ops(), aura);
        if (rc != 0) {
                otx2_err("Unable to set xaqpool ops.");
                goto alloc_fail;
        }

        rc = rte_mempool_populate_default(dev->xaq_pool);
        if (rc < 0) {
                otx2_err("Unable to populate xaqpool.");
                goto alloc_fail;
        }
        reconfig_cnt++;
        /* When SW does addwork (enqueue) check if there is space in XAQ by
         * comparing fc_addr above against the xaq_lmt calculated below.
         * There should be a minimum headroom (OTX2_SSO_XAQ_SLACK / 2) for SSO
         * to request XAQ to cache them even before enqueue is called.
         */
        dev->xaq_lmt = xaq_cnt - (OTX2_SSO_XAQ_SLACK / 2 *
                                  dev->nb_event_queues);
        dev->nb_xaq_cfg = xaq_cnt;

        return 0;
alloc_fail:
        rte_mempool_free(dev->xaq_pool);
        rte_memzone_free(mz);
        return rc;
}

static int
sso_ggrp_alloc_xaq(struct otx2_sso_evdev *dev)
{
        struct otx2_mbox *mbox = dev->mbox;
        struct sso_hw_setconfig *req;

        otx2_sso_dbg("Configuring XAQ for GGRPs");
        req = otx2_mbox_alloc_msg_sso_hw_setconfig(mbox);
        req->npa_pf_func = otx2_npa_pf_func_get();
        req->npa_aura_id = npa_lf_aura_handle_to_aura(dev->xaq_pool->pool_id);
        req->hwgrps = dev->nb_event_queues;

        return otx2_mbox_process(mbox);
}

static void
sso_lf_teardown(struct otx2_sso_evdev *dev,
                enum otx2_sso_lf_type lf_type)
{
        uint8_t nb_lf;

        switch (lf_type) {
        case SSO_LF_GGRP:
                nb_lf = dev->nb_event_queues;
                break;
        case SSO_LF_GWS:
                nb_lf = dev->nb_event_ports;
                nb_lf *= dev->dual_ws ? 2 : 1;
                break;
        default:
                return;
        }

        sso_lf_cfg(dev, dev->mbox, lf_type, nb_lf, false);
        sso_hw_lf_cfg(dev->mbox, lf_type, nb_lf, false);
}

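/*
 * Device (re)configuration sequence: tear down previously configured LFs,
 * re-attach and initialize GWS ports and GGRP queues, size the XAQ pool,
 * clear stale port-queue links, then wire up MSI-X interrupts. Failures
 * unwind in reverse order via the teardown labels at the end.
 */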
static int
otx2_sso_configure(const struct rte_eventdev *event_dev)
{
        struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        uint32_t deq_tmo_ns;
        int rc;

        sso_func_trace();
        deq_tmo_ns = conf->dequeue_timeout_ns;

        if (deq_tmo_ns == 0)
                deq_tmo_ns = dev->min_dequeue_timeout_ns;

        if (deq_tmo_ns < dev->min_dequeue_timeout_ns ||
            deq_tmo_ns > dev->max_dequeue_timeout_ns) {
                otx2_err("Unsupported dequeue timeout requested");
                return -EINVAL;
        }

        if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
                dev->is_timeout_deq = 1;

        dev->deq_tmo_ns = deq_tmo_ns;

        if (conf->nb_event_ports > dev->max_event_ports ||
            conf->nb_event_queues > dev->max_event_queues) {
                otx2_err("Unsupported event queues/ports requested");
                return -EINVAL;
        }

        if (conf->nb_event_port_dequeue_depth > 1) {
                otx2_err("Unsupported event port deq depth requested");
                return -EINVAL;
        }

        if (conf->nb_event_port_enqueue_depth > 1) {
                otx2_err("Unsupported event port enq depth requested");
                return -EINVAL;
        }

        if (dev->configured)
                sso_unregister_irqs(event_dev);

        if (dev->nb_event_queues) {
                /* Tear down any previously configured queues. */
                sso_lf_teardown(dev, SSO_LF_GGRP);
        }
        if (dev->nb_event_ports) {
                /* Tear down any previously configured ports. */
                sso_lf_teardown(dev, SSO_LF_GWS);
        }

        dev->nb_event_queues = conf->nb_event_queues;
        dev->nb_event_ports = conf->nb_event_ports;

        if (dev->dual_ws)
                rc = sso_configure_dual_ports(event_dev);
        else
                rc = sso_configure_ports(event_dev);

        if (rc < 0) {
                otx2_err("Failed to configure event ports");
                return -ENODEV;
        }

        if (sso_configure_queues(event_dev) < 0) {
                otx2_err("Failed to configure event queues");
                rc = -ENODEV;
                goto teardown_hws;
        }

        if (sso_xaq_allocate(dev) < 0) {
                rc = -ENOMEM;
                goto teardown_hwggrp;
        }

        /* Clear any prior port-queue mapping. */
        sso_clr_links(event_dev);
        rc = sso_ggrp_alloc_xaq(dev);
        if (rc < 0) {
                otx2_err("Failed to alloc xaq to ggrp %d", rc);
                goto teardown_hwggrp;
        }

        rc = sso_get_msix_offsets(event_dev);
        if (rc < 0) {
                otx2_err("Failed to get msix offsets %d", rc);
                goto teardown_hwggrp;
        }

        rc = sso_register_irqs(event_dev);
        if (rc < 0) {
                otx2_err("Failed to register irq %d", rc);
                goto teardown_hwggrp;
        }

        dev->configured = 1;
        rte_mb();

        return 0;
teardown_hwggrp:
        sso_lf_teardown(dev, SSO_LF_GGRP);
teardown_hws:
        sso_lf_teardown(dev, SSO_LF_GWS);
        dev->nb_event_queues = 0;
        dev->nb_event_ports = 0;
        dev->configured = 0;
        return rc;
}

static void
otx2_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
                        struct rte_event_queue_conf *queue_conf)
{
        RTE_SET_USED(event_dev);
        RTE_SET_USED(queue_id);

        queue_conf->nb_atomic_flows = (1ULL << 20);
        queue_conf->nb_atomic_order_sequences = (1ULL << 20);
        queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
        queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static int
otx2_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
                     const struct rte_event_queue_conf *queue_conf)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        struct otx2_mbox *mbox = dev->mbox;
        struct sso_grp_priority *req;
        int rc;

        sso_func_trace("Queue=%d prio=%d", queue_id, queue_conf->priority);

        req = otx2_mbox_alloc_msg_sso_grp_set_priority(dev->mbox);
        req->grp = queue_id;
        req->weight = 0xFF;
        req->affinity = 0xFF;
        /* Normalize <0-255> to <0-7> */
        req->priority = queue_conf->priority / 32;
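        /* e.g. RTE_EVENT_DEV_PRIORITY_NORMAL (128) maps to HW priority 4. */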

        rc = otx2_mbox_process(mbox);
        if (rc < 0) {
                otx2_err("Failed to set priority queue=%d", queue_id);
                return rc;
        }

        return 0;
}

static void
otx2_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
                       struct rte_event_port_conf *port_conf)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);

        RTE_SET_USED(port_id);
        port_conf->new_event_threshold = dev->max_num_events;
        port_conf->dequeue_depth = 1;
        port_conf->enqueue_depth = 1;
}

static int
otx2_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
                    const struct rte_event_port_conf *port_conf)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        uintptr_t grps_base[OTX2_SSO_MAX_VHGRP] = {0};
        uint64_t val;
        uint16_t q;

        sso_func_trace("Port=%d", port_id);
        RTE_SET_USED(port_conf);

        if (event_dev->data->ports[port_id] == NULL) {
                otx2_err("Invalid port Id %d", port_id);
                return -EINVAL;
        }

        for (q = 0; q < dev->nb_event_queues; q++) {
                grps_base[q] = dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20 | q << 12);
                if (grps_base[q] == 0) {
                        otx2_err("Failed to get grp[%d] base addr", q);
                        return -EINVAL;
                }
        }

        /* Set get_work timeout for HWS */
        val = NSEC2USEC(dev->deq_tmo_ns) - 1;

        if (dev->dual_ws) {
                struct otx2_ssogws_dual *ws = event_dev->data->ports[port_id];

                rte_memcpy(ws->grps_base, grps_base,
                           sizeof(uintptr_t) * OTX2_SSO_MAX_VHGRP);
                ws->fc_mem = dev->fc_mem;
                ws->xaq_lmt = dev->xaq_lmt;
                ws->tstamp = dev->tstamp;
                otx2_write64(val, OTX2_SSOW_GET_BASE_ADDR(
                             ws->ws_state[0].getwrk_op) + SSOW_LF_GWS_NW_TIM);
                otx2_write64(val, OTX2_SSOW_GET_BASE_ADDR(
                             ws->ws_state[1].getwrk_op) + SSOW_LF_GWS_NW_TIM);
        } else {
                struct otx2_ssogws *ws = event_dev->data->ports[port_id];
                uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);

                rte_memcpy(ws->grps_base, grps_base,
                           sizeof(uintptr_t) * OTX2_SSO_MAX_VHGRP);
                ws->fc_mem = dev->fc_mem;
                ws->xaq_lmt = dev->xaq_lmt;
                ws->tstamp = dev->tstamp;
                otx2_write64(val, base + SSOW_LF_GWS_NW_TIM);
        }

        otx2_sso_dbg("Port=%d ws=%p", port_id, event_dev->data->ports[port_id]);

        return 0;
}

static int
otx2_sso_timeout_ticks(struct rte_eventdev *event_dev, uint64_t ns,
                       uint64_t *tmo_ticks)
{
        RTE_SET_USED(event_dev);
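        /*
         * A rough sketch, assuming NSEC2TICK(ns, hz) evaluates to
         * (ns * hz) / 1E9: a 100 MHz timer turns 10000 ns into 1000 ticks.
         */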
        *tmo_ticks = NSEC2TICK(ns, rte_get_timer_hz());

        return 0;
}
1212
1213 static void
1214 ssogws_dump(struct otx2_ssogws *ws, FILE *f)
1215 {
1216         uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);
1217
1218         fprintf(f, "SSOW_LF_GWS Base addr   0x%" PRIx64 "\n", (uint64_t)base);
1219         fprintf(f, "SSOW_LF_GWS_LINKS       0x%" PRIx64 "\n",
1220                 otx2_read64(base + SSOW_LF_GWS_LINKS));
1221         fprintf(f, "SSOW_LF_GWS_PENDWQP     0x%" PRIx64 "\n",
1222                 otx2_read64(base + SSOW_LF_GWS_PENDWQP));
1223         fprintf(f, "SSOW_LF_GWS_PENDSTATE   0x%" PRIx64 "\n",
1224                 otx2_read64(base + SSOW_LF_GWS_PENDSTATE));
1225         fprintf(f, "SSOW_LF_GWS_NW_TIM      0x%" PRIx64 "\n",
1226                 otx2_read64(base + SSOW_LF_GWS_NW_TIM));
1227         fprintf(f, "SSOW_LF_GWS_TAG         0x%" PRIx64 "\n",
1228                 otx2_read64(base + SSOW_LF_GWS_TAG));
1229         fprintf(f, "SSOW_LF_GWS_WQP         0x%" PRIx64 "\n",
1230                 otx2_read64(base + SSOW_LF_GWS_TAG));
1231         fprintf(f, "SSOW_LF_GWS_SWTP        0x%" PRIx64 "\n",
1232                 otx2_read64(base + SSOW_LF_GWS_SWTP));
1233         fprintf(f, "SSOW_LF_GWS_PENDTAG     0x%" PRIx64 "\n",
1234                 otx2_read64(base + SSOW_LF_GWS_PENDTAG));
1235 }
1236
1237 static void
1238 ssoggrp_dump(uintptr_t base, FILE *f)
1239 {
1240         fprintf(f, "SSO_LF_GGRP Base addr   0x%" PRIx64 "\n", (uint64_t)base);
1241         fprintf(f, "SSO_LF_GGRP_QCTL        0x%" PRIx64 "\n",
1242                 otx2_read64(base + SSO_LF_GGRP_QCTL));
1243         fprintf(f, "SSO_LF_GGRP_XAQ_CNT     0x%" PRIx64 "\n",
1244                 otx2_read64(base + SSO_LF_GGRP_XAQ_CNT));
1245         fprintf(f, "SSO_LF_GGRP_INT_THR     0x%" PRIx64 "\n",
1246                 otx2_read64(base + SSO_LF_GGRP_INT_THR));
1247         fprintf(f, "SSO_LF_GGRP_INT_CNT     0x%" PRIX64 "\n",
1248                 otx2_read64(base + SSO_LF_GGRP_INT_CNT));
1249         fprintf(f, "SSO_LF_GGRP_AQ_CNT      0x%" PRIX64 "\n",
1250                 otx2_read64(base + SSO_LF_GGRP_AQ_CNT));
1251         fprintf(f, "SSO_LF_GGRP_AQ_THR      0x%" PRIX64 "\n",
1252                 otx2_read64(base + SSO_LF_GGRP_AQ_THR));
1253         fprintf(f, "SSO_LF_GGRP_MISC_CNT    0x%" PRIx64 "\n",
1254                 otx2_read64(base + SSO_LF_GGRP_MISC_CNT));
1255 }
1256
1257 static void
1258 otx2_sso_dump(struct rte_eventdev *event_dev, FILE *f)
1259 {
1260         struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1261         uint8_t queue;
1262         uint8_t port;
1263
1264         fprintf(f, "[%s] SSO running in [%s] mode\n", __func__, dev->dual_ws ?
1265                 "dual_ws" : "single_ws");
1266         /* Dump SSOW registers */
1267         for (port = 0; port < dev->nb_event_ports; port++) {
1268                 if (dev->dual_ws) {
1269                         struct otx2_ssogws_dual *ws =
1270                                 event_dev->data->ports[port];
1271
1272                         fprintf(f, "[%s] SSO dual workslot[%d] vws[%d] dump\n",
1273                                 __func__, port, 0);
1274                         ssogws_dump((struct otx2_ssogws *)&ws->ws_state[0], f);
1275                         fprintf(f, "[%s] SSO dual workslot[%d] vws[%d] dump\n",
1276                                 __func__, port, 1);
1277                         ssogws_dump((struct otx2_ssogws *)&ws->ws_state[1], f);
1278                 } else {
1279                         fprintf(f, "[%s] SSO single workslot[%d] dump\n",
1280                                 __func__, port);
1281                         ssogws_dump(event_dev->data->ports[port], f);
1282                 }
1283         }
1284
1285         /* Dump SSO registers */
1286         for (queue = 0; queue < dev->nb_event_queues; queue++) {
1287                 fprintf(f, "[%s] SSO group[%d] dump\n", __func__, queue);
1288                 if (dev->dual_ws) {
1289                         struct otx2_ssogws_dual *ws = event_dev->data->ports[0];
1290                         ssoggrp_dump(ws->grps_base[queue], f);
1291                 } else {
1292                         struct otx2_ssogws *ws = event_dev->data->ports[0];
1293                         ssoggrp_dump(ws->grps_base[queue], f);
1294                 }
1295         }
1296 }
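/*
 * Illustrative (not part of the driver): the dump above is what an
 * application sees when it calls the public API, e.g.
 */
#if 0
	rte_event_dev_dump(0 /* dev_id */, stdout);
#endif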
1297
1298 static void
1299 otx2_handle_event(void *arg, struct rte_event event)
1300 {
1301         struct rte_eventdev *event_dev = arg;
1302
1303         if (event_dev->dev_ops->dev_stop_flush != NULL)
1304                 event_dev->dev_ops->dev_stop_flush(event_dev->data->dev_id,
1305                                 event, event_dev->data->dev_stop_flush_arg);
1306 }
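/*
 * Application-side sketch (illustrative): the callback dispatched above is
 * whatever was registered via rte_event_dev_stop_flush_callback_register();
 * my_flush_cb is a hypothetical name, and rte_pktmbuf_free() would need
 * rte_mbuf.h.
 */
#if 0
static void
my_flush_cb(uint8_t dev_id, struct rte_event event, void *arg)
{
	RTE_SET_USED(dev_id);
	RTE_SET_USED(arg);
	/* Reclaim the mbuf carried by an event dropped during dev_stop() */
	rte_pktmbuf_free(event.mbuf);
}

	/* ... at setup time, before rte_event_dev_stop(): */
	rte_event_dev_stop_flush_callback_register(dev_id, my_flush_cb, NULL);
#endif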
1307
1308 static void
1309 sso_qos_cfg(struct rte_eventdev *event_dev)
1310 {
1311         struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1312         struct sso_grp_qos_cfg *req;
1313         uint16_t i;
1314
1315         for (i = 0; i < dev->qos_queue_cnt; i++) {
1316                 uint8_t xaq_prcnt = dev->qos_parse_data[i].xaq_prcnt;
1317                 uint8_t iaq_prcnt = dev->qos_parse_data[i].iaq_prcnt;
1318                 uint8_t taq_prcnt = dev->qos_parse_data[i].taq_prcnt;
1319
1320                 if (dev->qos_parse_data[i].queue >= dev->nb_event_queues)
1321                         continue;
1322
1323                 req = otx2_mbox_alloc_msg_sso_grp_qos_config(dev->mbox);
1324                 req->xaq_limit = (dev->nb_xaq_cfg *
1325                                   (xaq_prcnt ? xaq_prcnt : 100)) / 100;
1326                 req->taq_thr = (SSO_HWGRP_TAQ_MAX_THR_MASK *
1327                                 (taq_prcnt ? taq_prcnt : 100)) / 100;
1328                 req->iaq_thr = (SSO_HWGRP_IAQ_MAX_THR_MASK *
1329                                 (iaq_prcnt ? iaq_prcnt : 100)) / 100;
1330         }
1331
1332         if (dev->qos_queue_cnt)
1333                 otx2_mbox_process(dev->mbox);
1334 }
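/*
 * Worked example (illustrative): with devargs qos=[0-50-100-100] and
 * dev->nb_xaq_cfg == 1024, queue 0 gets xaq_limit = (1024 * 50) / 100 =
 * 512 XAQ entries while its TAQ/IAQ thresholds stay at 100% of their
 * maximum masks; a percentage of 0 keeps the default. Note (assumption
 * based on the shared AF mailbox layout): sso_grp_qos_cfg also carries a
 * grp field, so a strictly per-queue config would additionally set
 * req->grp = dev->qos_parse_data[i].queue.
 */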
1335
1336 static void
1337 sso_cleanup(struct rte_eventdev *event_dev, uint8_t enable)
1338 {
1339         struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1340         uint16_t i;
1341
1342         for (i = 0; i < dev->nb_event_ports; i++) {
1343                 if (dev->dual_ws) {
1344                         struct otx2_ssogws_dual *ws;
1345
1346                         ws = event_dev->data->ports[i];
1347                         ssogws_reset((struct otx2_ssogws *)&ws->ws_state[0]);
1348                         ssogws_reset((struct otx2_ssogws *)&ws->ws_state[1]);
1349                         ws->swtag_req = 0;
1350                         ws->vws = 0;
1351                         ws->ws_state[0].cur_grp = 0;
1352                         ws->ws_state[0].cur_tt = SSO_SYNC_EMPTY;
1353                         ws->ws_state[1].cur_grp = 0;
1354                         ws->ws_state[1].cur_tt = SSO_SYNC_EMPTY;
1355                 } else {
1356                         struct otx2_ssogws *ws;
1357
1358                         ws = event_dev->data->ports[i];
1359                         ssogws_reset(ws);
1360                         ws->swtag_req = 0;
1361                         ws->cur_grp = 0;
1362                         ws->cur_tt = SSO_SYNC_EMPTY;
1363                 }
1364         }
1365
1366         rte_mb();
1367         if (dev->dual_ws) {
1368                 struct otx2_ssogws_dual *ws = event_dev->data->ports[0];
1369                 struct otx2_ssogws temp_ws;
1370
1371                 memcpy(&temp_ws, &ws->ws_state[0],
1372                        sizeof(struct otx2_ssogws_state));
1373                 for (i = 0; i < dev->nb_event_queues; i++) {
1374                         /* Consume all the events through HWS0 */
1375                         ssogws_flush_events(&temp_ws, i, ws->grps_base[i],
1376                                             otx2_handle_event, event_dev);
1377                         /* Enable/Disable SSO GGRP */
1378                         otx2_write64(enable, ws->grps_base[i] +
1379                                      SSO_LF_GGRP_QCTL);
1380                 }
1381                 ws->ws_state[0].cur_grp = 0;
1382                 ws->ws_state[0].cur_tt = SSO_SYNC_EMPTY;
1383         } else {
1384                 struct otx2_ssogws *ws = event_dev->data->ports[0];
1385
1386                 for (i = 0; i < dev->nb_event_queues; i++) {
1387                         /* Consume all the events through HWS0 */
1388                         ssogws_flush_events(ws, i, ws->grps_base[i],
1389                                             otx2_handle_event, event_dev);
1390                         /* Enable/Disable SSO GGRP */
1391                         otx2_write64(enable, ws->grps_base[i] +
1392                                      SSO_LF_GGRP_QCTL);
1393                 }
1394                 ws->cur_grp = 0;
1395                 ws->cur_tt = SSO_SYNC_EMPTY;
1396         }
1397
1398         /* reset SSO GWS cache */
1399         otx2_mbox_alloc_msg_sso_ws_cache_inv(dev->mbox);
1400         otx2_mbox_process(dev->mbox);
1401 }
1402
1403 int
1404 sso_xae_reconfigure(struct rte_eventdev *event_dev)
1405 {
1406         struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1407         struct rte_mempool *prev_xaq_pool;
1408         int rc = 0;
1409
1410         if (event_dev->data->dev_started)
1411                 sso_cleanup(event_dev, 0);
1412
1413         prev_xaq_pool = dev->xaq_pool;
1414         dev->xaq_pool = NULL;
1415         rc = sso_xaq_allocate(dev);
1416         if (rc < 0) {
1417                 otx2_err("Failed to alloc xaq pool %d", rc);
1418                 rte_mempool_free(prev_xaq_pool);
1419                 return rc;
1420         }
1421         rc = sso_ggrp_alloc_xaq(dev);
1422         if (rc < 0) {
1423                 otx2_err("Failed to alloc xaq to ggrp %d", rc);
1424                 rte_mempool_free(prev_xaq_pool);
1425                 return rc;
1426         }
1427
1428         rte_mempool_free(prev_xaq_pool);
1429         rte_mb();
1430         if (event_dev->data->dev_started)
1431                 sso_cleanup(event_dev, 1);
1432
1433         return 0;
1434 }
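/*
 * Usage note (illustrative): callers such as the Rx adapter queue-add path
 * use this to grow the XAQ pool when the expected number of in-flight
 * events rises; if the device is running, sso_cleanup() quiesces and then
 * re-enables the HW groups around the pool swap.
 */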
1435
1436 static int
1437 otx2_sso_start(struct rte_eventdev *event_dev)
1438 {
1439         sso_func_trace();
1440         sso_qos_cfg(event_dev);
1441         sso_cleanup(event_dev, 1);
1442         sso_fastpath_fns_set(event_dev);
1443
1444         return 0;
1445 }
1446
1447 static void
1448 otx2_sso_stop(struct rte_eventdev *event_dev)
1449 {
1450         sso_func_trace();
1451         sso_cleanup(event_dev, 0);
1452         rte_mb();
1453 }
1454
1455 static int
1456 otx2_sso_close(struct rte_eventdev *event_dev)
1457 {
1458         struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1459         uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
1460         uint16_t i;
1461
1462         if (!dev->configured)
1463                 return 0;
1464
1465         sso_unregister_irqs(event_dev);
1466
1467         for (i = 0; i < dev->nb_event_queues; i++)
1468                 all_queues[i] = i;
1469
1470         for (i = 0; i < dev->nb_event_ports; i++)
1471                 otx2_sso_port_unlink(event_dev, event_dev->data->ports[i],
1472                                      all_queues, dev->nb_event_queues);
1473
1474         sso_lf_teardown(dev, SSO_LF_GGRP);
1475         sso_lf_teardown(dev, SSO_LF_GWS);
1476         dev->nb_event_ports = 0;
1477         dev->nb_event_queues = 0;
1478         rte_mempool_free(dev->xaq_pool);
1479         rte_memzone_free(rte_memzone_lookup(OTX2_SSO_FC_NAME));
1480
1481         return 0;
1482 }
1483
1484 /* Initialize and register the event driver with the DPDK application */
1485 static struct rte_eventdev_ops otx2_sso_ops = {
1486         .dev_infos_get    = otx2_sso_info_get,
1487         .dev_configure    = otx2_sso_configure,
1488         .queue_def_conf   = otx2_sso_queue_def_conf,
1489         .queue_setup      = otx2_sso_queue_setup,
1490         .queue_release    = otx2_sso_queue_release,
1491         .port_def_conf    = otx2_sso_port_def_conf,
1492         .port_setup       = otx2_sso_port_setup,
1493         .port_release     = otx2_sso_port_release,
1494         .port_link        = otx2_sso_port_link,
1495         .port_unlink      = otx2_sso_port_unlink,
1496         .timeout_ticks    = otx2_sso_timeout_ticks,
1497
1498         .eth_rx_adapter_caps_get  = otx2_sso_rx_adapter_caps_get,
1499         .eth_rx_adapter_queue_add = otx2_sso_rx_adapter_queue_add,
1500         .eth_rx_adapter_queue_del = otx2_sso_rx_adapter_queue_del,
1501         .eth_rx_adapter_start = otx2_sso_rx_adapter_start,
1502         .eth_rx_adapter_stop = otx2_sso_rx_adapter_stop,
1503
1504         .eth_tx_adapter_caps_get = otx2_sso_tx_adapter_caps_get,
1505         .eth_tx_adapter_queue_add = otx2_sso_tx_adapter_queue_add,
1506         .eth_tx_adapter_queue_del = otx2_sso_tx_adapter_queue_del,
1507
1508         .timer_adapter_caps_get = otx2_tim_caps_get,
1509
1510         .xstats_get       = otx2_sso_xstats_get,
1511         .xstats_reset     = otx2_sso_xstats_reset,
1512         .xstats_get_names = otx2_sso_xstats_get_names,
1513
1514         .dump             = otx2_sso_dump,
1515         .dev_start        = otx2_sso_start,
1516         .dev_stop         = otx2_sso_stop,
1517         .dev_close        = otx2_sso_close,
1518         .dev_selftest     = otx2_sso_selftest,
1519 };
1520
1521 #define OTX2_SSO_XAE_CNT        "xae_cnt"
1522 #define OTX2_SSO_SINGLE_WS      "single_ws"
1523 #define OTX2_SSO_GGRP_QOS       "qos"
1524 #define OTX2_SSO_SELFTEST       "selftest"
1525
1526 static void
1527 parse_queue_param(char *value, void *opaque)
1528 {
1529         struct otx2_sso_qos queue_qos = {0};
1530         uint8_t *val = (uint8_t *)&queue_qos;
1531         struct otx2_sso_evdev *dev = opaque;
1532         char *tok = strtok(value, "-");
1533         struct otx2_sso_qos *old_ptr;
1534
1535         if (!strlen(value))
1536                 return;
1537
1538         while (tok != NULL && val < (uint8_t *)(&queue_qos + 1)) {
1539                 *val = atoi(tok);
1540                 tok = strtok(NULL, "-");
1541                 val++;
1542         }
1543
1544         if (tok != NULL || val != (&queue_qos.iaq_prcnt + 1)) {
1545                 otx2_err("Invalid QoS parameter, expected [Qx-XAQ-TAQ-IAQ]");
1546                 return;
1547         }
1548
1549         dev->qos_queue_cnt++;
1550         old_ptr = dev->qos_parse_data;
1551         dev->qos_parse_data = rte_realloc(dev->qos_parse_data,
1552                                           sizeof(struct otx2_sso_qos) *
1553                                           dev->qos_queue_cnt, 0);
1554         if (dev->qos_parse_data == NULL) {
1555                 dev->qos_parse_data = old_ptr;
1556                 dev->qos_queue_cnt--;
1557                 return;
1558         }
1559         dev->qos_parse_data[dev->qos_queue_cnt - 1] = queue_qos;
1560 }
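/*
 * Example (illustrative): the token string "2-30-60-80" walks queue_qos
 * byte by byte to give { .queue = 2, .xaq_prcnt = 30, .taq_prcnt = 60,
 * .iaq_prcnt = 80 }, assuming the otx2_sso_qos fields follow the
 * [Qx-XAQ-TAQ-IAQ] order named in the error message above.
 */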
1561
1562 static void
1563 parse_qos_list(const char *value, void *opaque)
1564 {
1565         char *s = strdup(value);
1566         char *start = NULL;
1567         char *end = NULL;
1568         char *f = s;
1569
1570         while (*s) {
1571                 if (*s == '[')
1572                         start = s;
1573                 else if (*s == ']')
1574                         end = s;
1575
1576                 if (start && start < end) {
1577                         *end = 0;
1578                         parse_queue_param(start + 1, opaque);
1579                         s = end;
1580                         start = end;
1581                 }
1582                 s++;
1583         }
1584
1585         free(f);
1586 }
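/*
 * Example (illustrative): "[0-50-50-50][1-60-60-60]" is split on the
 * bracket pairs above into two parse_queue_param() calls, one per queue.
 */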
1587
1588 static int
1589 parse_sso_kvargs_dict(const char *key, const char *value, void *opaque)
1590 {
1591         RTE_SET_USED(key);
1592
1593         /* Dict format: [Qx-XAQ-TAQ-IAQ][Qz-XAQ-TAQ-IAQ]. '-' is used as the
1594          * separator because ',' isn't allowed in kvargs values. Everything is
1595          * expressed in percentages; 0 selects the default.
1596          */
1597         parse_qos_list(value, opaque);
1598
1599         return 0;
1600 }
1601
1602 static void
1603 sso_parse_devargs(struct otx2_sso_evdev *dev, struct rte_devargs *devargs)
1604 {
1605         struct rte_kvargs *kvlist;
1606         uint8_t single_ws = 0;
1607
1608         if (devargs == NULL)
1609                 return;
1610         kvlist = rte_kvargs_parse(devargs->args, NULL);
1611         if (kvlist == NULL)
1612                 return;
1613
1614         rte_kvargs_process(kvlist, OTX2_SSO_SELFTEST, &parse_kvargs_flag,
1615                            &dev->selftest);
1616         rte_kvargs_process(kvlist, OTX2_SSO_XAE_CNT, &parse_kvargs_value,
1617                            &dev->xae_cnt);
1618         rte_kvargs_process(kvlist, OTX2_SSO_SINGLE_WS, &parse_kvargs_flag,
1619                            &single_ws);
1620         rte_kvargs_process(kvlist, OTX2_SSO_GGRP_QOS, &parse_sso_kvargs_dict,
1621                            dev);
1622
1623         dev->dual_ws = !single_ws;
1624         rte_kvargs_free(kvlist);
1625 }
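/*
 * Example (illustrative): devargs "single_ws=1,qos=[0-50-50-50]" set the
 * single_ws flag, clearing dual_ws here, and hand the qos dict to
 * parse_sso_kvargs_dict() above.
 */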
1626
1627 static int
1628 otx2_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
1629 {
1630         return rte_event_pmd_pci_probe(pci_drv, pci_dev,
1631                                        sizeof(struct otx2_sso_evdev),
1632                                        otx2_sso_init);
1633 }
1634
1635 static int
1636 otx2_sso_remove(struct rte_pci_device *pci_dev)
1637 {
1638         return rte_event_pmd_pci_remove(pci_dev, otx2_sso_fini);
1639 }
1640
1641 static const struct rte_pci_id pci_sso_map[] = {
1642         {
1643                 RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
1644                                PCI_DEVID_OCTEONTX2_RVU_SSO_TIM_PF)
1645         },
1646         {
1647                 .vendor_id = 0,
1648         },
1649 };
1650
1651 static struct rte_pci_driver pci_sso = {
1652         .id_table = pci_sso_map,
1653         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
1654         .probe = otx2_sso_probe,
1655         .remove = otx2_sso_remove,
1656 };
1657
1658 int
1659 otx2_sso_init(struct rte_eventdev *event_dev)
1660 {
1661         struct free_rsrcs_rsp *rsrc_cnt;
1662         struct rte_pci_device *pci_dev;
1663         struct otx2_sso_evdev *dev;
1664         int rc;
1665
1666         event_dev->dev_ops = &otx2_sso_ops;
1667         /* For secondary processes, the primary has done all the work */
1668         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1669                 sso_fastpath_fns_set(event_dev);
1670                 return 0;
1671         }
1672
1673         dev = sso_pmd_priv(event_dev);
1674
1675         pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);
1676
1677         /* Initialize the base otx2_dev object */
1678         rc = otx2_dev_init(pci_dev, dev);
1679         if (rc < 0) {
1680                 otx2_err("Failed to initialize otx2_dev rc=%d", rc);
1681                 goto error;
1682         }
1683
1684         /* Get SSO and SSOW MSIX rsrc cnt */
1685         otx2_mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
1686         rc = otx2_mbox_process_msg(dev->mbox, (void *)&rsrc_cnt);
1687         if (rc < 0) {
1688                 otx2_err("Unable to get free rsrc count");
1689                 goto otx2_dev_uninit;
1690         }
1691         otx2_sso_dbg("SSO %d SSOW %d NPA %d provisioned", rsrc_cnt->sso,
1692                      rsrc_cnt->ssow, rsrc_cnt->npa);
1693
1694         dev->max_event_ports = RTE_MIN(rsrc_cnt->ssow, OTX2_SSO_MAX_VHWS);
1695         dev->max_event_queues = RTE_MIN(rsrc_cnt->sso, OTX2_SSO_MAX_VHGRP);
1696         /* Grab the NPA LF if required */
1697         rc = otx2_npa_lf_init(pci_dev, dev);
1698         if (rc < 0) {
1699                 otx2_err("Unable to init NPA lf. It might not be provisioned");
1700                 goto otx2_dev_uninit;
1701         }
1702
1703         dev->drv_inited = true;
1704         dev->is_timeout_deq = 0;
1705         dev->min_dequeue_timeout_ns = USEC2NSEC(1);
1706         dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);
1707         dev->max_num_events = -1;
1708         dev->nb_event_queues = 0;
1709         dev->nb_event_ports = 0;
1710
1711         if (!dev->max_event_ports || !dev->max_event_queues) {
1712                 otx2_err("Not enough eventdev resources: queues=%d ports=%d",
1713                          dev->max_event_queues, dev->max_event_ports);
1714                 rc = -ENODEV;
1715                 goto otx2_npa_lf_uninit;
1716         }
1717
1718         dev->dual_ws = 1;
1719         sso_parse_devargs(dev, pci_dev->device.devargs);
1720         if (dev->dual_ws) {
1721                 otx2_sso_dbg("Using dual workslot mode");
1722                 dev->max_event_ports = dev->max_event_ports / 2;
1723         } else {
1724                 otx2_sso_dbg("Using single workslot mode");
1725         }
1726
1727         otx2_sso_pf_func_set(dev->pf_func);
1728         otx2_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
1729                      event_dev->data->name, dev->max_event_queues,
1730                      dev->max_event_ports);
1731         if (dev->selftest) {
1732                 event_dev->dev->driver = &pci_sso.driver;
1733                 event_dev->dev_ops->dev_selftest();
1734         }
1735
1736         otx2_tim_init(pci_dev, (struct otx2_dev *)dev);
1737
1738         return 0;
1739
1740 otx2_npa_lf_uninit:
1741         otx2_npa_lf_fini();
1742 otx2_dev_uninit:
1743         otx2_dev_fini(pci_dev, dev);
1744 error:
1745         return rc;
1746 }
1747
1748 int
1749 otx2_sso_fini(struct rte_eventdev *event_dev)
1750 {
1751         struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1752         struct rte_pci_device *pci_dev;
1753
1754         /* For secondary processes, nothing to be done */
1755         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1756                 return 0;
1757
1758         pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);
1759
1760         if (!dev->drv_inited)
1761                 goto dev_fini;
1762
1763         dev->drv_inited = false;
1764         otx2_npa_lf_fini();
1765
1766 dev_fini:
1767         if (otx2_npa_lf_active(dev)) {
1768                 otx2_info("Common resource in use by other devices");
1769                 return -EAGAIN;
1770         }
1771
1772         otx2_tim_fini();
1773         otx2_dev_fini(pci_dev, dev);
1774
1775         return 0;
1776 }
1777
1778 RTE_PMD_REGISTER_PCI(event_octeontx2, pci_sso);
1779 RTE_PMD_REGISTER_PCI_TABLE(event_octeontx2, pci_sso_map);
1780 RTE_PMD_REGISTER_KMOD_DEP(event_octeontx2, "vfio-pci");
1781 RTE_PMD_REGISTER_PARAM_STRING(event_octeontx2, OTX2_SSO_XAE_CNT "=<int>"
1782                               OTX2_SSO_SINGLE_WS "=1"
1783                               OTX2_SSO_GGRP_QOS "=<string>"
1784                               OTX2_SSO_SELFTEST "=1");
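/*
 * Example invocation (illustrative; the PCI address is a placeholder):
 * the parameters registered above are passed per device on the EAL
 * command line, e.g.
 *
 *   -w 0002:0e:00.0,xae_cnt=16384,single_ws=1,qos=[0-50-50-50]
 */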