e6379e3b42eec4eff3ced364cd216c319467132c
[dpdk.git] / drivers / event / octeontx2 / otx2_evdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2019 Marvell International Ltd.
3  */
4
5 #include <inttypes.h>
6
7 #include <rte_bus_pci.h>
8 #include <rte_common.h>
9 #include <rte_eal.h>
10 #include <rte_eventdev_pmd_pci.h>
11 #include <rte_kvargs.h>
12 #include <rte_mbuf_pool_ops.h>
13 #include <rte_pci.h>
14
15 #include "otx2_evdev_stats.h"
16 #include "otx2_evdev.h"
17 #include "otx2_irq.h"
18 #include "otx2_tim_evdev.h"
19
20 static inline int
21 sso_get_msix_offsets(const struct rte_eventdev *event_dev)
22 {
23         struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
24         uint8_t nb_ports = dev->nb_event_ports * (dev->dual_ws ? 2 : 1);
25         struct otx2_mbox *mbox = dev->mbox;
26         struct msix_offset_rsp *msix_rsp;
27         int i, rc;
28
29         /* Get SSO and SSOW MSIX vector offsets */
30         otx2_mbox_alloc_msg_msix_offset(mbox);
31         rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);
32
33         for (i = 0; i < nb_ports; i++)
34                 dev->ssow_msixoff[i] = msix_rsp->ssow_msixoff[i];
35
36         for (i = 0; i < dev->nb_event_queues; i++)
37                 dev->sso_msixoff[i] = msix_rsp->sso_msixoff[i];
38
39         return rc;
40 }
41
/*
 * Install the fast-path function pointers on @event_dev.
 *
 * Each dequeue table below is a 6-dimensional array indexed by the
 * presence (0/1) of the six Rx offload flags, in the order
 * [TSTAMP][MARK_UPDATE][VLAN_STRIP][CHECKSUM][PTYPE][RSS] (the order is
 * fixed by the index expressions in the selection code further down).
 * The tables are populated by expanding SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 * with a temporary R() macro mapping each flag combination to the
 * correspondingly specialized dequeue function.  Tx adapter tables are
 * 5-dimensional, indexed by [TSTAMP][MBUF_NOFF][VLAN][OL3_OL4_CSUM]
 * [L3_L4_CSUM] via the T() macro and SSO_TX_ADPTR_ENQ_FASTPATH_FUNC.
 */
void
sso_fastpath_fns_set(struct rte_eventdev *event_dev)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	/* Single WS modes */
	const event_dequeue_t ssogws_deq[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
		[f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_burst_t ssogws_deq_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
		[f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_t ssogws_deq_timeout[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
		[f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_timeout_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_burst_t
		ssogws_deq_timeout_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
		[f5][f4][f3][f2][f1][f0] =                              \
			otx2_ssogws_deq_timeout_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	/* "_seg" variants handle multi-segment mbuf reassembly on Rx. */
	const event_dequeue_t ssogws_deq_seg[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
		[f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_seg_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_burst_t ssogws_deq_seg_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
		[f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_seg_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_t ssogws_deq_seg_timeout[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
		[f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_seg_timeout_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_burst_t
		ssogws_deq_seg_timeout_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
		[f5][f4][f3][f2][f1][f0] =                              \
				otx2_ssogws_deq_seg_timeout_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};


	/* Dual WS modes */
	const event_dequeue_t ssogws_dual_deq[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
		[f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_burst_t ssogws_dual_deq_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
		[f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_t ssogws_dual_deq_timeout[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
		[f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_timeout_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_burst_t
		ssogws_dual_deq_timeout_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
	[f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_timeout_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_t ssogws_dual_deq_seg[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
		[f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_seg_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_burst_t
		ssogws_dual_deq_seg_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
		[f5][f4][f3][f2][f1][f0] =                              \
				otx2_ssogws_dual_deq_seg_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_t ssogws_dual_deq_seg_timeout[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
		[f5][f4][f3][f2][f1][f0] =                              \
				otx2_ssogws_dual_deq_seg_timeout_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_burst_t
		ssogws_dual_deq_seg_timeout_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
	[f5][f4][f3][f2][f1][f0] =                                      \
		otx2_ssogws_dual_deq_seg_timeout_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	/* Tx modes */
	const event_tx_adapter_enqueue ssogws_tx_adptr_enq[2][2][2][2][2] = {
#define T(name, f4, f3, f2, f1, f0, sz, flags)                          \
		[f4][f3][f2][f1][f0] =  otx2_ssogws_tx_adptr_enq_ ## name,
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
	};

	const event_tx_adapter_enqueue
		ssogws_tx_adptr_enq_seg[2][2][2][2][2] = {
#define T(name, f4, f3, f2, f1, f0, sz, flags)                          \
		[f4][f3][f2][f1][f0] =  otx2_ssogws_tx_adptr_enq_seg_ ## name,
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
	};

	const event_tx_adapter_enqueue
		ssogws_dual_tx_adptr_enq[2][2][2][2][2] = {
#define T(name, f4, f3, f2, f1, f0, sz, flags)                          \
		[f4][f3][f2][f1][f0] =  otx2_ssogws_dual_tx_adptr_enq_ ## name,
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
	};

	const event_tx_adapter_enqueue
		ssogws_dual_tx_adptr_enq_seg[2][2][2][2][2] = {
#define T(name, f4, f3, f2, f1, f0, sz, flags)                          \
		[f4][f3][f2][f1][f0] =                                  \
			otx2_ssogws_dual_tx_adptr_enq_seg_ ## name,
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
	};

	/* Default to single workslot handlers; overridden below if dual_ws. */
	event_dev->enqueue			= otx2_ssogws_enq;
	event_dev->enqueue_burst		= otx2_ssogws_enq_burst;
	event_dev->enqueue_new_burst		= otx2_ssogws_enq_new_burst;
	event_dev->enqueue_forward_burst	= otx2_ssogws_enq_fwd_burst;
	/* Pick Rx dequeue handlers by offload flags (seg vs non-seg first). */
	if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
		event_dev->dequeue		= ssogws_deq_seg
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
		event_dev->dequeue_burst	= ssogws_deq_seg_burst
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
		/* Timeout-capable variants when a dequeue timeout is set. */
		if (dev->is_timeout_deq) {
			event_dev->dequeue	= ssogws_deq_seg_timeout
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
			event_dev->dequeue_burst	=
				ssogws_deq_seg_timeout_burst
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
		}
	} else {
		event_dev->dequeue			= ssogws_deq
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
		event_dev->dequeue_burst		= ssogws_deq_burst
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
		if (dev->is_timeout_deq) {
			event_dev->dequeue		= ssogws_deq_timeout
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
			event_dev->dequeue_burst	=
				ssogws_deq_timeout_burst
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
		}
	}

	/* Tx adapter enqueue handler, selected by Tx offload flags. */
	if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) {
		/* [TSMP] [MBUF_NOFF] [VLAN] [OL3_L4_CSUM] [L3_L4_CSUM] */
		event_dev->txa_enqueue = ssogws_tx_adptr_enq_seg
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
	} else {
		event_dev->txa_enqueue = ssogws_tx_adptr_enq
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
	}

	/* Dual workslot mode: replace all handlers with dual variants. */
	if (dev->dual_ws) {
		event_dev->enqueue		= otx2_ssogws_dual_enq;
		event_dev->enqueue_burst	= otx2_ssogws_dual_enq_burst;
		event_dev->enqueue_new_burst	=
					otx2_ssogws_dual_enq_new_burst;
		event_dev->enqueue_forward_burst =
					otx2_ssogws_dual_enq_fwd_burst;

		if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
			event_dev->dequeue	= ssogws_dual_deq_seg
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_TSTAMP_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_CHECKSUM_F)]
				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
			event_dev->dequeue_burst = ssogws_dual_deq_seg_burst
				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_CHECKSUM_F)]
				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
			if (dev->is_timeout_deq) {
				event_dev->dequeue	=
					ssogws_dual_deq_seg_timeout
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_TSTAMP_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_CHECKSUM_F)]
					[!!(dev->rx_offloads &
							NIX_RX_OFFLOAD_PTYPE_F)]
					[!!(dev->rx_offloads &
							NIX_RX_OFFLOAD_RSS_F)];
				event_dev->dequeue_burst =
					ssogws_dual_deq_seg_timeout_burst
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_TSTAMP_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_CHECKSUM_F)]
					[!!(dev->rx_offloads &
							NIX_RX_OFFLOAD_PTYPE_F)]
					[!!(dev->rx_offloads &
							NIX_RX_OFFLOAD_RSS_F)];
			}
		} else {
			event_dev->dequeue		= ssogws_dual_deq
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_TSTAMP_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_CHECKSUM_F)]
				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
			event_dev->dequeue_burst	= ssogws_dual_deq_burst
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_TSTAMP_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_CHECKSUM_F)]
				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
			if (dev->is_timeout_deq) {
				event_dev->dequeue	=
					ssogws_dual_deq_timeout
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_TSTAMP_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_CHECKSUM_F)]
					[!!(dev->rx_offloads &
							NIX_RX_OFFLOAD_PTYPE_F)]
					[!!(dev->rx_offloads &
							NIX_RX_OFFLOAD_RSS_F)];
				event_dev->dequeue_burst =
					ssogws_dual_deq_timeout_burst
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_TSTAMP_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_CHECKSUM_F)]
					[!!(dev->rx_offloads &
							NIX_RX_OFFLOAD_PTYPE_F)]
					[!!(dev->rx_offloads &
							NIX_RX_OFFLOAD_RSS_F)];
			}
		}

		if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) {
		/* [TSMP] [MBUF_NOFF] [VLAN] [OL3_L4_CSUM] [L3_L4_CSUM] */
			event_dev->txa_enqueue = ssogws_dual_tx_adptr_enq_seg
				[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
				[!!(dev->tx_offloads &
						NIX_TX_OFFLOAD_MBUF_NOFF_F)]
				[!!(dev->tx_offloads &
						NIX_TX_OFFLOAD_VLAN_QINQ_F)]
				[!!(dev->tx_offloads &
						NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
				[!!(dev->tx_offloads &
						NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
		} else {
			event_dev->txa_enqueue = ssogws_dual_tx_adptr_enq
				[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
				[!!(dev->tx_offloads &
						NIX_TX_OFFLOAD_MBUF_NOFF_F)]
				[!!(dev->tx_offloads &
						NIX_TX_OFFLOAD_VLAN_QINQ_F)]
				[!!(dev->tx_offloads &
						NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
				[!!(dev->tx_offloads &
						NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
		}
	}
	/* Full barrier: make the pointer stores globally visible. */
	rte_mb();
}
433
434 static void
435 otx2_sso_info_get(struct rte_eventdev *event_dev,
436                   struct rte_event_dev_info *dev_info)
437 {
438         struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
439
440         dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX2_PMD);
441         dev_info->min_dequeue_timeout_ns = dev->min_dequeue_timeout_ns;
442         dev_info->max_dequeue_timeout_ns = dev->max_dequeue_timeout_ns;
443         dev_info->max_event_queues = dev->max_event_queues;
444         dev_info->max_event_queue_flows = (1ULL << 20);
445         dev_info->max_event_queue_priority_levels = 8;
446         dev_info->max_event_priority_levels = 1;
447         dev_info->max_event_ports = dev->max_event_ports;
448         dev_info->max_event_port_dequeue_depth = 1;
449         dev_info->max_event_port_enqueue_depth = 1;
450         dev_info->max_num_events =  dev->max_num_events;
451         dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
452                                         RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
453                                         RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
454                                         RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
455                                         RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
456                                         RTE_EVENT_DEV_CAP_NONSEQ_MODE;
457 }
458
459 static void
460 sso_port_link_modify(struct otx2_ssogws *ws, uint8_t queue, uint8_t enable)
461 {
462         uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);
463         uint64_t val;
464
465         val = queue;
466         val |= 0ULL << 12; /* SET 0 */
467         val |= 0x8000800080000000; /* Dont modify rest of the masks */
468         val |= (uint64_t)enable << 14;   /* Enable/Disable Membership. */
469
470         otx2_write64(val, base + SSOW_LF_GWS_GRPMSK_CHG);
471 }
472
473 static int
474 otx2_sso_port_link(struct rte_eventdev *event_dev, void *port,
475                    const uint8_t queues[], const uint8_t priorities[],
476                    uint16_t nb_links)
477 {
478         struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
479         uint8_t port_id = 0;
480         uint16_t link;
481
482         RTE_SET_USED(priorities);
483         for (link = 0; link < nb_links; link++) {
484                 if (dev->dual_ws) {
485                         struct otx2_ssogws_dual *ws = port;
486
487                         port_id = ws->port;
488                         sso_port_link_modify((struct otx2_ssogws *)
489                                         &ws->ws_state[0], queues[link], true);
490                         sso_port_link_modify((struct otx2_ssogws *)
491                                         &ws->ws_state[1], queues[link], true);
492                 } else {
493                         struct otx2_ssogws *ws = port;
494
495                         port_id = ws->port;
496                         sso_port_link_modify(ws, queues[link], true);
497                 }
498         }
499         sso_func_trace("Port=%d nb_links=%d", port_id, nb_links);
500
501         return (int)nb_links;
502 }
503
504 static int
505 otx2_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
506                      uint8_t queues[], uint16_t nb_unlinks)
507 {
508         struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
509         uint8_t port_id = 0;
510         uint16_t unlink;
511
512         for (unlink = 0; unlink < nb_unlinks; unlink++) {
513                 if (dev->dual_ws) {
514                         struct otx2_ssogws_dual *ws = port;
515
516                         port_id = ws->port;
517                         sso_port_link_modify((struct otx2_ssogws *)
518                                         &ws->ws_state[0], queues[unlink],
519                                         false);
520                         sso_port_link_modify((struct otx2_ssogws *)
521                                         &ws->ws_state[1], queues[unlink],
522                                         false);
523                 } else {
524                         struct otx2_ssogws *ws = port;
525
526                         port_id = ws->port;
527                         sso_port_link_modify(ws, queues[unlink], false);
528                 }
529         }
530         sso_func_trace("Port=%d nb_unlinks=%d", port_id, nb_unlinks);
531
532         return (int)nb_unlinks;
533 }
534
535 static int
536 sso_hw_lf_cfg(struct otx2_mbox *mbox, enum otx2_sso_lf_type type,
537               uint16_t nb_lf, uint8_t attach)
538 {
539         if (attach) {
540                 struct rsrc_attach_req *req;
541
542                 req = otx2_mbox_alloc_msg_attach_resources(mbox);
543                 switch (type) {
544                 case SSO_LF_GGRP:
545                         req->sso = nb_lf;
546                         break;
547                 case SSO_LF_GWS:
548                         req->ssow = nb_lf;
549                         break;
550                 default:
551                         return -EINVAL;
552                 }
553                 req->modify = true;
554                 if (otx2_mbox_process(mbox) < 0)
555                         return -EIO;
556         } else {
557                 struct rsrc_detach_req *req;
558
559                 req = otx2_mbox_alloc_msg_detach_resources(mbox);
560                 switch (type) {
561                 case SSO_LF_GGRP:
562                         req->sso = true;
563                         break;
564                 case SSO_LF_GWS:
565                         req->ssow = true;
566                         break;
567                 default:
568                         return -EINVAL;
569                 }
570                 req->partial = true;
571                 if (otx2_mbox_process(mbox) < 0)
572                         return -EIO;
573         }
574
575         return 0;
576 }
577
/*
 * Allocate or free SSO (GGRP) / SSOW (GWS) LFs through the AF mailbox.
 *
 * @dev:   SSO device private data; XAQ sizing fields are updated on a
 *         successful GGRP allocation (see below).
 * @mbox:  mailbox channel to the AF.
 * @type:  SSO_LF_GGRP for event queues, SSO_LF_GWS for workslots.
 * @nb_lf: number of LFs to allocate or free.
 * @alloc: non-zero to allocate, zero to free.
 *
 * Return: 0 on success, negative mbox error, or -EINVAL for unknown @type.
 */
static int
sso_lf_cfg(struct otx2_sso_evdev *dev, struct otx2_mbox *mbox,
	   enum otx2_sso_lf_type type, uint16_t nb_lf, uint8_t alloc)
{
	void *rsp;
	int rc;

	if (alloc) {
		/* Build the type-specific alloc request in the mbox ring. */
		switch (type) {
		case SSO_LF_GGRP:
			{
			struct sso_lf_alloc_req *req_ggrp;
			req_ggrp = otx2_mbox_alloc_msg_sso_lf_alloc(mbox);
			req_ggrp->hwgrps = nb_lf;
			}
			break;
		case SSO_LF_GWS:
			{
			struct ssow_lf_alloc_req *req_hws;
			req_hws = otx2_mbox_alloc_msg_ssow_lf_alloc(mbox);
			req_hws->hws = nb_lf;
			}
			break;
		default:
			return -EINVAL;
		}
	} else {
		/* Same shape as above, but with the free request variants. */
		switch (type) {
		case SSO_LF_GGRP:
			{
			struct sso_lf_free_req *req_ggrp;
			req_ggrp = otx2_mbox_alloc_msg_sso_lf_free(mbox);
			req_ggrp->hwgrps = nb_lf;
			}
			break;
		case SSO_LF_GWS:
			{
			struct ssow_lf_free_req *req_hws;
			req_hws = otx2_mbox_alloc_msg_ssow_lf_free(mbox);
			req_hws->hws = nb_lf;
			}
			break;
		default:
			return -EINVAL;
		}
	}

	/* ~0 timeout: presumably wait indefinitely for the AF — confirm. */
	rc = otx2_mbox_process_msg_tmo(mbox, (void **)&rsp, ~0);
	if (rc < 0)
		return rc;

	/*
	 * A GGRP allocation response carries the XAQ geometry the rest of
	 * the driver needs (buffer size, WQEs per XAQ, in-unit entries).
	 */
	if (alloc && type == SSO_LF_GGRP) {
		struct sso_lf_alloc_rsp *rsp_ggrp = rsp;

		dev->xaq_buf_size = rsp_ggrp->xaq_buf_size;
		dev->xae_waes = rsp_ggrp->xaq_wq_entries;
		dev->iue = rsp_ggrp->in_unit_entries;
	}

	return 0;
}
639
/* eventdev port_release op: free the port memory allocated at setup. */
static void
otx2_sso_port_release(void *port)
{
	rte_free(port);
}
645
/* eventdev queue_release op: nothing to free, queues own no host memory. */
static void
otx2_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
{
	RTE_SET_USED(event_dev);
	RTE_SET_USED(queue_id);
}
652
653 static void
654 sso_clr_links(const struct rte_eventdev *event_dev)
655 {
656         struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
657         int i, j;
658
659         for (i = 0; i < dev->nb_event_ports; i++) {
660                 if (dev->dual_ws) {
661                         struct otx2_ssogws_dual *ws;
662
663                         ws = event_dev->data->ports[i];
664                         for (j = 0; j < dev->nb_event_queues; j++) {
665                                 sso_port_link_modify((struct otx2_ssogws *)
666                                                 &ws->ws_state[0], j, false);
667                                 sso_port_link_modify((struct otx2_ssogws *)
668                                                 &ws->ws_state[1], j, false);
669                         }
670                 } else {
671                         struct otx2_ssogws *ws;
672
673                         ws = event_dev->data->ports[i];
674                         for (j = 0; j < dev->nb_event_queues; j++)
675                                 sso_port_link_modify(ws, j, false);
676                 }
677         }
678 }
679
/*
 * Record the per-workslot fastpath register addresses, all offsets from
 * the workslot's SSOW LF BAR window @base.
 */
static void
sso_set_port_ops(struct otx2_ssogws *ws, uintptr_t base)
{
	ws->tag_op              = base + SSOW_LF_GWS_TAG;
	ws->wqp_op              = base + SSOW_LF_GWS_WQP;
	ws->getwrk_op           = base + SSOW_LF_GWS_OP_GET_WORK;
	ws->swtp_op             = base + SSOW_LF_GWS_SWTP;
	ws->swtag_norm_op       = base + SSOW_LF_GWS_OP_SWTAG_NORM;
	ws->swtag_desched_op    = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
}
690
/*
 * Attach and initialize GWS LFs for dual-workslot mode: two hardware
 * workslots are consumed per event port, and each port's two ws_state
 * entries are pointed at consecutive SSOW LF BAR windows.
 *
 * Return: 0 on success; -ENODEV on LF attach/init failure; -ENOMEM when
 * port memory allocation fails (LFs are released again in that case).
 */
static int
sso_configure_dual_ports(const struct rte_eventdev *event_dev)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	struct otx2_mbox *mbox = dev->mbox;
	uint8_t vws = 0;
	uint8_t nb_lf;
	int i, rc;

	otx2_sso_dbg("Configuring event ports %d", dev->nb_event_ports);

	/* Two GWS LFs per dual-workslot event port. */
	nb_lf = dev->nb_event_ports * 2;
	/* Ask AF to attach required LFs. */
	rc = sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, true);
	if (rc < 0) {
		otx2_err("Failed to attach SSO GWS LF");
		return -ENODEV;
	}

	if (sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, true) < 0) {
		/* Roll back the attach done above. */
		sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
		otx2_err("Failed to init SSO GWS LF");
		return -ENODEV;
	}

	for (i = 0; i < dev->nb_event_ports; i++) {
		struct otx2_ssogws_dual *ws;
		uintptr_t base;

		/* Free memory prior to re-allocation if needed */
		if (event_dev->data->ports[i] != NULL) {
			ws = event_dev->data->ports[i];
			rte_free(ws);
			ws = NULL;
		}

		/* Allocate event port memory */
		ws = rte_zmalloc_socket("otx2_sso_ws",
					sizeof(struct otx2_ssogws_dual),
					RTE_CACHE_LINE_SIZE,
					event_dev->data->socket_id);
		if (ws == NULL) {
			otx2_err("Failed to alloc memory for port=%d", i);
			rc = -ENOMEM;
			break;
		}

		ws->port = i;
		/* BAR2 window for GWS LF #vws: block addr in bits 20+,
		 * LF slot in bits 12-19.
		 */
		base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | vws << 12);
		sso_set_port_ops((struct otx2_ssogws *)&ws->ws_state[0], base);
		vws++;

		base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | vws << 12);
		sso_set_port_ops((struct otx2_ssogws *)&ws->ws_state[1], base);
		vws++;

		event_dev->data->ports[i] = ws;
	}

	if (rc < 0) {
		/* NOTE(review): ports allocated before the failing iteration
		 * stay in event_dev->data->ports[]; they are reclaimed on the
		 * next configure/release — confirm no leak on teardown path.
		 */
		sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, false);
		sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
	}

	return rc;
}
757
/*
 * Attach and initialize GWS LFs for single-workslot mode: one hardware
 * workslot per event port, each mapped to its own SSOW LF BAR window.
 *
 * Return: 0 on success; -ENODEV on LF attach/init failure; -ENOMEM when
 * port memory allocation fails (LFs are released again in that case).
 */
static int
sso_configure_ports(const struct rte_eventdev *event_dev)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	struct otx2_mbox *mbox = dev->mbox;
	uint8_t nb_lf;
	int i, rc;

	otx2_sso_dbg("Configuring event ports %d", dev->nb_event_ports);

	nb_lf = dev->nb_event_ports;
	/* Ask AF to attach required LFs. */
	rc = sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, true);
	if (rc < 0) {
		otx2_err("Failed to attach SSO GWS LF");
		return -ENODEV;
	}

	if (sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, true) < 0) {
		/* Roll back the attach done above. */
		sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
		otx2_err("Failed to init SSO GWS LF");
		return -ENODEV;
	}

	for (i = 0; i < nb_lf; i++) {
		struct otx2_ssogws *ws;
		uintptr_t base;

		/* Free memory prior to re-allocation if needed */
		if (event_dev->data->ports[i] != NULL) {
			ws = event_dev->data->ports[i];
			rte_free(ws);
			ws = NULL;
		}

		/* Allocate event port memory */
		ws = rte_zmalloc_socket("otx2_sso_ws",
					sizeof(struct otx2_ssogws),
					RTE_CACHE_LINE_SIZE,
					event_dev->data->socket_id);
		if (ws == NULL) {
			otx2_err("Failed to alloc memory for port=%d", i);
			rc = -ENOMEM;
			break;
		}

		ws->port = i;
		/* BAR2 window for GWS LF #i. */
		base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | i << 12);
		sso_set_port_ops(ws, base);

		event_dev->data->ports[i] = ws;
	}

	if (rc < 0) {
		sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, false);
		sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
	}

	return rc;
}
818
819 static int
820 sso_configure_queues(const struct rte_eventdev *event_dev)
821 {
822         struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
823         struct otx2_mbox *mbox = dev->mbox;
824         uint8_t nb_lf;
825         int rc;
826
827         otx2_sso_dbg("Configuring event queues %d", dev->nb_event_queues);
828
829         nb_lf = dev->nb_event_queues;
830         /* Ask AF to attach required LFs. */
831         rc = sso_hw_lf_cfg(mbox, SSO_LF_GGRP, nb_lf, true);
832         if (rc < 0) {
833                 otx2_err("Failed to attach SSO GGRP LF");
834                 return -ENODEV;
835         }
836
837         if (sso_lf_cfg(dev, mbox, SSO_LF_GGRP, nb_lf, true) < 0) {
838                 sso_hw_lf_cfg(mbox, SSO_LF_GGRP, nb_lf, false);
839                 otx2_err("Failed to init SSO GGRP LF");
840                 return -ENODEV;
841         }
842
843         return rc;
844 }
845
846 static int
847 sso_xaq_allocate(struct otx2_sso_evdev *dev)
848 {
849         const struct rte_memzone *mz;
850         struct npa_aura_s *aura;
851         static int reconfig_cnt;
852         char pool_name[RTE_MEMZONE_NAMESIZE];
853         uint32_t xaq_cnt;
854         int rc;
855
856         if (dev->xaq_pool)
857                 rte_mempool_free(dev->xaq_pool);
858
859         /*
860          * Allocate memory for Add work backpressure.
861          */
862         mz = rte_memzone_lookup(OTX2_SSO_FC_NAME);
863         if (mz == NULL)
864                 mz = rte_memzone_reserve_aligned(OTX2_SSO_FC_NAME,
865                                                  OTX2_ALIGN +
866                                                  sizeof(struct npa_aura_s),
867                                                  rte_socket_id(),
868                                                  RTE_MEMZONE_IOVA_CONTIG,
869                                                  OTX2_ALIGN);
870         if (mz == NULL) {
871                 otx2_err("Failed to allocate mem for fcmem");
872                 return -ENOMEM;
873         }
874
875         dev->fc_iova = mz->iova;
876         dev->fc_mem = mz->addr;
877
878         aura = (struct npa_aura_s *)((uintptr_t)dev->fc_mem + OTX2_ALIGN);
879         memset(aura, 0, sizeof(struct npa_aura_s));
880
881         aura->fc_ena = 1;
882         aura->fc_addr = dev->fc_iova;
883         aura->fc_hyst_bits = 0; /* Store count on all updates */
884
885         /* Taken from HRM 14.3.3(4) */
886         xaq_cnt = dev->nb_event_queues * OTX2_SSO_XAQ_CACHE_CNT;
887         if (dev->xae_cnt)
888                 xaq_cnt += dev->xae_cnt / dev->xae_waes;
889         else if (dev->adptr_xae_cnt)
890                 xaq_cnt += (dev->adptr_xae_cnt / dev->xae_waes) +
891                         (OTX2_SSO_XAQ_SLACK * dev->nb_event_queues);
892         else
893                 xaq_cnt += (dev->iue / dev->xae_waes) +
894                         (OTX2_SSO_XAQ_SLACK * dev->nb_event_queues);
895
896         otx2_sso_dbg("Configuring %d xaq buffers", xaq_cnt);
897         /* Setup XAQ based on number of nb queues. */
898         snprintf(pool_name, 30, "otx2_xaq_buf_pool_%d", reconfig_cnt);
899         dev->xaq_pool = (void *)rte_mempool_create_empty(pool_name,
900                         xaq_cnt, dev->xaq_buf_size, 0, 0,
901                         rte_socket_id(), 0);
902
903         if (dev->xaq_pool == NULL) {
904                 otx2_err("Unable to create empty mempool.");
905                 rte_memzone_free(mz);
906                 return -ENOMEM;
907         }
908
909         rc = rte_mempool_set_ops_byname(dev->xaq_pool,
910                                         rte_mbuf_platform_mempool_ops(), aura);
911         if (rc != 0) {
912                 otx2_err("Unable to set xaqpool ops.");
913                 goto alloc_fail;
914         }
915
916         rc = rte_mempool_populate_default(dev->xaq_pool);
917         if (rc < 0) {
918                 otx2_err("Unable to set populate xaqpool.");
919                 goto alloc_fail;
920         }
921         reconfig_cnt++;
922         /* When SW does addwork (enqueue) check if there is space in XAQ by
923          * comparing fc_addr above against the xaq_lmt calculated below.
924          * There should be a minimum headroom (OTX2_SSO_XAQ_SLACK / 2) for SSO
925          * to request XAQ to cache them even before enqueue is called.
926          */
927         dev->xaq_lmt = xaq_cnt - (OTX2_SSO_XAQ_SLACK / 2 *
928                                   dev->nb_event_queues);
929         dev->nb_xaq_cfg = xaq_cnt;
930
931         return 0;
932 alloc_fail:
933         rte_mempool_free(dev->xaq_pool);
934         rte_memzone_free(mz);
935         return rc;
936 }
937
/*
 * Point every GGRP at the NPA aura backing the XAQ pool so hardware can
 * draw XAQ buffers from it.  Requires dev->xaq_pool to be valid.
 *
 * Return: 0 on success, negative mbox error otherwise.
 */
static int
sso_ggrp_alloc_xaq(struct otx2_sso_evdev *dev)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct sso_hw_setconfig *req;

	otx2_sso_dbg("Configuring XAQ for GGRPs");
	req = otx2_mbox_alloc_msg_sso_hw_setconfig(mbox);
	req->npa_pf_func = otx2_npa_pf_func_get();
	req->npa_aura_id = npa_lf_aura_handle_to_aura(dev->xaq_pool->pool_id);
	req->hwgrps = dev->nb_event_queues;

	return otx2_mbox_process(mbox);
}
952
953 static void
954 sso_lf_teardown(struct otx2_sso_evdev *dev,
955                 enum otx2_sso_lf_type lf_type)
956 {
957         uint8_t nb_lf;
958
959         switch (lf_type) {
960         case SSO_LF_GGRP:
961                 nb_lf = dev->nb_event_queues;
962                 break;
963         case SSO_LF_GWS:
964                 nb_lf = dev->nb_event_ports;
965                 nb_lf *= dev->dual_ws ? 2 : 1;
966                 break;
967         default:
968                 return;
969         }
970
971         sso_lf_cfg(dev, dev->mbox, lf_type, nb_lf, false);
972         sso_hw_lf_cfg(dev->mbox, lf_type, nb_lf, false);
973 }
974
/*
 * eventdev dev_configure op.
 *
 * Validates the requested configuration, tears down any previous queue/port
 * LFs and IRQs, then (re)builds: GWS ports (single or dual workslot), GGRP
 * queues, the XAQ pool, queue-port links (cleared), per-GGRP XAQ aura,
 * MSIX offsets and IRQs.  On any failure past port setup, everything built
 * so far is torn down and the counters reset.
 *
 * Return: 0 on success, -EINVAL for unsupported parameters, -ENODEV /
 * -ENOMEM or a negative mbox rc on setup failure.
 */
static int
otx2_sso_configure(const struct rte_eventdev *event_dev)
{
	struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	uint32_t deq_tmo_ns;
	int rc;

	sso_func_trace();
	deq_tmo_ns = conf->dequeue_timeout_ns;

	/* 0 means "use the device minimum". */
	if (deq_tmo_ns == 0)
		deq_tmo_ns = dev->min_dequeue_timeout_ns;

	if (deq_tmo_ns < dev->min_dequeue_timeout_ns ||
	    deq_tmo_ns > dev->max_dequeue_timeout_ns) {
		otx2_err("Unsupported dequeue timeout requested");
		return -EINVAL;
	}

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
		dev->is_timeout_deq = 1;

	dev->deq_tmo_ns = deq_tmo_ns;

	if (conf->nb_event_ports > dev->max_event_ports ||
	    conf->nb_event_queues > dev->max_event_queues) {
		otx2_err("Unsupported event queues/ports requested");
		return -EINVAL;
	}

	/* Hardware workslots dequeue/enqueue a single event at a time. */
	if (conf->nb_event_port_dequeue_depth > 1) {
		otx2_err("Unsupported event port deq depth requested");
		return -EINVAL;
	}

	if (conf->nb_event_port_enqueue_depth > 1) {
		otx2_err("Unsupported event port enq depth requested");
		return -EINVAL;
	}

	/* Reconfiguration: drop IRQs before the LFs they reference. */
	if (dev->configured)
		sso_unregister_irqs(event_dev);

	if (dev->nb_event_queues) {
		/* Finit any previous queues. */
		sso_lf_teardown(dev, SSO_LF_GGRP);
	}
	if (dev->nb_event_ports) {
		/* Finit any previous ports. */
		sso_lf_teardown(dev, SSO_LF_GWS);
	}

	dev->nb_event_queues = conf->nb_event_queues;
	dev->nb_event_ports = conf->nb_event_ports;

	if (dev->dual_ws)
		rc = sso_configure_dual_ports(event_dev);
	else
		rc = sso_configure_ports(event_dev);

	if (rc < 0) {
		otx2_err("Failed to configure event ports");
		return -ENODEV;
	}

	if (sso_configure_queues(event_dev) < 0) {
		otx2_err("Failed to configure event queues");
		rc = -ENODEV;
		goto teardown_hws;
	}

	if (sso_xaq_allocate(dev) < 0) {
		rc = -ENOMEM;
		goto teardown_hwggrp;
	}

	/* Clear any prior port-queue mapping. */
	sso_clr_links(event_dev);
	rc = sso_ggrp_alloc_xaq(dev);
	if (rc < 0) {
		otx2_err("Failed to alloc xaq to ggrp %d", rc);
		goto teardown_hwggrp;
	}

	rc = sso_get_msix_offsets(event_dev);
	if (rc < 0) {
		otx2_err("Failed to get msix offsets %d", rc);
		goto teardown_hwggrp;
	}

	rc = sso_register_irqs(event_dev);
	if (rc < 0) {
		otx2_err("Failed to register irq %d", rc);
		goto teardown_hwggrp;
	}

	dev->configured = 1;
	/* Publish the new state before fastpath threads may observe it. */
	rte_mb();

	return 0;
teardown_hwggrp:
	sso_lf_teardown(dev, SSO_LF_GGRP);
teardown_hws:
	sso_lf_teardown(dev, SSO_LF_GWS);
	dev->nb_event_queues = 0;
	dev->nb_event_ports = 0;
	dev->configured = 0;
	return rc;
}
1085
1086 static void
1087 otx2_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
1088                         struct rte_event_queue_conf *queue_conf)
1089 {
1090         RTE_SET_USED(event_dev);
1091         RTE_SET_USED(queue_id);
1092
1093         queue_conf->nb_atomic_flows = (1ULL << 20);
1094         queue_conf->nb_atomic_order_sequences = (1ULL << 20);
1095         queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
1096         queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
1097 }
1098
1099 static int
1100 otx2_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
1101                      const struct rte_event_queue_conf *queue_conf)
1102 {
1103         struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1104         struct otx2_mbox *mbox = dev->mbox;
1105         struct sso_grp_priority *req;
1106         int rc;
1107
1108         sso_func_trace("Queue=%d prio=%d", queue_id, queue_conf->priority);
1109
1110         req = otx2_mbox_alloc_msg_sso_grp_set_priority(dev->mbox);
1111         req->grp = queue_id;
1112         req->weight = 0xFF;
1113         req->affinity = 0xFF;
1114         /* Normalize <0-255> to <0-7> */
1115         req->priority = queue_conf->priority / 32;
1116
1117         rc = otx2_mbox_process(mbox);
1118         if (rc < 0) {
1119                 otx2_err("Failed to set priority queue=%d", queue_id);
1120                 return rc;
1121         }
1122
1123         return 0;
1124 }
1125
/* eventdev port_def_conf op: advertise the default port configuration.
 * Depths are 1 because the hardware workslot handles one event at a time.
 */
static void
otx2_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
		       struct rte_event_port_conf *port_conf)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);

	RTE_SET_USED(port_id);
	port_conf->new_event_threshold = dev->max_num_events;
	port_conf->dequeue_depth = 1;
	port_conf->enqueue_depth = 1;
}
1137
/*
 * eventdev port_setup op: wire a previously allocated workslot to the
 * device state — GGRP BAR bases, flow-control memory, XAQ limit, timestamp
 * info — and program the get_work NW_TIM timeout register(s).
 *
 * Return: 0 on success, -EINVAL when the port was never allocated.
 */
static int
otx2_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
		    const struct rte_event_port_conf *port_conf)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	uintptr_t grps_base[OTX2_SSO_MAX_VHGRP] = {0};
	uint64_t val;
	uint16_t q;

	sso_func_trace("Port=%d", port_id);
	RTE_SET_USED(port_conf);

	if (event_dev->data->ports[port_id] == NULL) {
		otx2_err("Invalid port Id %d", port_id);
		return -EINVAL;
	}

	/* Collect the per-GGRP BAR2 register windows. */
	for (q = 0; q < dev->nb_event_queues; q++) {
		grps_base[q] = dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20 | q << 12);
		if (grps_base[q] == 0) {
			otx2_err("Failed to get grp[%d] base addr", q);
			return -EINVAL;
		}
	}

	/* Set get_work timeout for HWS */
	val = NSEC2USEC(dev->deq_tmo_ns) - 1;

	if (dev->dual_ws) {
		struct otx2_ssogws_dual *ws = event_dev->data->ports[port_id];

		rte_memcpy(ws->grps_base, grps_base,
			   sizeof(uintptr_t) * OTX2_SSO_MAX_VHGRP);
		ws->fc_mem = dev->fc_mem;
		ws->xaq_lmt = dev->xaq_lmt;
		ws->tstamp = dev->tstamp;
		/* Program NW_TIM on both halves of the dual workslot. */
		otx2_write64(val, OTX2_SSOW_GET_BASE_ADDR(
			     ws->ws_state[0].getwrk_op) + SSOW_LF_GWS_NW_TIM);
		otx2_write64(val, OTX2_SSOW_GET_BASE_ADDR(
			     ws->ws_state[1].getwrk_op) + SSOW_LF_GWS_NW_TIM);
	} else {
		struct otx2_ssogws *ws = event_dev->data->ports[port_id];
		uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);

		rte_memcpy(ws->grps_base, grps_base,
			   sizeof(uintptr_t) * OTX2_SSO_MAX_VHGRP);
		ws->fc_mem = dev->fc_mem;
		ws->xaq_lmt = dev->xaq_lmt;
		ws->tstamp = dev->tstamp;
		otx2_write64(val, base + SSOW_LF_GWS_NW_TIM);
	}

	otx2_sso_dbg("Port=%d ws=%p", port_id, event_dev->data->ports[port_id]);

	return 0;
}
1194
/* eventdev timeout_ticks op: convert nanoseconds to timer ticks. */
static int
otx2_sso_timeout_ticks(struct rte_eventdev *event_dev, uint64_t ns,
		       uint64_t *tmo_ticks)
{
	RTE_SET_USED(event_dev);
	*tmo_ticks = NSEC2TICK(ns, rte_get_timer_hz());

	return 0;
}
1204
1205 static void
1206 ssogws_dump(struct otx2_ssogws *ws, FILE *f)
1207 {
1208         uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);
1209
1210         fprintf(f, "SSOW_LF_GWS Base addr   0x%" PRIx64 "\n", (uint64_t)base);
1211         fprintf(f, "SSOW_LF_GWS_LINKS       0x%" PRIx64 "\n",
1212                 otx2_read64(base + SSOW_LF_GWS_LINKS));
1213         fprintf(f, "SSOW_LF_GWS_PENDWQP     0x%" PRIx64 "\n",
1214                 otx2_read64(base + SSOW_LF_GWS_PENDWQP));
1215         fprintf(f, "SSOW_LF_GWS_PENDSTATE   0x%" PRIx64 "\n",
1216                 otx2_read64(base + SSOW_LF_GWS_PENDSTATE));
1217         fprintf(f, "SSOW_LF_GWS_NW_TIM      0x%" PRIx64 "\n",
1218                 otx2_read64(base + SSOW_LF_GWS_NW_TIM));
1219         fprintf(f, "SSOW_LF_GWS_TAG         0x%" PRIx64 "\n",
1220                 otx2_read64(base + SSOW_LF_GWS_TAG));
1221         fprintf(f, "SSOW_LF_GWS_WQP         0x%" PRIx64 "\n",
1222                 otx2_read64(base + SSOW_LF_GWS_TAG));
1223         fprintf(f, "SSOW_LF_GWS_SWTP        0x%" PRIx64 "\n",
1224                 otx2_read64(base + SSOW_LF_GWS_SWTP));
1225         fprintf(f, "SSOW_LF_GWS_PENDTAG     0x%" PRIx64 "\n",
1226                 otx2_read64(base + SSOW_LF_GWS_PENDTAG));
1227 }
1228
1229 static void
1230 ssoggrp_dump(uintptr_t base, FILE *f)
1231 {
1232         fprintf(f, "SSO_LF_GGRP Base addr   0x%" PRIx64 "\n", (uint64_t)base);
1233         fprintf(f, "SSO_LF_GGRP_QCTL        0x%" PRIx64 "\n",
1234                 otx2_read64(base + SSO_LF_GGRP_QCTL));
1235         fprintf(f, "SSO_LF_GGRP_XAQ_CNT     0x%" PRIx64 "\n",
1236                 otx2_read64(base + SSO_LF_GGRP_XAQ_CNT));
1237         fprintf(f, "SSO_LF_GGRP_INT_THR     0x%" PRIx64 "\n",
1238                 otx2_read64(base + SSO_LF_GGRP_INT_THR));
1239         fprintf(f, "SSO_LF_GGRP_INT_CNT     0x%" PRIX64 "\n",
1240                 otx2_read64(base + SSO_LF_GGRP_INT_CNT));
1241         fprintf(f, "SSO_LF_GGRP_AQ_CNT      0x%" PRIX64 "\n",
1242                 otx2_read64(base + SSO_LF_GGRP_AQ_CNT));
1243         fprintf(f, "SSO_LF_GGRP_AQ_THR      0x%" PRIX64 "\n",
1244                 otx2_read64(base + SSO_LF_GGRP_AQ_THR));
1245         fprintf(f, "SSO_LF_GGRP_MISC_CNT    0x%" PRIx64 "\n",
1246                 otx2_read64(base + SSO_LF_GGRP_MISC_CNT));
1247 }
1248
/*
 * eventdev dump op: print SSOW workslot registers for every port and SSO
 * GGRP registers for every queue (GGRP bases are taken from port 0's copy).
 */
static void
otx2_sso_dump(struct rte_eventdev *event_dev, FILE *f)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	uint8_t queue;
	uint8_t port;

	fprintf(f, "[%s] SSO running in [%s] mode\n", __func__, dev->dual_ws ?
		"dual_ws" : "single_ws");
	/* Dump SSOW registers */
	for (port = 0; port < dev->nb_event_ports; port++) {
		if (dev->dual_ws) {
			struct otx2_ssogws_dual *ws =
				event_dev->data->ports[port];

			/* Both virtual workslots of the dual port. */
			fprintf(f, "[%s] SSO dual workslot[%d] vws[%d] dump\n",
				__func__, port, 0);
			ssogws_dump((struct otx2_ssogws *)&ws->ws_state[0], f);
			fprintf(f, "[%s]SSO dual workslot[%d] vws[%d] dump\n",
				__func__, port, 1);
			ssogws_dump((struct otx2_ssogws *)&ws->ws_state[1], f);
		} else {
			fprintf(f, "[%s]SSO single workslot[%d] dump\n",
				__func__, port);
			ssogws_dump(event_dev->data->ports[port], f);
		}
	}

	/* Dump SSO registers */
	for (queue = 0; queue < dev->nb_event_queues; queue++) {
		fprintf(f, "[%s]SSO group[%d] dump\n", __func__, queue);
		if (dev->dual_ws) {
			struct otx2_ssogws_dual *ws = event_dev->data->ports[0];
			ssoggrp_dump(ws->grps_base[queue], f);
		} else {
			struct otx2_ssogws *ws = event_dev->data->ports[0];
			ssoggrp_dump(ws->grps_base[queue], f);
		}
	}
}
1289
1290 static void
1291 otx2_handle_event(void *arg, struct rte_event event)
1292 {
1293         struct rte_eventdev *event_dev = arg;
1294
1295         if (event_dev->dev_ops->dev_stop_flush != NULL)
1296                 event_dev->dev_ops->dev_stop_flush(event_dev->data->dev_id,
1297                                 event, event_dev->data->dev_stop_flush_arg);
1298 }
1299
1300 static void
1301 sso_qos_cfg(struct rte_eventdev *event_dev)
1302 {
1303         struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1304         struct sso_grp_qos_cfg *req;
1305         uint16_t i;
1306
1307         for (i = 0; i < dev->qos_queue_cnt; i++) {
1308                 uint8_t xaq_prcnt = dev->qos_parse_data[i].xaq_prcnt;
1309                 uint8_t iaq_prcnt = dev->qos_parse_data[i].iaq_prcnt;
1310                 uint8_t taq_prcnt = dev->qos_parse_data[i].taq_prcnt;
1311
1312                 if (dev->qos_parse_data[i].queue >= dev->nb_event_queues)
1313                         continue;
1314
1315                 req = otx2_mbox_alloc_msg_sso_grp_qos_config(dev->mbox);
1316                 req->xaq_limit = (dev->nb_xaq_cfg *
1317                                   (xaq_prcnt ? xaq_prcnt : 100)) / 100;
1318                 req->taq_thr = (SSO_HWGRP_IAQ_MAX_THR_MASK *
1319                                 (iaq_prcnt ? iaq_prcnt : 100)) / 100;
1320                 req->iaq_thr = (SSO_HWGRP_TAQ_MAX_THR_MASK *
1321                                 (taq_prcnt ? taq_prcnt : 100)) / 100;
1322         }
1323
1324         if (dev->qos_queue_cnt)
1325                 otx2_mbox_process(dev->mbox);
1326 }
1327
/*
 * Quiesce the device: reset every workslot's state, drain all pending
 * events through workslot 0 (handing each to otx2_handle_event), then
 * enable (@enable=1) or disable (@enable=0) every GGRP via QCTL and
 * invalidate the GWS cache.  Used by start (enable=1), stop (enable=0)
 * and XAE reconfiguration.
 */
static void
sso_cleanup(struct rte_eventdev *event_dev, uint8_t enable)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	uint16_t i;

	for (i = 0; i < dev->nb_event_ports; i++) {
		if (dev->dual_ws) {
			struct otx2_ssogws_dual *ws;

			ws = event_dev->data->ports[i];
			/* Reset both halves and clear cached group state. */
			ssogws_reset((struct otx2_ssogws *)&ws->ws_state[0]);
			ssogws_reset((struct otx2_ssogws *)&ws->ws_state[1]);
			ws->swtag_req = 0;
			ws->vws = 0;
			ws->ws_state[0].cur_grp = 0;
			ws->ws_state[0].cur_tt = SSO_SYNC_EMPTY;
			ws->ws_state[1].cur_grp = 0;
			ws->ws_state[1].cur_tt = SSO_SYNC_EMPTY;
		} else {
			struct otx2_ssogws *ws;

			ws = event_dev->data->ports[i];
			ssogws_reset(ws);
			ws->swtag_req = 0;
			ws->cur_grp = 0;
			ws->cur_tt = SSO_SYNC_EMPTY;
		}
	}

	rte_mb();
	if (dev->dual_ws) {
		struct otx2_ssogws_dual *ws = event_dev->data->ports[0];
		struct otx2_ssogws temp_ws;

		/* Borrow HWS0's first half as a scratch single workslot. */
		memcpy(&temp_ws, &ws->ws_state[0],
		       sizeof(struct otx2_ssogws_state));
		for (i = 0; i < dev->nb_event_queues; i++) {
			/* Consume all the events through HWS0 */
			ssogws_flush_events(&temp_ws, i, ws->grps_base[i],
					    otx2_handle_event, event_dev);
			/* Enable/Disable SSO GGRP */
			otx2_write64(enable, ws->grps_base[i] +
				     SSO_LF_GGRP_QCTL);
		}
		ws->ws_state[0].cur_grp = 0;
		ws->ws_state[0].cur_tt = SSO_SYNC_EMPTY;
	} else {
		struct otx2_ssogws *ws = event_dev->data->ports[0];

		for (i = 0; i < dev->nb_event_queues; i++) {
			/* Consume all the events through HWS0 */
			ssogws_flush_events(ws, i, ws->grps_base[i],
					    otx2_handle_event, event_dev);
			/* Enable/Disable SSO GGRP */
			otx2_write64(enable, ws->grps_base[i] +
				     SSO_LF_GGRP_QCTL);
		}
		ws->cur_grp = 0;
		ws->cur_tt = SSO_SYNC_EMPTY;
	}

	/* reset SSO GWS cache */
	otx2_mbox_alloc_msg_sso_ws_cache_inv(dev->mbox);
	otx2_mbox_process(dev->mbox);
}
1394
1395 int
1396 sso_xae_reconfigure(struct rte_eventdev *event_dev)
1397 {
1398         struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1399         struct rte_mempool *prev_xaq_pool;
1400         int rc = 0;
1401
1402         if (event_dev->data->dev_started)
1403                 sso_cleanup(event_dev, 0);
1404
1405         prev_xaq_pool = dev->xaq_pool;
1406         dev->xaq_pool = NULL;
1407         sso_xaq_allocate(dev);
1408         rc = sso_ggrp_alloc_xaq(dev);
1409         if (rc < 0) {
1410                 otx2_err("Failed to alloc xaq to ggrp %d", rc);
1411                 rte_mempool_free(prev_xaq_pool);
1412                 return rc;
1413         }
1414
1415         rte_mempool_free(prev_xaq_pool);
1416         rte_mb();
1417         if (event_dev->data->dev_started)
1418                 sso_cleanup(event_dev, 1);
1419
1420         return 0;
1421 }
1422
/* eventdev dev_start op: apply QoS, enable all GGRPs (flushing any stale
 * events) and install the fastpath dequeue/enqueue function pointers.
 */
static int
otx2_sso_start(struct rte_eventdev *event_dev)
{
	sso_func_trace();
	sso_qos_cfg(event_dev);
	sso_cleanup(event_dev, 1);
	sso_fastpath_fns_set(event_dev);

	return 0;
}
1433
/* eventdev dev_stop op: drain all events and disable every GGRP. */
static void
otx2_sso_stop(struct rte_eventdev *event_dev)
{
	sso_func_trace();
	sso_cleanup(event_dev, 0);
	rte_mb();
}
1441
/*
 * eventdev dev_close op: unlink every queue from every port, release all
 * GGRP/GWS LFs, and free the XAQ pool and flow-control memzone.
 * No-op when the device was never configured.
 */
static int
otx2_sso_close(struct rte_eventdev *event_dev)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint16_t i;

	if (!dev->configured)
		return 0;

	sso_unregister_irqs(event_dev);

	/* Build the full queue list once; unlink it from every port. */
	for (i = 0; i < dev->nb_event_queues; i++)
		all_queues[i] = i;

	for (i = 0; i < dev->nb_event_ports; i++)
		otx2_sso_port_unlink(event_dev, event_dev->data->ports[i],
				     all_queues, dev->nb_event_queues);

	sso_lf_teardown(dev, SSO_LF_GGRP);
	sso_lf_teardown(dev, SSO_LF_GWS);
	dev->nb_event_ports = 0;
	dev->nb_event_queues = 0;
	rte_mempool_free(dev->xaq_pool);
	rte_memzone_free(rte_memzone_lookup(OTX2_SSO_FC_NAME));
	/* NOTE(review): dev->configured and dev->xaq_pool are not reset
	 * here — a second close would re-run teardown and re-free the
	 * pool; confirm callers guarantee close is invoked once.
	 */

	return 0;
}
1470
1471 /* Initialize and register event driver with DPDK Application */
1472 static struct rte_eventdev_ops otx2_sso_ops = {
1473         .dev_infos_get    = otx2_sso_info_get,
1474         .dev_configure    = otx2_sso_configure,
1475         .queue_def_conf   = otx2_sso_queue_def_conf,
1476         .queue_setup      = otx2_sso_queue_setup,
1477         .queue_release    = otx2_sso_queue_release,
1478         .port_def_conf    = otx2_sso_port_def_conf,
1479         .port_setup       = otx2_sso_port_setup,
1480         .port_release     = otx2_sso_port_release,
1481         .port_link        = otx2_sso_port_link,
1482         .port_unlink      = otx2_sso_port_unlink,
1483         .timeout_ticks    = otx2_sso_timeout_ticks,
1484
1485         .eth_rx_adapter_caps_get  = otx2_sso_rx_adapter_caps_get,
1486         .eth_rx_adapter_queue_add = otx2_sso_rx_adapter_queue_add,
1487         .eth_rx_adapter_queue_del = otx2_sso_rx_adapter_queue_del,
1488         .eth_rx_adapter_start = otx2_sso_rx_adapter_start,
1489         .eth_rx_adapter_stop = otx2_sso_rx_adapter_stop,
1490
1491         .eth_tx_adapter_caps_get = otx2_sso_tx_adapter_caps_get,
1492         .eth_tx_adapter_queue_add = otx2_sso_tx_adapter_queue_add,
1493         .eth_tx_adapter_queue_del = otx2_sso_tx_adapter_queue_del,
1494
1495         .timer_adapter_caps_get = otx2_tim_caps_get,
1496
1497         .xstats_get       = otx2_sso_xstats_get,
1498         .xstats_reset     = otx2_sso_xstats_reset,
1499         .xstats_get_names = otx2_sso_xstats_get_names,
1500
1501         .dump             = otx2_sso_dump,
1502         .dev_start        = otx2_sso_start,
1503         .dev_stop         = otx2_sso_stop,
1504         .dev_close        = otx2_sso_close,
1505         .dev_selftest     = otx2_sso_selftest,
1506 };
1507
/* Devargs keys accepted by this PMD (see RTE_PMD_REGISTER_PARAM_STRING
 * at the bottom of the file for the value formats).
 */
#define OTX2_SSO_XAE_CNT	"xae_cnt"
#define OTX2_SSO_SINGLE_WS	"single_ws"
#define OTX2_SSO_GGRP_QOS	"qos"
#define OTX2_SSO_SELFTEST	"selftest"
1512
1513 static void
1514 parse_queue_param(char *value, void *opaque)
1515 {
1516         struct otx2_sso_qos queue_qos = {0};
1517         uint8_t *val = (uint8_t *)&queue_qos;
1518         struct otx2_sso_evdev *dev = opaque;
1519         char *tok = strtok(value, "-");
1520
1521         if (!strlen(value))
1522                 return;
1523
1524         while (tok != NULL) {
1525                 *val = atoi(tok);
1526                 tok = strtok(NULL, "-");
1527                 val++;
1528         }
1529
1530         if (val != (&queue_qos.iaq_prcnt + 1)) {
1531                 otx2_err("Invalid QoS parameter expected [Qx-XAQ-TAQ-IAQ]");
1532                 return;
1533         }
1534
1535         dev->qos_queue_cnt++;
1536         dev->qos_parse_data = rte_realloc(dev->qos_parse_data,
1537                                           sizeof(struct otx2_sso_qos) *
1538                                           dev->qos_queue_cnt, 0);
1539         dev->qos_parse_data[dev->qos_queue_cnt - 1] = queue_qos;
1540 }
1541
/* Split a "[tuple][tuple]..." QoS devargs string into individual
 * bracketed tuples and hand each one to parse_queue_param(). Works on a
 * private copy because parsing (strtok) mutates the string.
 */
static void
parse_qos_list(const char *value, void *opaque)
{
	char *s = strdup(value);
	char *start = NULL;
	char *end = NULL;
	char *f;

	/* BUGFIX: strdup() can return NULL on allocation failure; the
	 * previous code dereferenced it unconditionally.
	 */
	if (s == NULL)
		return;
	f = s;	/* keep the original pointer for free() */

	while (*s) {
		if (*s == '[')
			start = s;
		else if (*s == ']')
			end = s;

		if (start < end && *start) {
			/* NUL-terminate the tuple and parse its interior. */
			*end = 0;
			parse_queue_param(start + 1, opaque);
			s = end;
			start = end;
		}
		s++;
	}

	free(f);
}
1567
/* rte_kvargs_process() callback for the "qos" key: forwards the raw value
 * string to the bracketed-list parser. Always returns 0 so kvargs
 * processing continues; per-tuple errors are logged inside the parser.
 */
static int
parse_sso_kvargs_dict(const char *key, const char *value, void *opaque)
{
	RTE_SET_USED(key);

	/* Dict format [Qx-XAQ-TAQ-IAQ][Qz-XAQ-TAQ-IAQ] use '-' cause ','
	 * isn't allowed. Everything is expressed in percentages, 0 represents
	 * default.
	 */
	parse_qos_list(value, opaque);

	return 0;
}
1581
1582 static void
1583 sso_parse_devargs(struct otx2_sso_evdev *dev, struct rte_devargs *devargs)
1584 {
1585         struct rte_kvargs *kvlist;
1586         uint8_t single_ws = 0;
1587
1588         if (devargs == NULL)
1589                 return;
1590         kvlist = rte_kvargs_parse(devargs->args, NULL);
1591         if (kvlist == NULL)
1592                 return;
1593
1594         rte_kvargs_process(kvlist, OTX2_SSO_SELFTEST, &parse_kvargs_flag,
1595                            &dev->selftest);
1596         rte_kvargs_process(kvlist, OTX2_SSO_XAE_CNT, &parse_kvargs_value,
1597                            &dev->xae_cnt);
1598         rte_kvargs_process(kvlist, OTX2_SSO_SINGLE_WS, &parse_kvargs_flag,
1599                            &single_ws);
1600         rte_kvargs_process(kvlist, OTX2_SSO_GGRP_QOS, &parse_sso_kvargs_dict,
1601                            dev);
1602
1603         dev->dual_ws = !single_ws;
1604         rte_kvargs_free(kvlist);
1605 }
1606
/* PCI probe hook: delegate to the generic eventdev PCI probe helper,
 * which allocates the private area and invokes otx2_sso_init().
 */
static int
otx2_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_probe(pci_drv, pci_dev,
				       sizeof(struct otx2_sso_evdev),
				       otx2_sso_init);
}
1614
/* PCI remove hook: delegate to the generic eventdev PCI remove helper,
 * which invokes otx2_sso_fini() before releasing the device.
 */
static int
otx2_sso_remove(struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_remove(pci_dev, otx2_sso_fini);
}
1620
/* PCI ID table: the OCTEON TX2 RVU SSO/TIM physical function; the
 * zeroed vendor_id entry terminates the list.
 */
static const struct rte_pci_id pci_sso_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
			       PCI_DEVID_OCTEONTX2_RVU_SSO_TIM_PF)
	},
	{
		.vendor_id = 0,
	},
};
1630
/* PCI driver descriptor: BARs must be mapped and the device requires
 * IOVA-as-VA mode.
 */
static struct rte_pci_driver pci_sso = {
	.id_table = pci_sso_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
	.probe = otx2_sso_probe,
	.remove = otx2_sso_remove,
};
1637
/* Device init callback (primary process): bring up the base otx2 device,
 * query provisioned SSO/SSOW resources over mbox, attach the NPA LF,
 * apply devargs and initialize the companion TIM device. Secondary
 * processes only install the fastpath function pointers. On failure the
 * goto chain unwinds exactly what was set up. Returns 0 or negative errno.
 */
int
otx2_sso_init(struct rte_eventdev *event_dev)
{
	struct free_rsrcs_rsp *rsrc_cnt;
	struct rte_pci_device *pci_dev;
	struct otx2_sso_evdev *dev;
	int rc;

	event_dev->dev_ops = &otx2_sso_ops;
	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		sso_fastpath_fns_set(event_dev);
		return 0;
	}

	dev = sso_pmd_priv(event_dev);

	pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);

	/* Initialize the base otx2_dev object */
	rc = otx2_dev_init(pci_dev, dev);
	if (rc < 0) {
		otx2_err("Failed to initialize otx2_dev rc=%d", rc);
		goto error;
	}

	/* Get SSO and SSOW MSIX rsrc cnt */
	otx2_mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
	rc = otx2_mbox_process_msg(dev->mbox, (void *)&rsrc_cnt);
	if (rc < 0) {
		otx2_err("Unable to get free rsrc count");
		goto otx2_dev_uninit;
	}
	otx2_sso_dbg("SSO %d SSOW %d NPA %d provisioned", rsrc_cnt->sso,
		     rsrc_cnt->ssow, rsrc_cnt->npa);

	/* Clamp to the PMD's software limits. */
	dev->max_event_ports = RTE_MIN(rsrc_cnt->ssow, OTX2_SSO_MAX_VHWS);
	dev->max_event_queues = RTE_MIN(rsrc_cnt->sso, OTX2_SSO_MAX_VHGRP);
	/* Grab the NPA LF if required */
	rc = otx2_npa_lf_init(pci_dev, dev);
	if (rc < 0) {
		otx2_err("Unable to init NPA lf. It might not be provisioned");
		goto otx2_dev_uninit;
	}

	dev->drv_inited = true;
	dev->is_timeout_deq = 0;
	dev->min_dequeue_timeout_ns = USEC2NSEC(1);
	dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);
	dev->max_num_events = -1;	/* -1: no XAQ limit computed yet */
	dev->nb_event_queues = 0;
	dev->nb_event_ports = 0;

	if (!dev->max_event_ports || !dev->max_event_queues) {
		otx2_err("Not enough eventdev resource queues=%d ports=%d",
			 dev->max_event_queues, dev->max_event_ports);
		rc = -ENODEV;
		goto otx2_npa_lf_uninit;
	}

	/* Default is dual workslot; devargs "single_ws=1" overrides. */
	dev->dual_ws = 1;
	sso_parse_devargs(dev, pci_dev->device.devargs);
	if (dev->dual_ws) {
		otx2_sso_dbg("Using dual workslot mode");
		/* Each port consumes two HW workslots in dual mode. */
		dev->max_event_ports = dev->max_event_ports / 2;
	} else {
		otx2_sso_dbg("Using single workslot mode");
	}

	otx2_sso_pf_func_set(dev->pf_func);
	otx2_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
		     event_dev->data->name, dev->max_event_queues,
		     dev->max_event_ports);
	if (dev->selftest) {
		event_dev->dev->driver = &pci_sso.driver;
		event_dev->dev_ops->dev_selftest();
	}

	/* NOTE(review): otx2_tim_init() result is ignored — presumably TIM
	 * is optional; confirm against the TIM driver contract.
	 */
	otx2_tim_init(pci_dev, (struct otx2_dev *)dev);

	return 0;

otx2_npa_lf_uninit:
	otx2_npa_lf_fini();
otx2_dev_uninit:
	otx2_dev_fini(pci_dev, dev);
error:
	return rc;
}
1727
/* Device fini callback (primary process): release the NPA LF reference,
 * then tear down TIM and the base otx2 device — but only once no other
 * device still holds the shared NPA resource (returns -EAGAIN otherwise,
 * so the caller can retry). Secondary processes do nothing.
 */
int
otx2_sso_fini(struct rte_eventdev *event_dev)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	struct rte_pci_device *pci_dev;

	/* For secondary processes, nothing to be done */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);

	/* If init never completed there is no NPA LF reference to drop. */
	if (!dev->drv_inited)
		goto dev_fini;

	dev->drv_inited = false;
	otx2_npa_lf_fini();

dev_fini:
	if (otx2_npa_lf_active(dev)) {
		otx2_info("Common resource in use by other devices");
		return -EAGAIN;
	}

	otx2_tim_fini();
	otx2_dev_fini(pci_dev, dev);

	return 0;
}
1757
/* Register the PMD with the PCI bus, export its ID table and kernel
 * module dependency, and advertise the accepted devargs keys.
 */
RTE_PMD_REGISTER_PCI(event_octeontx2, pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_octeontx2, pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_octeontx2, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_octeontx2, OTX2_SSO_XAE_CNT "=<int>"
			      OTX2_SSO_SINGLE_WS "=1"
			      OTX2_SSO_GGRP_QOS "=<string>"
			      OTX2_SSO_SELFTEST "=1");