/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <inttypes.h>

#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_eal.h>
#include <eventdev_pmd_pci.h>
#include <rte_kvargs.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_pci.h>

#include "otx2_evdev.h"
#include "otx2_evdev_crypto_adptr_tx.h"
#include "otx2_evdev_stats.h"
#include "otx2_irq.h"
#include "otx2_tim_evdev.h"

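/*
 * Query the AF via mailbox for the MSIX vector offsets of the SSOW (work
 * slot) and SSO (group) LFs, so IRQs can later be registered against the
 * right vectors.
 */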
static inline int
sso_get_msix_offsets(const struct rte_eventdev *event_dev)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        uint8_t nb_ports = dev->nb_event_ports * (dev->dual_ws ? 2 : 1);
        struct otx2_mbox *mbox = dev->mbox;
        struct msix_offset_rsp *msix_rsp;
        int i, rc;

        /* Get SSO and SSOW MSIX vector offsets */
        otx2_mbox_alloc_msg_msix_offset(mbox);
        rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);
        if (rc < 0) /* Don't touch the response if the mailbox call failed */
                return rc;

        for (i = 0; i < nb_ports; i++)
                dev->ssow_msixoff[i] = msix_rsp->ssow_msixoff[i];

        for (i = 0; i < dev->nb_event_queues; i++)
                dev->sso_msixoff[i] = msix_rsp->sso_msixoff[i];

        return rc;
}

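/*
 * Resolve the fastpath dequeue/Tx handlers at runtime. Each table below is
 * indexed by seven boolean Rx (or Tx) offload flags, so every flag
 * combination maps to a handler specialized at compile time through the
 * R()/T() expansion macros.
 */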
void
sso_fastpath_fns_set(struct rte_eventdev *event_dev)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        /* Single WS modes */
        const event_dequeue_t ssogws_deq[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                      \
                [f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        const event_dequeue_burst_t ssogws_deq_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                      \
                [f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        const event_dequeue_t ssogws_deq_timeout[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                      \
                [f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_timeout_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        const event_dequeue_burst_t
                ssogws_deq_timeout_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                      \
                [f6][f5][f4][f3][f2][f1][f0] =                          \
                        otx2_ssogws_deq_timeout_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        const event_dequeue_t ssogws_deq_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                      \
                [f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_seg_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        const event_dequeue_burst_t
                ssogws_deq_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                      \
                [f6][f5][f4][f3][f2][f1][f0] =                          \
                        otx2_ssogws_deq_seg_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        const event_dequeue_t ssogws_deq_seg_timeout[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                      \
                [f6][f5][f4][f3][f2][f1][f0] =                          \
                        otx2_ssogws_deq_seg_timeout_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        const event_dequeue_burst_t
                ssogws_deq_seg_timeout_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                      \
                [f6][f5][f4][f3][f2][f1][f0] =                          \
                        otx2_ssogws_deq_seg_timeout_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        /* Dual WS modes */
        const event_dequeue_t ssogws_dual_deq[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                      \
                [f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        const event_dequeue_burst_t
                ssogws_dual_deq_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                      \
                [f6][f5][f4][f3][f2][f1][f0] =                          \
                        otx2_ssogws_dual_deq_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        const event_dequeue_t ssogws_dual_deq_timeout[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                      \
                [f6][f5][f4][f3][f2][f1][f0] =                          \
                        otx2_ssogws_dual_deq_timeout_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        const event_dequeue_burst_t
                ssogws_dual_deq_timeout_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                      \
                [f6][f5][f4][f3][f2][f1][f0] =                          \
                        otx2_ssogws_dual_deq_timeout_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        const event_dequeue_t ssogws_dual_deq_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                      \
                [f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_seg_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        const event_dequeue_burst_t
                ssogws_dual_deq_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                      \
                [f6][f5][f4][f3][f2][f1][f0] =                          \
                        otx2_ssogws_dual_deq_seg_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        const event_dequeue_t
                ssogws_dual_deq_seg_timeout[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                      \
                [f6][f5][f4][f3][f2][f1][f0] =                          \
                        otx2_ssogws_dual_deq_seg_timeout_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        const event_dequeue_burst_t
                ssogws_dual_deq_seg_timeout_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                      \
                [f6][f5][f4][f3][f2][f1][f0] =                          \
                        otx2_ssogws_dual_deq_seg_timeout_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        /* Tx modes */
        const event_tx_adapter_enqueue
                ssogws_tx_adptr_enq[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                  \
                [f6][f5][f4][f3][f2][f1][f0] =                          \
                        otx2_ssogws_tx_adptr_enq_ ## name,
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
        };

        const event_tx_adapter_enqueue
                ssogws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                  \
                [f6][f5][f4][f3][f2][f1][f0] =                          \
                        otx2_ssogws_tx_adptr_enq_seg_ ## name,
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
        };

        const event_tx_adapter_enqueue
                ssogws_dual_tx_adptr_enq[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                  \
                [f6][f5][f4][f3][f2][f1][f0] =                          \
                        otx2_ssogws_dual_tx_adptr_enq_ ## name,
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
        };

        const event_tx_adapter_enqueue
                ssogws_dual_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                  \
                [f6][f5][f4][f3][f2][f1][f0] =                          \
                        otx2_ssogws_dual_tx_adptr_enq_seg_ ## name,
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
        };

        event_dev->enqueue                      = otx2_ssogws_enq;
        event_dev->enqueue_burst                = otx2_ssogws_enq_burst;
        event_dev->enqueue_new_burst            = otx2_ssogws_enq_new_burst;
        event_dev->enqueue_forward_burst        = otx2_ssogws_enq_fwd_burst;
        if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
                event_dev->dequeue              = ssogws_deq_seg
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
                event_dev->dequeue_burst        = ssogws_deq_seg_burst
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
                if (dev->is_timeout_deq) {
                        event_dev->dequeue      = ssogws_deq_seg_timeout
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
                        event_dev->dequeue_burst        =
                                ssogws_deq_seg_timeout_burst
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
                }
        } else {
                event_dev->dequeue                      = ssogws_deq
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
                event_dev->dequeue_burst                = ssogws_deq_burst
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
                if (dev->is_timeout_deq) {
                        event_dev->dequeue              = ssogws_deq_timeout
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
                        event_dev->dequeue_burst        =
                                ssogws_deq_timeout_burst
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
                }
        }

        if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) {
                /* [SEC] [TSO] [TSTAMP] [MBUF_NOFF] [VLAN_QINQ]
                 * [OL3_OL4_CSUM] [L3_L4_CSUM]
                 */
                event_dev->txa_enqueue = ssogws_tx_adptr_enq_seg
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_SECURITY_F)]
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
        } else {
                event_dev->txa_enqueue = ssogws_tx_adptr_enq
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_SECURITY_F)]
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
        }
        event_dev->ca_enqueue = otx2_ssogws_ca_enq;

        if (dev->dual_ws) {
                event_dev->enqueue              = otx2_ssogws_dual_enq;
                event_dev->enqueue_burst        = otx2_ssogws_dual_enq_burst;
                event_dev->enqueue_new_burst    =
                                        otx2_ssogws_dual_enq_new_burst;
                event_dev->enqueue_forward_burst =
                                        otx2_ssogws_dual_enq_fwd_burst;

                if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
                        event_dev->dequeue      = ssogws_dual_deq_seg
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_SECURITY_F)]
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_TSTAMP_F)]
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_CHECKSUM_F)]
                                [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
                                [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
                        event_dev->dequeue_burst = ssogws_dual_deq_seg_burst
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_SECURITY_F)]
                                [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_CHECKSUM_F)]
                                [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
                                [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
                        if (dev->is_timeout_deq) {
                                event_dev->dequeue      =
                                        ssogws_dual_deq_seg_timeout
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_SECURITY_F)]
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_TSTAMP_F)]
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_CHECKSUM_F)]
                                        [!!(dev->rx_offloads &
                                                        NIX_RX_OFFLOAD_PTYPE_F)]
                                        [!!(dev->rx_offloads &
                                                        NIX_RX_OFFLOAD_RSS_F)];
                                event_dev->dequeue_burst =
                                        ssogws_dual_deq_seg_timeout_burst
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_SECURITY_F)]
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_TSTAMP_F)]
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_CHECKSUM_F)]
                                        [!!(dev->rx_offloads &
                                                        NIX_RX_OFFLOAD_PTYPE_F)]
                                        [!!(dev->rx_offloads &
                                                        NIX_RX_OFFLOAD_RSS_F)];
                        }
                } else {
                        event_dev->dequeue              = ssogws_dual_deq
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_SECURITY_F)]
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_TSTAMP_F)]
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_CHECKSUM_F)]
                                [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
                                [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
                        event_dev->dequeue_burst        = ssogws_dual_deq_burst
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_SECURITY_F)]
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_TSTAMP_F)]
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                                [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_CHECKSUM_F)]
                                [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
                                [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
                        if (dev->is_timeout_deq) {
                                event_dev->dequeue      =
                                        ssogws_dual_deq_timeout
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_SECURITY_F)]
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_TSTAMP_F)]
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_CHECKSUM_F)]
                                        [!!(dev->rx_offloads &
                                                        NIX_RX_OFFLOAD_PTYPE_F)]
                                        [!!(dev->rx_offloads &
                                                        NIX_RX_OFFLOAD_RSS_F)];
                                event_dev->dequeue_burst =
                                        ssogws_dual_deq_timeout_burst
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_SECURITY_F)]
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_TSTAMP_F)]
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                                        [!!(dev->rx_offloads &
                                                NIX_RX_OFFLOAD_CHECKSUM_F)]
                                        [!!(dev->rx_offloads &
                                                        NIX_RX_OFFLOAD_PTYPE_F)]
                                        [!!(dev->rx_offloads &
                                                        NIX_RX_OFFLOAD_RSS_F)];
                        }
                }

                if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) {
                        /* [SEC] [TSO] [TSTAMP] [MBUF_NOFF] [VLAN_QINQ]
                         * [OL3_OL4_CSUM] [L3_L4_CSUM]
                         */
                        event_dev->txa_enqueue = ssogws_dual_tx_adptr_enq_seg
                                [!!(dev->tx_offloads &
                                                NIX_TX_OFFLOAD_SECURITY_F)]
                                [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
                                [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
                                [!!(dev->tx_offloads &
                                                NIX_TX_OFFLOAD_MBUF_NOFF_F)]
                                [!!(dev->tx_offloads &
                                                NIX_TX_OFFLOAD_VLAN_QINQ_F)]
                                [!!(dev->tx_offloads &
                                                NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
                                [!!(dev->tx_offloads &
                                                NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
                } else {
                        event_dev->txa_enqueue = ssogws_dual_tx_adptr_enq
                                [!!(dev->tx_offloads &
                                                NIX_TX_OFFLOAD_SECURITY_F)]
                                [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
                                [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
                                [!!(dev->tx_offloads &
                                                NIX_TX_OFFLOAD_MBUF_NOFF_F)]
                                [!!(dev->tx_offloads &
                                                NIX_TX_OFFLOAD_VLAN_QINQ_F)]
                                [!!(dev->tx_offloads &
                                                NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
                                [!!(dev->tx_offloads &
                                                NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
                }
                event_dev->ca_enqueue = otx2_ssogws_dual_ca_enq;
        }

        event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
        rte_mb();
}

static void
otx2_sso_info_get(struct rte_eventdev *event_dev,
                  struct rte_event_dev_info *dev_info)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);

        dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX2_PMD);
        dev_info->min_dequeue_timeout_ns = dev->min_dequeue_timeout_ns;
        dev_info->max_dequeue_timeout_ns = dev->max_dequeue_timeout_ns;
        dev_info->max_event_queues = dev->max_event_queues;
        dev_info->max_event_queue_flows = (1ULL << 20);
        dev_info->max_event_queue_priority_levels = 8;
        dev_info->max_event_priority_levels = 1;
        dev_info->max_event_ports = dev->max_event_ports;
        dev_info->max_event_port_dequeue_depth = 1;
        dev_info->max_event_port_enqueue_depth = 1;
        dev_info->max_num_events = dev->max_num_events;
        dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
                                        RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
                                        RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
                                        RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
                                        RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
                                        RTE_EVENT_DEV_CAP_NONSEQ_MODE |
                                        RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
}

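/*
 * Add or remove a single queue (HW group) from a port's membership mask by
 * writing one word to SSOW_LF_GWS_GRPMSK_CHG: the group index, the mask-set
 * selector and the enable bit, with the remaining mask words flagged as
 * "don't modify".
 */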
static void
sso_port_link_modify(struct otx2_ssogws *ws, uint8_t queue, uint8_t enable)
{
        uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);
        uint64_t val;

        val = queue;
        val |= 0ULL << 12; /* SET 0 */
        val |= 0x8000800080000000; /* Don't modify rest of the masks */
        val |= (uint64_t)enable << 14; /* Enable/Disable Membership. */

        otx2_write64(val, base + SSOW_LF_GWS_GRPMSK_CHG);
}

static int
otx2_sso_port_link(struct rte_eventdev *event_dev, void *port,
                   const uint8_t queues[], const uint8_t priorities[],
                   uint16_t nb_links)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        uint8_t port_id = 0;
        uint16_t link;

        RTE_SET_USED(priorities);
        for (link = 0; link < nb_links; link++) {
                if (dev->dual_ws) {
                        struct otx2_ssogws_dual *ws = port;

                        port_id = ws->port;
                        sso_port_link_modify((struct otx2_ssogws *)
                                        &ws->ws_state[0], queues[link], true);
                        sso_port_link_modify((struct otx2_ssogws *)
                                        &ws->ws_state[1], queues[link], true);
                } else {
                        struct otx2_ssogws *ws = port;

                        port_id = ws->port;
                        sso_port_link_modify(ws, queues[link], true);
                }
        }
        sso_func_trace("Port=%d nb_links=%d", port_id, nb_links);

        return (int)nb_links;
}

static int
otx2_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
                     uint8_t queues[], uint16_t nb_unlinks)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        uint8_t port_id = 0;
        uint16_t unlink;

        for (unlink = 0; unlink < nb_unlinks; unlink++) {
                if (dev->dual_ws) {
                        struct otx2_ssogws_dual *ws = port;

                        port_id = ws->port;
                        sso_port_link_modify((struct otx2_ssogws *)
                                        &ws->ws_state[0], queues[unlink],
                                        false);
                        sso_port_link_modify((struct otx2_ssogws *)
                                        &ws->ws_state[1], queues[unlink],
                                        false);
                } else {
                        struct otx2_ssogws *ws = port;

                        port_id = ws->port;
                        sso_port_link_modify(ws, queues[unlink], false);
                }
        }
        sso_func_trace("Port=%d nb_unlinks=%d", port_id, nb_unlinks);

        return (int)nb_unlinks;
}

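/*
 * Ask the admin function (AF) to attach or detach SSO/SSOW LFs for this
 * device. Attach modifies the existing resource set; detach is partial so
 * other resources owned by the device are left in place.
 */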
static int
sso_hw_lf_cfg(struct otx2_mbox *mbox, enum otx2_sso_lf_type type,
              uint16_t nb_lf, uint8_t attach)
{
        if (attach) {
                struct rsrc_attach_req *req;

                req = otx2_mbox_alloc_msg_attach_resources(mbox);
                switch (type) {
                case SSO_LF_GGRP:
                        req->sso = nb_lf;
                        break;
                case SSO_LF_GWS:
                        req->ssow = nb_lf;
                        break;
                default:
                        return -EINVAL;
                }
                req->modify = true;
                if (otx2_mbox_process(mbox) < 0)
                        return -EIO;
        } else {
                struct rsrc_detach_req *req;

                req = otx2_mbox_alloc_msg_detach_resources(mbox);
                switch (type) {
                case SSO_LF_GGRP:
                        req->sso = true;
                        break;
                case SSO_LF_GWS:
                        req->ssow = true;
                        break;
                default:
                        return -EINVAL;
                }
                req->partial = true;
                if (otx2_mbox_process(mbox) < 0)
                        return -EIO;
        }

        return 0;
}

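/*
 * Allocate or free the attached LFs. On a successful GGRP allocation the
 * response also carries the XAQ geometry (buffer size, work queue entries
 * per XAQ, in-unit entries) needed later when sizing the XAQ pool.
 */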
static int
sso_lf_cfg(struct otx2_sso_evdev *dev, struct otx2_mbox *mbox,
           enum otx2_sso_lf_type type, uint16_t nb_lf, uint8_t alloc)
{
        void *rsp;
        int rc;

        if (alloc) {
                switch (type) {
                case SSO_LF_GGRP:
                        {
                        struct sso_lf_alloc_req *req_ggrp;
                        req_ggrp = otx2_mbox_alloc_msg_sso_lf_alloc(mbox);
                        req_ggrp->hwgrps = nb_lf;
                        }
                        break;
                case SSO_LF_GWS:
                        {
                        struct ssow_lf_alloc_req *req_hws;
                        req_hws = otx2_mbox_alloc_msg_ssow_lf_alloc(mbox);
                        req_hws->hws = nb_lf;
                        }
                        break;
                default:
                        return -EINVAL;
                }
        } else {
                switch (type) {
                case SSO_LF_GGRP:
                        {
                        struct sso_lf_free_req *req_ggrp;
                        req_ggrp = otx2_mbox_alloc_msg_sso_lf_free(mbox);
                        req_ggrp->hwgrps = nb_lf;
                        }
                        break;
                case SSO_LF_GWS:
                        {
                        struct ssow_lf_free_req *req_hws;
                        req_hws = otx2_mbox_alloc_msg_ssow_lf_free(mbox);
                        req_hws->hws = nb_lf;
                        }
                        break;
                default:
                        return -EINVAL;
                }
        }

        rc = otx2_mbox_process_msg_tmo(mbox, (void **)&rsp, ~0);
        if (rc < 0)
                return rc;

        if (alloc && type == SSO_LF_GGRP) {
                struct sso_lf_alloc_rsp *rsp_ggrp = rsp;

                dev->xaq_buf_size = rsp_ggrp->xaq_buf_size;
                dev->xae_waes = rsp_ggrp->xaq_wq_entries;
                dev->iue = rsp_ggrp->in_unit_entries;
        }

        return 0;
}

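/*
 * Release an event port: unlink every queue from the (single or dual) work
 * slot, then clear both the port state and the cookie that precedes it
 * before freeing the allocation.
 */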
static void
otx2_sso_port_release(void *port)
{
        struct otx2_ssogws_cookie *gws_cookie = ssogws_get_cookie(port);
        struct otx2_sso_evdev *dev;
        int i;

        if (!gws_cookie->configured)
                goto free;

        dev = sso_pmd_priv(gws_cookie->event_dev);
        if (dev->dual_ws) {
                struct otx2_ssogws_dual *ws = port;

                for (i = 0; i < dev->nb_event_queues; i++) {
                        sso_port_link_modify((struct otx2_ssogws *)
                                             &ws->ws_state[0], i, false);
                        sso_port_link_modify((struct otx2_ssogws *)
                                             &ws->ws_state[1], i, false);
                }
                memset(ws, 0, sizeof(*ws));
        } else {
                struct otx2_ssogws *ws = port;

                for (i = 0; i < dev->nb_event_queues; i++)
                        sso_port_link_modify(ws, i, false);
                memset(ws, 0, sizeof(*ws));
        }

        memset(gws_cookie, 0, sizeof(*gws_cookie));

free:
        rte_free(gws_cookie);
}

static void
otx2_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
{
        RTE_SET_USED(event_dev);
        RTE_SET_USED(queue_id);
}

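/*
 * Re-apply the port-to-queue links recorded in the eventdev link map after
 * a reconfigure; entries marked 0xdead are unlinked slots and are skipped.
 */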
static void
sso_restore_links(const struct rte_eventdev *event_dev)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        uint16_t *links_map;
        int i, j;

        for (i = 0; i < dev->nb_event_ports; i++) {
                links_map = event_dev->data->links_map;
                /* Point links_map to this port specific area */
                links_map += (i * RTE_EVENT_MAX_QUEUES_PER_DEV);
                if (dev->dual_ws) {
                        struct otx2_ssogws_dual *ws;

                        ws = event_dev->data->ports[i];
                        for (j = 0; j < dev->nb_event_queues; j++) {
                                if (links_map[j] == 0xdead)
                                        continue;
                                sso_port_link_modify((struct otx2_ssogws *)
                                                &ws->ws_state[0], j, true);
                                sso_port_link_modify((struct otx2_ssogws *)
                                                &ws->ws_state[1], j, true);
                                sso_func_trace("Restoring port %d queue %d "
                                                "link", i, j);
                        }
                } else {
                        struct otx2_ssogws *ws;

                        ws = event_dev->data->ports[i];
                        for (j = 0; j < dev->nb_event_queues; j++) {
                                if (links_map[j] == 0xdead)
                                        continue;
                                sso_port_link_modify(ws, j, true);
                                sso_func_trace("Restoring port %d queue %d "
                                                "link", i, j);
                        }
                }
        }
}

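/* Precompute the per-slot GWS register addresses used on the fastpath. */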
static void
sso_set_port_ops(struct otx2_ssogws *ws, uintptr_t base)
{
        ws->tag_op              = base + SSOW_LF_GWS_TAG;
        ws->wqp_op              = base + SSOW_LF_GWS_WQP;
        ws->getwrk_op           = base + SSOW_LF_GWS_OP_GET_WORK;
        ws->swtag_flush_op      = base + SSOW_LF_GWS_OP_SWTAG_FLUSH;
        ws->swtag_norm_op       = base + SSOW_LF_GWS_OP_SWTAG_NORM;
        ws->swtag_desched_op    = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
}

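/*
 * Dual workslot mode pairs two GWS LFs per event port (hence twice the LF
 * count); each port's memory is preceded by a hidden cache line that holds
 * the otx2_ssogws_cookie.
 */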
static int
sso_configure_dual_ports(const struct rte_eventdev *event_dev)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        struct otx2_mbox *mbox = dev->mbox;
        uint8_t vws = 0;
        uint8_t nb_lf;
        int i, rc;

        otx2_sso_dbg("Configuring event ports %d", dev->nb_event_ports);

        nb_lf = dev->nb_event_ports * 2;
        /* Ask AF to attach required LFs. */
        rc = sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, true);
        if (rc < 0) {
                otx2_err("Failed to attach SSO GWS LF");
                return -ENODEV;
        }

        if (sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, true) < 0) {
                sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
                otx2_err("Failed to init SSO GWS LF");
                return -ENODEV;
        }

        for (i = 0; i < dev->nb_event_ports; i++) {
                struct otx2_ssogws_cookie *gws_cookie;
                struct otx2_ssogws_dual *ws;
                uintptr_t base;

                if (event_dev->data->ports[i] != NULL) {
                        ws = event_dev->data->ports[i];
                } else {
                        /* Allocate event port memory */
                        ws = rte_zmalloc_socket("otx2_sso_ws",
                                        sizeof(struct otx2_ssogws_dual) +
                                        RTE_CACHE_LINE_SIZE,
                                        RTE_CACHE_LINE_SIZE,
                                        event_dev->data->socket_id);
                        if (ws == NULL) {
                                otx2_err("Failed to alloc memory for port=%d",
                                         i);
                                rc = -ENOMEM;
                                break;
                        }

                        /* First cache line is reserved for cookie */
                        ws = (struct otx2_ssogws_dual *)
                                ((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
                }

                ws->port = i;
                base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | vws << 12);
                sso_set_port_ops((struct otx2_ssogws *)&ws->ws_state[0], base);
                ws->base[0] = base;
                vws++;

                base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | vws << 12);
                sso_set_port_ops((struct otx2_ssogws *)&ws->ws_state[1], base);
                ws->base[1] = base;
                vws++;

                gws_cookie = ssogws_get_cookie(ws);
                gws_cookie->event_dev = event_dev;
                gws_cookie->configured = 1;

                event_dev->data->ports[i] = ws;
        }

        if (rc < 0) {
                sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, false);
                sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
        }

        return rc;
}

static int
sso_configure_ports(const struct rte_eventdev *event_dev)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        struct otx2_mbox *mbox = dev->mbox;
        uint8_t nb_lf;
        int i, rc;

        otx2_sso_dbg("Configuring event ports %d", dev->nb_event_ports);

        nb_lf = dev->nb_event_ports;
        /* Ask AF to attach required LFs. */
        rc = sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, true);
        if (rc < 0) {
                otx2_err("Failed to attach SSO GWS LF");
                return -ENODEV;
        }

        if (sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, true) < 0) {
                sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
                otx2_err("Failed to init SSO GWS LF");
                return -ENODEV;
        }

        for (i = 0; i < nb_lf; i++) {
                struct otx2_ssogws_cookie *gws_cookie;
                struct otx2_ssogws *ws;
                uintptr_t base;

                if (event_dev->data->ports[i] != NULL) {
                        ws = event_dev->data->ports[i];
                } else {
                        /* Allocate event port memory */
                        ws = rte_zmalloc_socket("otx2_sso_ws",
                                                sizeof(struct otx2_ssogws) +
                                                RTE_CACHE_LINE_SIZE,
                                                RTE_CACHE_LINE_SIZE,
                                                event_dev->data->socket_id);
                        if (ws == NULL) {
                                otx2_err("Failed to alloc memory for port=%d",
                                         i);
                                rc = -ENOMEM;
                                break;
                        }

                        /* First cache line is reserved for cookie */
                        ws = (struct otx2_ssogws *)
                                ((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
                }

                ws->port = i;
                base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | i << 12);
                sso_set_port_ops(ws, base);
                ws->base = base;

                gws_cookie = ssogws_get_cookie(ws);
                gws_cookie->event_dev = event_dev;
                gws_cookie->configured = 1;

                event_dev->data->ports[i] = ws;
        }

        if (rc < 0) {
                sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, false);
                sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
        }

        return rc;
}

static int
sso_configure_queues(const struct rte_eventdev *event_dev)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        struct otx2_mbox *mbox = dev->mbox;
        uint8_t nb_lf;
        int rc;

        otx2_sso_dbg("Configuring event queues %d", dev->nb_event_queues);

        nb_lf = dev->nb_event_queues;
        /* Ask AF to attach required LFs. */
        rc = sso_hw_lf_cfg(mbox, SSO_LF_GGRP, nb_lf, true);
        if (rc < 0) {
                otx2_err("Failed to attach SSO GGRP LF");
                return -ENODEV;
        }

        if (sso_lf_cfg(dev, mbox, SSO_LF_GGRP, nb_lf, true) < 0) {
                sso_hw_lf_cfg(mbox, SSO_LF_GGRP, nb_lf, false);
                otx2_err("Failed to init SSO GGRP LF");
                return -ENODEV;
        }

        return rc;
}

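/*
 * Size and allocate the XAQ buffer pool. Per the HRM reference below, the
 * count is a per-group cache reserve plus enough buffers for the requested
 * (or in-unit) events, where each XAQ buffer holds xae_waes work queue
 * entries; flow control is done through an NPA aura whose count lives in
 * the OTX2_SSO_FC_NAME memzone.
 */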
static int
sso_xaq_allocate(struct otx2_sso_evdev *dev)
{
        const struct rte_memzone *mz;
        struct npa_aura_s *aura;
        static int reconfig_cnt;
        char pool_name[RTE_MEMZONE_NAMESIZE];
        uint32_t xaq_cnt;
        int rc;

        if (dev->xaq_pool)
                rte_mempool_free(dev->xaq_pool);

        /* Allocate memory for add work backpressure. */
        mz = rte_memzone_lookup(OTX2_SSO_FC_NAME);
        if (mz == NULL)
                mz = rte_memzone_reserve_aligned(OTX2_SSO_FC_NAME,
                                                 OTX2_ALIGN +
                                                 sizeof(struct npa_aura_s),
                                                 rte_socket_id(),
                                                 RTE_MEMZONE_IOVA_CONTIG,
                                                 OTX2_ALIGN);
        if (mz == NULL) {
                otx2_err("Failed to allocate mem for fcmem");
                return -ENOMEM;
        }

        dev->fc_iova = mz->iova;
        dev->fc_mem = mz->addr;
        *dev->fc_mem = 0;
        aura = (struct npa_aura_s *)((uintptr_t)dev->fc_mem + OTX2_ALIGN);
        memset(aura, 0, sizeof(struct npa_aura_s));

        aura->fc_ena = 1;
        aura->fc_addr = dev->fc_iova;
        aura->fc_hyst_bits = 0; /* Store count on all updates */

        /* Taken from HRM 14.3.3(4) */
        xaq_cnt = dev->nb_event_queues * OTX2_SSO_XAQ_CACHE_CNT;
        if (dev->xae_cnt)
                xaq_cnt += dev->xae_cnt / dev->xae_waes;
        else if (dev->adptr_xae_cnt)
                xaq_cnt += (dev->adptr_xae_cnt / dev->xae_waes) +
                        (OTX2_SSO_XAQ_SLACK * dev->nb_event_queues);
        else
                xaq_cnt += (dev->iue / dev->xae_waes) +
                        (OTX2_SSO_XAQ_SLACK * dev->nb_event_queues);

        otx2_sso_dbg("Configuring %d xaq buffers", xaq_cnt);
        /* Set up the XAQ pool based on the number of queues. */
        snprintf(pool_name, sizeof(pool_name), "otx2_xaq_buf_pool_%d",
                 reconfig_cnt);
        dev->xaq_pool = (void *)rte_mempool_create_empty(pool_name,
                        xaq_cnt, dev->xaq_buf_size, 0, 0,
                        rte_socket_id(), 0);

        if (dev->xaq_pool == NULL) {
                otx2_err("Unable to create empty mempool.");
                rte_memzone_free(mz);
                return -ENOMEM;
        }

        rc = rte_mempool_set_ops_byname(dev->xaq_pool,
                                        rte_mbuf_platform_mempool_ops(), aura);
        if (rc != 0) {
                otx2_err("Unable to set xaqpool ops.");
                goto alloc_fail;
        }

        rc = rte_mempool_populate_default(dev->xaq_pool);
        if (rc < 0) {
                otx2_err("Unable to populate xaqpool.");
                goto alloc_fail;
        }
        reconfig_cnt++;
        /* When SW does addwork (enqueue) check if there is space in XAQ by
         * comparing fc_addr above against the xaq_lmt calculated below.
         * There should be a minimum headroom (OTX2_SSO_XAQ_SLACK / 2) for SSO
         * to request XAQ to cache them even before enqueue is called.
         */
        dev->xaq_lmt = xaq_cnt - (OTX2_SSO_XAQ_SLACK / 2 *
                                  dev->nb_event_queues);
        dev->nb_xaq_cfg = xaq_cnt;

        return 0;
alloc_fail:
        rte_mempool_free(dev->xaq_pool);
        rte_memzone_free(mz);
        return rc;
}

static int
sso_ggrp_alloc_xaq(struct otx2_sso_evdev *dev)
{
        struct otx2_mbox *mbox = dev->mbox;
        struct sso_hw_setconfig *req;

        otx2_sso_dbg("Configuring XAQ for GGRPs");
        req = otx2_mbox_alloc_msg_sso_hw_setconfig(mbox);
        req->npa_pf_func = otx2_npa_pf_func_get();
        req->npa_aura_id = npa_lf_aura_handle_to_aura(dev->xaq_pool->pool_id);
        req->hwgrps = dev->nb_event_queues;

        return otx2_mbox_process(mbox);
}

static int
sso_ggrp_free_xaq(struct otx2_sso_evdev *dev)
{
        struct otx2_mbox *mbox = dev->mbox;
        struct sso_release_xaq *req;

        otx2_sso_dbg("Freeing XAQ for GGRPs");
        req = otx2_mbox_alloc_msg_sso_hw_release_xaq_aura(mbox);
        req->hwgrps = dev->nb_event_queues;

        return otx2_mbox_process(mbox);
}

static void
sso_lf_teardown(struct otx2_sso_evdev *dev,
                enum otx2_sso_lf_type lf_type)
{
        uint8_t nb_lf;

        switch (lf_type) {
        case SSO_LF_GGRP:
                nb_lf = dev->nb_event_queues;
                break;
        case SSO_LF_GWS:
                nb_lf = dev->nb_event_ports;
                nb_lf *= dev->dual_ws ? 2 : 1;
                break;
        default:
                return;
        }

        sso_lf_cfg(dev, dev->mbox, lf_type, nb_lf, false);
        sso_hw_lf_cfg(dev->mbox, lf_type, nb_lf, false);
}

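/*
 * Device configure: validate the requested timeout/queue/port limits, tear
 * down any previous LF state, re-attach and initialize GWS and GGRP LFs,
 * size the XAQ pool, restore prior links and re-register IRQs.
 */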
static int
otx2_sso_configure(const struct rte_eventdev *event_dev)
{
        struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        uint32_t deq_tmo_ns;
        int rc;

        sso_func_trace();
        deq_tmo_ns = conf->dequeue_timeout_ns;

        if (deq_tmo_ns == 0)
                deq_tmo_ns = dev->min_dequeue_timeout_ns;

        if (deq_tmo_ns < dev->min_dequeue_timeout_ns ||
            deq_tmo_ns > dev->max_dequeue_timeout_ns) {
                otx2_err("Unsupported dequeue timeout requested");
                return -EINVAL;
        }

        if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
                dev->is_timeout_deq = 1;

        dev->deq_tmo_ns = deq_tmo_ns;

        if (conf->nb_event_ports > dev->max_event_ports ||
            conf->nb_event_queues > dev->max_event_queues) {
                otx2_err("Unsupported event queues/ports requested");
                return -EINVAL;
        }

        if (conf->nb_event_port_dequeue_depth > 1) {
                otx2_err("Unsupported event port deq depth requested");
                return -EINVAL;
        }

        if (conf->nb_event_port_enqueue_depth > 1) {
                otx2_err("Unsupported event port enq depth requested");
                return -EINVAL;
        }

        if (dev->configured)
                sso_unregister_irqs(event_dev);

        if (dev->nb_event_queues) {
                /* De-init any previous queues. */
                sso_lf_teardown(dev, SSO_LF_GGRP);
        }
        if (dev->nb_event_ports) {
                /* De-init any previous ports. */
                sso_lf_teardown(dev, SSO_LF_GWS);
        }

        dev->nb_event_queues = conf->nb_event_queues;
        dev->nb_event_ports = conf->nb_event_ports;

        if (dev->dual_ws)
                rc = sso_configure_dual_ports(event_dev);
        else
                rc = sso_configure_ports(event_dev);

        if (rc < 0) {
                otx2_err("Failed to configure event ports");
                return -ENODEV;
        }

        if (sso_configure_queues(event_dev) < 0) {
                otx2_err("Failed to configure event queues");
                rc = -ENODEV;
                goto teardown_hws;
        }

        if (sso_xaq_allocate(dev) < 0) {
                rc = -ENOMEM;
                goto teardown_hwggrp;
        }

        /* Restore any prior port-queue mapping. */
        sso_restore_links(event_dev);
        rc = sso_ggrp_alloc_xaq(dev);
        if (rc < 0) {
                otx2_err("Failed to alloc xaq to ggrp %d", rc);
                goto teardown_hwggrp;
        }

        rc = sso_get_msix_offsets(event_dev);
        if (rc < 0) {
                otx2_err("Failed to get msix offsets %d", rc);
                goto teardown_hwggrp;
        }

        rc = sso_register_irqs(event_dev);
        if (rc < 0) {
                otx2_err("Failed to register irq %d", rc);
                goto teardown_hwggrp;
        }

        dev->configured = 1;
        rte_mb();

        return 0;
teardown_hwggrp:
        sso_lf_teardown(dev, SSO_LF_GGRP);
teardown_hws:
        sso_lf_teardown(dev, SSO_LF_GWS);
        dev->nb_event_queues = 0;
        dev->nb_event_ports = 0;
        dev->configured = 0;
        return rc;
}


static void
otx2_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
                        struct rte_event_queue_conf *queue_conf)
{
        RTE_SET_USED(event_dev);
        RTE_SET_USED(queue_id);

        queue_conf->nb_atomic_flows = (1ULL << 20);
        queue_conf->nb_atomic_order_sequences = (1ULL << 20);
        queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
        queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static int
otx2_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
                     const struct rte_event_queue_conf *queue_conf)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        struct otx2_mbox *mbox = dev->mbox;
        struct sso_grp_priority *req;
        int rc;

        sso_func_trace("Queue=%d prio=%d", queue_id, queue_conf->priority);

        req = otx2_mbox_alloc_msg_sso_grp_set_priority(mbox);
        req->grp = queue_id;
        req->weight = 0xFF;
        req->affinity = 0xFF;
        /* Normalize <0-255> to <0-7>, e.g. HIGHEST(0)->0, NORMAL(128)->4,
         * LOWEST(255)->7.
         */
        req->priority = queue_conf->priority / 32;

        rc = otx2_mbox_process(mbox);
        if (rc < 0) {
                otx2_err("Failed to set priority queue=%d", queue_id);
                return rc;
        }

        return 0;
}

static void
otx2_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
                       struct rte_event_port_conf *port_conf)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);

        RTE_SET_USED(port_id);
        port_conf->new_event_threshold = dev->max_num_events;
        port_conf->dequeue_depth = 1;
        port_conf->enqueue_depth = 1;
}

static int
otx2_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
                    const struct rte_event_port_conf *port_conf)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        uintptr_t grps_base[OTX2_SSO_MAX_VHGRP] = {0};
        uint64_t val;
        uint16_t q;

        sso_func_trace("Port=%d", port_id);
        RTE_SET_USED(port_conf);

        if (event_dev->data->ports[port_id] == NULL) {
                otx2_err("Invalid port id %d", port_id);
                return -EINVAL;
        }

        for (q = 0; q < dev->nb_event_queues; q++) {
                grps_base[q] = dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20 | q << 12);
                if (grps_base[q] == 0) {
                        otx2_err("Failed to get grp[%d] base addr", q);
                        return -EINVAL;
                }
        }

        /* Set get_work timeout for HWS; the value programmed is the
         * dequeue timeout in microseconds, minus one.
         */
        val = NSEC2USEC(dev->deq_tmo_ns) - 1;

        if (dev->dual_ws) {
                struct otx2_ssogws_dual *ws = event_dev->data->ports[port_id];

                rte_memcpy(ws->grps_base, grps_base,
                           sizeof(uintptr_t) * OTX2_SSO_MAX_VHGRP);
                ws->fc_mem = dev->fc_mem;
                ws->xaq_lmt = dev->xaq_lmt;
                ws->tstamp = dev->tstamp;
                otx2_write64(val, OTX2_SSOW_GET_BASE_ADDR(
                             ws->ws_state[0].getwrk_op) + SSOW_LF_GWS_NW_TIM);
                otx2_write64(val, OTX2_SSOW_GET_BASE_ADDR(
                             ws->ws_state[1].getwrk_op) + SSOW_LF_GWS_NW_TIM);
        } else {
                struct otx2_ssogws *ws = event_dev->data->ports[port_id];
                uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);

                rte_memcpy(ws->grps_base, grps_base,
                           sizeof(uintptr_t) * OTX2_SSO_MAX_VHGRP);
                ws->fc_mem = dev->fc_mem;
                ws->xaq_lmt = dev->xaq_lmt;
                ws->tstamp = dev->tstamp;
                otx2_write64(val, base + SSOW_LF_GWS_NW_TIM);
        }

        otx2_sso_dbg("Port=%d ws=%p", port_id, event_dev->data->ports[port_id]);

        return 0;
}

static int
otx2_sso_timeout_ticks(struct rte_eventdev *event_dev, uint64_t ns,
                       uint64_t *tmo_ticks)
{
        RTE_SET_USED(event_dev);
        *tmo_ticks = NSEC2TICK(ns, rte_get_timer_hz());

        return 0;
}
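
/*
 * A sketch of how an application consumes the op above: convert a
 * wall-clock timeout once, then pass the resulting tick count to the
 * dequeue burst API (dev_id/port_id are illustrative).
 *
 *	uint64_t ticks;
 *	struct rte_event ev;
 *
 *	rte_event_dequeue_timeout_ticks(dev_id, 10 * 1000, &ticks);
 *	rte_event_dequeue_burst(dev_id, port_id, &ev, 1, ticks);
 */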

static void
ssogws_dump(struct otx2_ssogws *ws, FILE *f)
{
        uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);

        fprintf(f, "SSOW_LF_GWS Base addr   0x%" PRIx64 "\n", (uint64_t)base);
        fprintf(f, "SSOW_LF_GWS_LINKS       0x%" PRIx64 "\n",
                otx2_read64(base + SSOW_LF_GWS_LINKS));
        fprintf(f, "SSOW_LF_GWS_PENDWQP     0x%" PRIx64 "\n",
                otx2_read64(base + SSOW_LF_GWS_PENDWQP));
        fprintf(f, "SSOW_LF_GWS_PENDSTATE   0x%" PRIx64 "\n",
                otx2_read64(base + SSOW_LF_GWS_PENDSTATE));
        fprintf(f, "SSOW_LF_GWS_NW_TIM      0x%" PRIx64 "\n",
                otx2_read64(base + SSOW_LF_GWS_NW_TIM));
        fprintf(f, "SSOW_LF_GWS_TAG         0x%" PRIx64 "\n",
                otx2_read64(base + SSOW_LF_GWS_TAG));
        fprintf(f, "SSOW_LF_GWS_WQP         0x%" PRIx64 "\n",
                otx2_read64(base + SSOW_LF_GWS_WQP));
        fprintf(f, "SSOW_LF_GWS_SWTP        0x%" PRIx64 "\n",
                otx2_read64(base + SSOW_LF_GWS_SWTP));
        fprintf(f, "SSOW_LF_GWS_PENDTAG     0x%" PRIx64 "\n",
                otx2_read64(base + SSOW_LF_GWS_PENDTAG));
}

static void
ssoggrp_dump(uintptr_t base, FILE *f)
{
        fprintf(f, "SSO_LF_GGRP Base addr   0x%" PRIx64 "\n", (uint64_t)base);
        fprintf(f, "SSO_LF_GGRP_QCTL        0x%" PRIx64 "\n",
                otx2_read64(base + SSO_LF_GGRP_QCTL));
        fprintf(f, "SSO_LF_GGRP_XAQ_CNT     0x%" PRIx64 "\n",
                otx2_read64(base + SSO_LF_GGRP_XAQ_CNT));
        fprintf(f, "SSO_LF_GGRP_INT_THR     0x%" PRIx64 "\n",
                otx2_read64(base + SSO_LF_GGRP_INT_THR));
        fprintf(f, "SSO_LF_GGRP_INT_CNT     0x%" PRIx64 "\n",
                otx2_read64(base + SSO_LF_GGRP_INT_CNT));
        fprintf(f, "SSO_LF_GGRP_AQ_CNT      0x%" PRIx64 "\n",
                otx2_read64(base + SSO_LF_GGRP_AQ_CNT));
        fprintf(f, "SSO_LF_GGRP_AQ_THR      0x%" PRIx64 "\n",
                otx2_read64(base + SSO_LF_GGRP_AQ_THR));
        fprintf(f, "SSO_LF_GGRP_MISC_CNT    0x%" PRIx64 "\n",
                otx2_read64(base + SSO_LF_GGRP_MISC_CNT));
}

static void
otx2_sso_dump(struct rte_eventdev *event_dev, FILE *f)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        uint8_t queue;
        uint8_t port;

        fprintf(f, "[%s] SSO running in [%s] mode\n", __func__, dev->dual_ws ?
                "dual_ws" : "single_ws");
        /* Dump SSOW registers */
        for (port = 0; port < dev->nb_event_ports; port++) {
                if (dev->dual_ws) {
                        struct otx2_ssogws_dual *ws =
                                event_dev->data->ports[port];

                        fprintf(f, "[%s] SSO dual workslot[%d] vws[%d] dump\n",
                                __func__, port, 0);
                        ssogws_dump((struct otx2_ssogws *)&ws->ws_state[0], f);
                        fprintf(f, "[%s] SSO dual workslot[%d] vws[%d] dump\n",
                                __func__, port, 1);
                        ssogws_dump((struct otx2_ssogws *)&ws->ws_state[1], f);
                } else {
                        fprintf(f, "[%s] SSO single workslot[%d] dump\n",
                                __func__, port);
                        ssogws_dump(event_dev->data->ports[port], f);
                }
        }

        /* Dump SSO registers */
        for (queue = 0; queue < dev->nb_event_queues; queue++) {
                fprintf(f, "[%s] SSO group[%d] dump\n", __func__, queue);
                if (dev->dual_ws) {
                        struct otx2_ssogws_dual *ws = event_dev->data->ports[0];

                        ssoggrp_dump(ws->grps_base[queue], f);
                } else {
                        struct otx2_ssogws *ws = event_dev->data->ports[0];

                        ssoggrp_dump(ws->grps_base[queue], f);
                }
        }
}

static void
otx2_handle_event(void *arg, struct rte_event event)
{
        struct rte_eventdev *event_dev = arg;

        if (event_dev->dev_ops->dev_stop_flush != NULL)
                event_dev->dev_ops->dev_stop_flush(event_dev->data->dev_id,
                                event, event_dev->data->dev_stop_flush_arg);
}
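
/*
 * Note: dev_stop_flush is the application callback registered through
 * rte_event_dev_stop_flush_callback_register(); otx2_handle_event() hands
 * each event drained by ssogws_flush_events() to it so that in-flight
 * events are not silently dropped on stop/reconfigure.
 */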

static void
sso_qos_cfg(struct rte_eventdev *event_dev)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        struct sso_grp_qos_cfg *req;
        uint16_t i;

        for (i = 0; i < dev->qos_queue_cnt; i++) {
                uint8_t xaq_prcnt = dev->qos_parse_data[i].xaq_prcnt;
                uint8_t iaq_prcnt = dev->qos_parse_data[i].iaq_prcnt;
                uint8_t taq_prcnt = dev->qos_parse_data[i].taq_prcnt;

                if (dev->qos_parse_data[i].queue >= dev->nb_event_queues)
                        continue;

                req = otx2_mbox_alloc_msg_sso_grp_qos_config(dev->mbox);
                /* Target the GGRP backing this event queue */
                req->grp = dev->qos_parse_data[i].queue;
                req->xaq_limit = (dev->nb_xaq_cfg *
                                  (xaq_prcnt ? xaq_prcnt : 100)) / 100;
                /* Pair each threshold with its own percentage and mask */
                req->taq_thr = (SSO_HWGRP_TAQ_MAX_THR_MASK *
                                (taq_prcnt ? taq_prcnt : 100)) / 100;
                req->iaq_thr = (SSO_HWGRP_IAQ_MAX_THR_MASK *
                                (iaq_prcnt ? iaq_prcnt : 100)) / 100;
        }

        if (dev->qos_queue_cnt)
                otx2_mbox_process(dev->mbox);
}
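
/*
 * Worked example (values are illustrative): with qos=[1-50-50-50], queue 1
 * gets xaq_limit = dev->nb_xaq_cfg * 50 / 100 and TAQ/IAQ thresholds of
 * half their respective MAX_THR masks; a percentage of 0 falls back to the
 * 100% default.
 */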

static void
sso_cleanup(struct rte_eventdev *event_dev, uint8_t enable)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        uint16_t i;

        for (i = 0; i < dev->nb_event_ports; i++) {
                if (dev->dual_ws) {
                        struct otx2_ssogws_dual *ws;

                        ws = event_dev->data->ports[i];
                        ssogws_reset((struct otx2_ssogws *)&ws->ws_state[0]);
                        ssogws_reset((struct otx2_ssogws *)&ws->ws_state[1]);
                        ws->swtag_req = 0;
                        ws->vws = 0;
                        ws->fc_mem = dev->fc_mem;
                        ws->xaq_lmt = dev->xaq_lmt;
                } else {
                        struct otx2_ssogws *ws;

                        ws = event_dev->data->ports[i];
                        ssogws_reset(ws);
                        ws->swtag_req = 0;
                        ws->fc_mem = dev->fc_mem;
                        ws->xaq_lmt = dev->xaq_lmt;
                }
        }

        rte_mb();
        if (dev->dual_ws) {
                struct otx2_ssogws_dual *ws = event_dev->data->ports[0];
                struct otx2_ssogws temp_ws;

                /* Copy the first half of dual port 0 into a temporary
                 * single-workslot view so the single-workslot flush helper
                 * can be reused.
                 */
                memcpy(&temp_ws, &ws->ws_state[0],
                       sizeof(struct otx2_ssogws_state));
                for (i = 0; i < dev->nb_event_queues; i++) {
                        /* Consume all the events through HWS0 */
                        ssogws_flush_events(&temp_ws, i, ws->grps_base[i],
                                            otx2_handle_event, event_dev);
                        /* Enable/Disable SSO GGRP */
                        otx2_write64(enable, ws->grps_base[i] +
                                     SSO_LF_GGRP_QCTL);
                }
        } else {
                struct otx2_ssogws *ws = event_dev->data->ports[0];

                for (i = 0; i < dev->nb_event_queues; i++) {
                        /* Consume all the events through HWS0 */
                        ssogws_flush_events(ws, i, ws->grps_base[i],
                                            otx2_handle_event, event_dev);
                        /* Enable/Disable SSO GGRP */
                        otx2_write64(enable, ws->grps_base[i] +
                                     SSO_LF_GGRP_QCTL);
                }
        }

        /* Reset the SSO GWS cache */
        otx2_mbox_alloc_msg_sso_ws_cache_inv(dev->mbox);
        otx2_mbox_process(dev->mbox);
}

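/*
 * Re-size the XAQ pool when the number of required in-flight events
 * changes after configure time (e.g. when an Rx or timer adapter is
 * added). If the device is running, the GGRPs are drained and disabled
 * first and re-enabled once the new pool is in place.
 */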
int
sso_xae_reconfigure(struct rte_eventdev *event_dev)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        int rc = 0;

        if (event_dev->data->dev_started)
                sso_cleanup(event_dev, 0);

        rc = sso_ggrp_free_xaq(dev);
        if (rc < 0) {
                otx2_err("Failed to free XAQ");
                return rc;
        }

        rte_mempool_free(dev->xaq_pool);
        dev->xaq_pool = NULL;
        rc = sso_xaq_allocate(dev);
        if (rc < 0) {
                otx2_err("Failed to alloc XAQ pool rc=%d", rc);
                return rc;
        }
        rc = sso_ggrp_alloc_xaq(dev);
        if (rc < 0) {
                otx2_err("Failed to alloc XAQ to GGRP rc=%d", rc);
                return rc;
        }

        rte_mb();
        if (event_dev->data->dev_started)
                sso_cleanup(event_dev, 1);

        return 0;
}

static int
otx2_sso_start(struct rte_eventdev *event_dev)
{
        sso_func_trace();
        sso_qos_cfg(event_dev);
        sso_cleanup(event_dev, 1);
        sso_fastpath_fns_set(event_dev);

        return 0;
}

static void
otx2_sso_stop(struct rte_eventdev *event_dev)
{
        sso_func_trace();
        sso_cleanup(event_dev, 0);
        rte_mb();
}

static int
otx2_sso_close(struct rte_eventdev *event_dev)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
        uint16_t i;

        if (!dev->configured)
                return 0;

        sso_unregister_irqs(event_dev);

        for (i = 0; i < dev->nb_event_queues; i++)
                all_queues[i] = i;

        /* Unlink all queues from every port before tearing down the LFs */
        for (i = 0; i < dev->nb_event_ports; i++)
                otx2_sso_port_unlink(event_dev, event_dev->data->ports[i],
                                     all_queues, dev->nb_event_queues);

        sso_lf_teardown(dev, SSO_LF_GGRP);
        sso_lf_teardown(dev, SSO_LF_GWS);
        dev->nb_event_ports = 0;
        dev->nb_event_queues = 0;
        rte_mempool_free(dev->xaq_pool);
        rte_memzone_free(rte_memzone_lookup(OTX2_SSO_FC_NAME));

        return 0;
}

/* Initialize and register the event driver with the DPDK application */
static struct rte_eventdev_ops otx2_sso_ops = {
        .dev_infos_get    = otx2_sso_info_get,
        .dev_configure    = otx2_sso_configure,
        .queue_def_conf   = otx2_sso_queue_def_conf,
        .queue_setup      = otx2_sso_queue_setup,
        .queue_release    = otx2_sso_queue_release,
        .port_def_conf    = otx2_sso_port_def_conf,
        .port_setup       = otx2_sso_port_setup,
        .port_release     = otx2_sso_port_release,
        .port_link        = otx2_sso_port_link,
        .port_unlink      = otx2_sso_port_unlink,
        .timeout_ticks    = otx2_sso_timeout_ticks,

        .eth_rx_adapter_caps_get  = otx2_sso_rx_adapter_caps_get,
        .eth_rx_adapter_queue_add = otx2_sso_rx_adapter_queue_add,
        .eth_rx_adapter_queue_del = otx2_sso_rx_adapter_queue_del,
        .eth_rx_adapter_start = otx2_sso_rx_adapter_start,
        .eth_rx_adapter_stop = otx2_sso_rx_adapter_stop,

        .eth_tx_adapter_caps_get = otx2_sso_tx_adapter_caps_get,
        .eth_tx_adapter_queue_add = otx2_sso_tx_adapter_queue_add,
        .eth_tx_adapter_queue_del = otx2_sso_tx_adapter_queue_del,

        .timer_adapter_caps_get = otx2_tim_caps_get,

        .crypto_adapter_caps_get = otx2_ca_caps_get,
        .crypto_adapter_queue_pair_add = otx2_ca_qp_add,
        .crypto_adapter_queue_pair_del = otx2_ca_qp_del,

        .xstats_get       = otx2_sso_xstats_get,
        .xstats_reset     = otx2_sso_xstats_reset,
        .xstats_get_names = otx2_sso_xstats_get_names,

        .dump             = otx2_sso_dump,
        .dev_start        = otx2_sso_start,
        .dev_stop         = otx2_sso_stop,
        .dev_close        = otx2_sso_close,
        .dev_selftest     = otx2_sso_selftest,
};

#define OTX2_SSO_XAE_CNT        "xae_cnt"
#define OTX2_SSO_SINGLE_WS      "single_ws"
#define OTX2_SSO_GGRP_QOS       "qos"

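/*
 * Parse one "[Qx-XAQ-TAQ-IAQ]" group, e.g. "1-50-50-50": the '-' separated
 * tokens fill struct otx2_sso_qos byte by byte (queue id first, then the
 * XAQ/TAQ/IAQ percentages), so exactly four tokens are required.
 */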
static void
parse_queue_param(char *value, void *opaque)
{
        struct otx2_sso_qos queue_qos = {0};
        uint8_t *val = (uint8_t *)&queue_qos;
        struct otx2_sso_evdev *dev = opaque;
        char *tok = strtok(value, "-");
        struct otx2_sso_qos *old_ptr;

        if (!strlen(value))
                return;

        /* Bound the walk to the struct to avoid overflowing on bad input */
        while (tok != NULL && val < (uint8_t *)(&queue_qos + 1)) {
                *val = atoi(tok);
                tok = strtok(NULL, "-");
                val++;
        }

        if (tok != NULL || val != (&queue_qos.iaq_prcnt + 1)) {
                otx2_err("Invalid QoS parameter, expected [Qx-XAQ-TAQ-IAQ]");
                return;
        }

        dev->qos_queue_cnt++;
        old_ptr = dev->qos_parse_data;
        dev->qos_parse_data = rte_realloc(dev->qos_parse_data,
                                          sizeof(struct otx2_sso_qos) *
                                          dev->qos_queue_cnt, 0);
        if (dev->qos_parse_data == NULL) {
                dev->qos_parse_data = old_ptr;
                dev->qos_queue_cnt--;
                return;
        }
        dev->qos_parse_data[dev->qos_queue_cnt - 1] = queue_qos;
}

static void
parse_qos_list(const char *value, void *opaque)
{
        char *s = strdup(value);
        char *start = NULL;
        char *end = NULL;
        char *f = s;

        if (s == NULL)
                return;

        while (*s) {
                if (*s == '[')
                        start = s;
                else if (*s == ']')
                        end = s;

                if (start && start < end) {
                        *end = 0;
                        parse_queue_param(start + 1, opaque);
                        s = end;
                        start = end;
                }
                s++;
        }

        free(f);
}

static int
parse_sso_kvargs_dict(const char *key, const char *value, void *opaque)
{
        RTE_SET_USED(key);

        /* Dict format [Qx-XAQ-TAQ-IAQ][Qz-XAQ-TAQ-IAQ]: '-' is used as the
         * separator because ',' isn't allowed within a kvargs value.
         * Everything is expressed in percentages; 0 selects the default.
         */
        parse_qos_list(value, opaque);

        return 0;
}

static void
sso_parse_devargs(struct otx2_sso_evdev *dev, struct rte_devargs *devargs)
{
        struct rte_kvargs *kvlist;
        uint8_t single_ws = 0;

        if (devargs == NULL)
                return;
        kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (kvlist == NULL)
                return;

        rte_kvargs_process(kvlist, OTX2_SSO_XAE_CNT, &parse_kvargs_value,
                           &dev->xae_cnt);
        rte_kvargs_process(kvlist, OTX2_SSO_SINGLE_WS, &parse_kvargs_flag,
                           &single_ws);
        rte_kvargs_process(kvlist, OTX2_SSO_GGRP_QOS, &parse_sso_kvargs_dict,
                           dev);
        otx2_parse_common_devargs(kvlist);
        dev->dual_ws = !single_ws;
        rte_kvargs_free(kvlist);
}
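
/*
 * Illustrative EAL device arguments (the PCI address is a placeholder);
 * see the RTE_PMD_REGISTER_PARAM_STRING() entry at the end of this file:
 *
 *	-a 0002:0e:00.0,xae_cnt=16384,single_ws=1,qos=[1-50-50-50]
 *
 * single_ws=1 turns off the default dual-workslot mode.
 */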

static int
otx2_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
        return rte_event_pmd_pci_probe(pci_drv, pci_dev,
                                       sizeof(struct otx2_sso_evdev),
                                       otx2_sso_init);
}

static int
otx2_sso_remove(struct rte_pci_device *pci_dev)
{
        return rte_event_pmd_pci_remove(pci_dev, otx2_sso_fini);
}

static const struct rte_pci_id pci_sso_map[] = {
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
                               PCI_DEVID_OCTEONTX2_RVU_SSO_TIM_PF)
        },
        {
                .vendor_id = 0,
        },
};

static struct rte_pci_driver pci_sso = {
        .id_table = pci_sso_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
        .probe = otx2_sso_probe,
        .remove = otx2_sso_remove,
};

int
otx2_sso_init(struct rte_eventdev *event_dev)
{
        struct free_rsrcs_rsp *rsrc_cnt;
        struct rte_pci_device *pci_dev;
        struct otx2_sso_evdev *dev;
        int rc;

        event_dev->dev_ops = &otx2_sso_ops;
        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                sso_fastpath_fns_set(event_dev);
                return 0;
        }

        dev = sso_pmd_priv(event_dev);

        pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);

        /* Initialize the base otx2_dev object */
        rc = otx2_dev_init(pci_dev, dev);
        if (rc < 0) {
                otx2_err("Failed to initialize otx2_dev rc=%d", rc);
                goto error;
        }

        /* Get the SSO and SSOW resource counts */
        otx2_mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
        rc = otx2_mbox_process_msg(dev->mbox, (void *)&rsrc_cnt);
        if (rc < 0) {
                otx2_err("Unable to get free resource count");
                goto otx2_dev_uninit;
        }
        otx2_sso_dbg("SSO %d SSOW %d NPA %d provisioned", rsrc_cnt->sso,
                     rsrc_cnt->ssow, rsrc_cnt->npa);

        dev->max_event_ports = RTE_MIN(rsrc_cnt->ssow, OTX2_SSO_MAX_VHWS);
        dev->max_event_queues = RTE_MIN(rsrc_cnt->sso, OTX2_SSO_MAX_VHGRP);
        /* Grab the NPA LF if required */
        rc = otx2_npa_lf_init(pci_dev, dev);
        if (rc < 0) {
                otx2_err("Unable to init NPA LF. It might not be provisioned");
                goto otx2_dev_uninit;
        }

        dev->drv_inited = true;
        dev->is_timeout_deq = 0;
        dev->min_dequeue_timeout_ns = USEC2NSEC(1);
        dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);
        dev->max_num_events = -1;
        dev->nb_event_queues = 0;
        dev->nb_event_ports = 0;

        if (!dev->max_event_ports || !dev->max_event_queues) {
                otx2_err("Not enough eventdev resources: queues=%d ports=%d",
                         dev->max_event_queues, dev->max_event_ports);
                rc = -ENODEV;
                goto otx2_npa_lf_uninit;
        }

        dev->dual_ws = 1;
        sso_parse_devargs(dev, pci_dev->device.devargs);
        if (dev->dual_ws) {
                otx2_sso_dbg("Using dual workslot mode");
                /* Each dual port consumes two HW workslots */
                dev->max_event_ports = dev->max_event_ports / 2;
        } else {
                otx2_sso_dbg("Using single workslot mode");
        }

        otx2_sso_pf_func_set(dev->pf_func);
        otx2_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
                     event_dev->data->name, dev->max_event_queues,
                     dev->max_event_ports);

        otx2_tim_init(pci_dev, (struct otx2_dev *)dev);

        return 0;

otx2_npa_lf_uninit:
        otx2_npa_lf_fini();
otx2_dev_uninit:
        otx2_dev_fini(pci_dev, dev);
error:
        return rc;
}

int
otx2_sso_fini(struct rte_eventdev *event_dev)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        struct rte_pci_device *pci_dev;

        /* For secondary processes, nothing to be done */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);

        if (!dev->drv_inited)
                goto dev_fini;

        dev->drv_inited = false;
        otx2_npa_lf_fini();

dev_fini:
        if (otx2_npa_lf_active(dev)) {
                otx2_info("Common resource in use by other devices");
                return -EAGAIN;
        }

        otx2_tim_fini();
        otx2_dev_fini(pci_dev, dev);

        return 0;
}

RTE_PMD_REGISTER_PCI(event_octeontx2, pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_octeontx2, pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_octeontx2, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_octeontx2, OTX2_SSO_XAE_CNT "=<int>"
                              OTX2_SSO_SINGLE_WS "=1"
                              OTX2_SSO_GGRP_QOS "=<string>"
                              OTX2_NPA_LOCK_MASK "=<1-65535>");