1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
7 #include <rte_bus_pci.h>
8 #include <rte_common.h>
10 #include <eventdev_pmd_pci.h>
11 #include <rte_kvargs.h>
12 #include <rte_mbuf_pool_ops.h>
15 #include "otx2_evdev.h"
16 #include "otx2_evdev_crypto_adptr_tx.h"
17 #include "otx2_evdev_stats.h"
19 #include "otx2_tim_evdev.h"
/* Query the AF (admin function) over the shared mailbox for the MSIX
 * vector offsets of every SSOW (workslot/port) and SSO (group/queue) LF,
 * and cache them in the device private data for later IRQ registration.
 * NOTE(review): lines elided in this view — the mailbox return code is
 * presumably checked before the responses are consumed; confirm in full file.
 */
22 sso_get_msix_offsets(const struct rte_eventdev *event_dev)
24 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
/* Dual-workslot mode exposes two HW workslots per event port. */
25 uint8_t nb_ports = dev->nb_event_ports * (dev->dual_ws ? 2 : 1);
26 struct otx2_mbox *mbox = dev->mbox;
27 struct msix_offset_rsp *msix_rsp;
30 /* Get SSO and SSOW MSIX vector offsets */
31 otx2_mbox_alloc_msg_msix_offset(mbox);
32 rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);
/* Cache per-workslot (SSOW) offsets. */
34 for (i = 0; i < nb_ports; i++)
35 dev->ssow_msixoff[i] = msix_rsp->ssow_msixoff[i];
/* Cache per-group (SSO) offsets. */
37 for (i = 0; i < dev->nb_event_queues; i++)
38 dev->sso_msixoff[i] = msix_rsp->sso_msixoff[i];
/* Select the fastpath enqueue/dequeue/Tx-adapter function pointers for
 * this event device based on the Rx/Tx offload flags negotiated at
 * configure time.
 *
 * Each table below is a 7-dimensional [2]^7 lookup of pre-generated
 * specialized functions.  The R()/T() macros (paired with the
 * SSO_RX_ADPTR_ENQ_FASTPATH_FUNC / SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 * expansion lists from the headers) place one specialized symbol at the
 * index given by its flag bits f6..f0.  At the bottom of the function the
 * tables are indexed with `!!(offloads & FLAG)` per dimension, so exactly
 * one variant — compiled for precisely the enabled offload set — is
 * installed, avoiding runtime flag checks on the hot path.
 *
 * NOTE(review): the matching #undef R / #undef T lines and the
 * single-vs-dual workslot if/else braces are elided in this view.
 */
44 sso_fastpath_fns_set(struct rte_eventdev *event_dev)
46 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
/* --- Single-workslot dequeue tables --- */
48 const event_dequeue_t ssogws_deq[2][2][2][2][2][2][2] = {
49 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
50 [f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_ ##name,
51 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
55 const event_dequeue_burst_t ssogws_deq_burst[2][2][2][2][2][2][2] = {
56 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
57 [f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_burst_ ##name,
58 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
62 const event_dequeue_t ssogws_deq_timeout[2][2][2][2][2][2][2] = {
63 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
64 [f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_timeout_ ##name,
65 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
69 const event_dequeue_burst_t
70 ssogws_deq_timeout_burst[2][2][2][2][2][2][2] = {
71 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
72 [f6][f5][f4][f3][f2][f1][f0] = \
73 otx2_ssogws_deq_timeout_burst_ ##name,
74 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
/* Multi-segment (scatter-gather) variants. */
78 const event_dequeue_t ssogws_deq_seg[2][2][2][2][2][2][2] = {
79 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
80 [f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_seg_ ##name,
81 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
85 const event_dequeue_burst_t
86 ssogws_deq_seg_burst[2][2][2][2][2][2][2] = {
87 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
88 [f6][f5][f4][f3][f2][f1][f0] = \
89 otx2_ssogws_deq_seg_burst_ ##name,
90 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
94 const event_dequeue_t ssogws_deq_seg_timeout[2][2][2][2][2][2][2] = {
95 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
96 [f6][f5][f4][f3][f2][f1][f0] = \
97 otx2_ssogws_deq_seg_timeout_ ##name,
98 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
102 const event_dequeue_burst_t
103 ssogws_deq_seg_timeout_burst[2][2][2][2][2][2][2] = {
104 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
105 [f6][f5][f4][f3][f2][f1][f0] = \
106 otx2_ssogws_deq_seg_timeout_burst_ ##name,
107 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
/* --- Dual-workslot dequeue tables --- */
113 const event_dequeue_t ssogws_dual_deq[2][2][2][2][2][2][2] = {
114 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
115 [f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_ ##name,
116 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
120 const event_dequeue_burst_t
121 ssogws_dual_deq_burst[2][2][2][2][2][2][2] = {
122 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
123 [f6][f5][f4][f3][f2][f1][f0] = \
124 otx2_ssogws_dual_deq_burst_ ##name,
125 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
129 const event_dequeue_t ssogws_dual_deq_timeout[2][2][2][2][2][2][2] = {
130 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
131 [f6][f5][f4][f3][f2][f1][f0] = \
132 otx2_ssogws_dual_deq_timeout_ ##name,
133 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
137 const event_dequeue_burst_t
138 ssogws_dual_deq_timeout_burst[2][2][2][2][2][2][2] = {
139 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
140 [f6][f5][f4][f3][f2][f1][f0] = \
141 otx2_ssogws_dual_deq_timeout_burst_ ##name,
142 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
146 const event_dequeue_t ssogws_dual_deq_seg[2][2][2][2][2][2][2] = {
147 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
148 [f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_seg_ ##name,
149 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
153 const event_dequeue_burst_t
154 ssogws_dual_deq_seg_burst[2][2][2][2][2][2][2] = {
155 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
156 [f6][f5][f4][f3][f2][f1][f0] = \
157 otx2_ssogws_dual_deq_seg_burst_ ##name,
158 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
162 const event_dequeue_t
163 ssogws_dual_deq_seg_timeout[2][2][2][2][2][2][2] = {
164 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
165 [f6][f5][f4][f3][f2][f1][f0] = \
166 otx2_ssogws_dual_deq_seg_timeout_ ##name,
167 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
171 const event_dequeue_burst_t
172 ssogws_dual_deq_seg_timeout_burst[2][2][2][2][2][2][2] = {
173 #define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
174 [f6][f5][f4][f3][f2][f1][f0] = \
175 otx2_ssogws_dual_deq_seg_timeout_burst_ ##name,
176 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
/* --- Tx adapter enqueue tables (single and dual workslot) --- */
181 const event_tx_adapter_enqueue_t
182 ssogws_tx_adptr_enq[2][2][2][2][2][2][2] = {
183 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
184 [f6][f5][f4][f3][f2][f1][f0] = \
185 otx2_ssogws_tx_adptr_enq_ ## name,
186 SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
190 const event_tx_adapter_enqueue_t
191 ssogws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
192 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
193 [f6][f5][f4][f3][f2][f1][f0] = \
194 otx2_ssogws_tx_adptr_enq_seg_ ## name,
195 SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
199 const event_tx_adapter_enqueue_t
200 ssogws_dual_tx_adptr_enq[2][2][2][2][2][2][2] = {
201 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
202 [f6][f5][f4][f3][f2][f1][f0] = \
203 otx2_ssogws_dual_tx_adptr_enq_ ## name,
204 SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
208 const event_tx_adapter_enqueue_t
209 ssogws_dual_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
210 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
211 [f6][f5][f4][f3][f2][f1][f0] = \
212 otx2_ssogws_dual_tx_adptr_enq_seg_ ## name,
213 SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
/* Single-workslot: plain enqueue entry points. */
217 event_dev->enqueue = otx2_ssogws_enq;
218 event_dev->enqueue_burst = otx2_ssogws_enq_burst;
219 event_dev->enqueue_new_burst = otx2_ssogws_enq_new_burst;
220 event_dev->enqueue_forward_burst = otx2_ssogws_enq_fwd_burst;
/* Pick multi-segment dequeue variants when Rx scatter-gather is on. */
221 if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
222 event_dev->dequeue = ssogws_deq_seg
223 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
224 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
225 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
226 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
227 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
228 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
229 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
230 event_dev->dequeue_burst = ssogws_deq_seg_burst
231 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
232 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
233 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
234 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
235 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
236 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
237 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
/* Timeout-capable variants override when per-dequeue timeout is set. */
238 if (dev->is_timeout_deq) {
239 event_dev->dequeue = ssogws_deq_seg_timeout
240 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
241 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
242 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
243 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
244 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
245 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
246 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
247 event_dev->dequeue_burst =
248 ssogws_deq_seg_timeout_burst
249 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
250 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
251 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
252 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
253 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
254 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
255 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
/* Single-segment (non scatter-gather) path. */
258 event_dev->dequeue = ssogws_deq
259 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
260 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
261 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
262 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
263 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
264 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
265 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
266 event_dev->dequeue_burst = ssogws_deq_burst
267 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
268 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
269 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
270 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
271 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
272 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
273 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
274 if (dev->is_timeout_deq) {
275 event_dev->dequeue = ssogws_deq_timeout
276 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
277 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
278 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
279 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
280 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
281 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
282 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
283 event_dev->dequeue_burst =
284 ssogws_deq_timeout_burst
285 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
286 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
287 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
288 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
289 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
290 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
291 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
/* Tx adapter: pick seg/non-seg table by Tx multi-seg offload. */
295 if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) {
296 /* [SEC] [TSMP] [MBUF_NOFF] [VLAN] [OL3_L4_CSUM] [L3_L4_CSUM] */
297 event_dev->txa_enqueue = ssogws_tx_adptr_enq_seg
298 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_SECURITY_F)]
299 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
300 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
301 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
302 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]
303 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
304 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
306 event_dev->txa_enqueue = ssogws_tx_adptr_enq
307 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_SECURITY_F)]
308 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
309 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
310 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
311 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]
312 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
313 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
315 event_dev->ca_enqueue = otx2_ssogws_ca_enq;
/* Dual-workslot mode: same selection logic with the dual tables. */
318 event_dev->enqueue = otx2_ssogws_dual_enq;
319 event_dev->enqueue_burst = otx2_ssogws_dual_enq_burst;
320 event_dev->enqueue_new_burst =
321 otx2_ssogws_dual_enq_new_burst;
322 event_dev->enqueue_forward_burst =
323 otx2_ssogws_dual_enq_fwd_burst;
325 if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
326 event_dev->dequeue = ssogws_dual_deq_seg
327 [!!(dev->rx_offloads &
328 NIX_RX_OFFLOAD_SECURITY_F)]
329 [!!(dev->rx_offloads &
330 NIX_RX_OFFLOAD_TSTAMP_F)]
331 [!!(dev->rx_offloads &
332 NIX_RX_OFFLOAD_MARK_UPDATE_F)]
333 [!!(dev->rx_offloads &
334 NIX_RX_OFFLOAD_VLAN_STRIP_F)]
335 [!!(dev->rx_offloads &
336 NIX_RX_OFFLOAD_CHECKSUM_F)]
337 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
338 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
339 event_dev->dequeue_burst = ssogws_dual_deq_seg_burst
340 [!!(dev->rx_offloads &
341 NIX_RX_OFFLOAD_SECURITY_F)]
342 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
343 [!!(dev->rx_offloads &
344 NIX_RX_OFFLOAD_MARK_UPDATE_F)]
345 [!!(dev->rx_offloads &
346 NIX_RX_OFFLOAD_VLAN_STRIP_F)]
347 [!!(dev->rx_offloads &
348 NIX_RX_OFFLOAD_CHECKSUM_F)]
349 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
350 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
351 if (dev->is_timeout_deq) {
353 ssogws_dual_deq_seg_timeout
354 [!!(dev->rx_offloads &
355 NIX_RX_OFFLOAD_SECURITY_F)]
356 [!!(dev->rx_offloads &
357 NIX_RX_OFFLOAD_TSTAMP_F)]
358 [!!(dev->rx_offloads &
359 NIX_RX_OFFLOAD_MARK_UPDATE_F)]
360 [!!(dev->rx_offloads &
361 NIX_RX_OFFLOAD_VLAN_STRIP_F)]
362 [!!(dev->rx_offloads &
363 NIX_RX_OFFLOAD_CHECKSUM_F)]
364 [!!(dev->rx_offloads &
365 NIX_RX_OFFLOAD_PTYPE_F)]
366 [!!(dev->rx_offloads &
367 NIX_RX_OFFLOAD_RSS_F)];
368 event_dev->dequeue_burst =
369 ssogws_dual_deq_seg_timeout_burst
370 [!!(dev->rx_offloads &
371 NIX_RX_OFFLOAD_SECURITY_F)]
372 [!!(dev->rx_offloads &
373 NIX_RX_OFFLOAD_TSTAMP_F)]
374 [!!(dev->rx_offloads &
375 NIX_RX_OFFLOAD_MARK_UPDATE_F)]
376 [!!(dev->rx_offloads &
377 NIX_RX_OFFLOAD_VLAN_STRIP_F)]
378 [!!(dev->rx_offloads &
379 NIX_RX_OFFLOAD_CHECKSUM_F)]
380 [!!(dev->rx_offloads &
381 NIX_RX_OFFLOAD_PTYPE_F)]
382 [!!(dev->rx_offloads &
383 NIX_RX_OFFLOAD_RSS_F)];
386 event_dev->dequeue = ssogws_dual_deq
387 [!!(dev->rx_offloads &
388 NIX_RX_OFFLOAD_SECURITY_F)]
389 [!!(dev->rx_offloads &
390 NIX_RX_OFFLOAD_TSTAMP_F)]
391 [!!(dev->rx_offloads &
392 NIX_RX_OFFLOAD_MARK_UPDATE_F)]
393 [!!(dev->rx_offloads &
394 NIX_RX_OFFLOAD_VLAN_STRIP_F)]
395 [!!(dev->rx_offloads &
396 NIX_RX_OFFLOAD_CHECKSUM_F)]
397 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
398 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
399 event_dev->dequeue_burst = ssogws_dual_deq_burst
400 [!!(dev->rx_offloads &
401 NIX_RX_OFFLOAD_SECURITY_F)]
402 [!!(dev->rx_offloads &
403 NIX_RX_OFFLOAD_TSTAMP_F)]
404 [!!(dev->rx_offloads &
405 NIX_RX_OFFLOAD_MARK_UPDATE_F)]
406 [!!(dev->rx_offloads &
407 NIX_RX_OFFLOAD_VLAN_STRIP_F)]
408 [!!(dev->rx_offloads &
409 NIX_RX_OFFLOAD_CHECKSUM_F)]
410 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
411 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
412 if (dev->is_timeout_deq) {
414 ssogws_dual_deq_timeout
415 [!!(dev->rx_offloads &
416 NIX_RX_OFFLOAD_SECURITY_F)]
417 [!!(dev->rx_offloads &
418 NIX_RX_OFFLOAD_TSTAMP_F)]
419 [!!(dev->rx_offloads &
420 NIX_RX_OFFLOAD_MARK_UPDATE_F)]
421 [!!(dev->rx_offloads &
422 NIX_RX_OFFLOAD_VLAN_STRIP_F)]
423 [!!(dev->rx_offloads &
424 NIX_RX_OFFLOAD_CHECKSUM_F)]
425 [!!(dev->rx_offloads &
426 NIX_RX_OFFLOAD_PTYPE_F)]
427 [!!(dev->rx_offloads &
428 NIX_RX_OFFLOAD_RSS_F)];
429 event_dev->dequeue_burst =
430 ssogws_dual_deq_timeout_burst
431 [!!(dev->rx_offloads &
432 NIX_RX_OFFLOAD_SECURITY_F)]
433 [!!(dev->rx_offloads &
434 NIX_RX_OFFLOAD_TSTAMP_F)]
435 [!!(dev->rx_offloads &
436 NIX_RX_OFFLOAD_MARK_UPDATE_F)]
437 [!!(dev->rx_offloads &
438 NIX_RX_OFFLOAD_VLAN_STRIP_F)]
439 [!!(dev->rx_offloads &
440 NIX_RX_OFFLOAD_CHECKSUM_F)]
441 [!!(dev->rx_offloads &
442 NIX_RX_OFFLOAD_PTYPE_F)]
443 [!!(dev->rx_offloads &
444 NIX_RX_OFFLOAD_RSS_F)];
/* Dual-workslot Tx adapter selection. */
448 if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) {
449 /* [SEC] [TSMP] [MBUF_NOFF] [VLAN] [OL3_L4_CSUM] [L3_L4_CSUM] */
450 event_dev->txa_enqueue = ssogws_dual_tx_adptr_enq_seg
451 [!!(dev->tx_offloads &
452 NIX_TX_OFFLOAD_SECURITY_F)]
453 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
454 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
455 [!!(dev->tx_offloads &
456 NIX_TX_OFFLOAD_MBUF_NOFF_F)]
457 [!!(dev->tx_offloads &
458 NIX_TX_OFFLOAD_VLAN_QINQ_F)]
459 [!!(dev->tx_offloads &
460 NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
461 [!!(dev->tx_offloads &
462 NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
464 event_dev->txa_enqueue = ssogws_dual_tx_adptr_enq
465 [!!(dev->tx_offloads &
466 NIX_TX_OFFLOAD_SECURITY_F)]
467 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
468 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
469 [!!(dev->tx_offloads &
470 NIX_TX_OFFLOAD_MBUF_NOFF_F)]
471 [!!(dev->tx_offloads &
472 NIX_TX_OFFLOAD_VLAN_QINQ_F)]
473 [!!(dev->tx_offloads &
474 NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
475 [!!(dev->tx_offloads &
476 NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
478 event_dev->ca_enqueue = otx2_ssogws_dual_ca_enq;
/* Same-destination Tx path reuses the selected txa_enqueue. */
481 event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
/* eventdev op: report device capabilities and limits.
 * Fills @dev_info from the HW limits discovered at probe time plus
 * fixed capability flags of the SSO block.
 */
486 otx2_sso_info_get(struct rte_eventdev *event_dev,
487 struct rte_event_dev_info *dev_info)
489 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
491 dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX2_PMD);
492 dev_info->min_dequeue_timeout_ns = dev->min_dequeue_timeout_ns;
493 dev_info->max_dequeue_timeout_ns = dev->max_dequeue_timeout_ns;
494 dev_info->max_event_queues = dev->max_event_queues;
495 dev_info->max_event_queue_flows = (1ULL << 20);
496 dev_info->max_event_queue_priority_levels = 8;
/* SSO has no per-event priority; only queue-level priority. */
497 dev_info->max_event_priority_levels = 1;
498 dev_info->max_event_ports = dev->max_event_ports;
/* HW workslots dequeue/enqueue one event at a time. */
499 dev_info->max_event_port_dequeue_depth = 1;
500 dev_info->max_event_port_enqueue_depth = 1;
501 dev_info->max_num_events = dev->max_num_events;
502 dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
503 RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
504 RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
505 RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
506 RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
507 RTE_EVENT_DEV_CAP_NONSEQ_MODE |
508 RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
/* Add or remove workslot @ws's membership in group (queue) @queue by
 * writing the SSOW_LF_GWS_GRPMSK_CHG register of the workslot's LF.
 * NOTE(review): the `val` declaration and the queue-id bits of the
 * register value are elided in this view.
 */
512 sso_port_link_modify(struct otx2_ssogws *ws, uint8_t queue, uint8_t enable)
514 uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);
518 val |= 0ULL << 12; /* SET 0 */
519 val |= 0x8000800080000000; /* Dont modify rest of the masks */
520 val |= (uint64_t)enable << 14; /* Enable/Disable Membership. */
522 otx2_write64(val, base + SSOW_LF_GWS_GRPMSK_CHG);
/* eventdev op: link @nb_links queues to @port.
 * Priorities are ignored (SSO supports only queue-level priority set
 * elsewhere).  In dual-workslot mode the link is applied to both HW
 * workslots backing the port.  Returns the number of links applied.
 */
526 otx2_sso_port_link(struct rte_eventdev *event_dev, void *port,
527 const uint8_t queues[], const uint8_t priorities[],
530 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
534 RTE_SET_USED(priorities);
535 for (link = 0; link < nb_links; link++) {
537 struct otx2_ssogws_dual *ws = port;
540 sso_port_link_modify((struct otx2_ssogws *)
541 &ws->ws_state[0], queues[link], true);
542 sso_port_link_modify((struct otx2_ssogws *)
543 &ws->ws_state[1], queues[link], true);
545 struct otx2_ssogws *ws = port;
548 sso_port_link_modify(ws, queues[link], true);
551 sso_func_trace("Port=%d nb_links=%d", port_id, nb_links);
553 return (int)nb_links;
/* eventdev op: unlink @nb_unlinks queues from @port — mirror of
 * otx2_sso_port_link() with membership disabled.  Returns the number
 * of unlinks applied.
 */
557 otx2_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
558 uint8_t queues[], uint16_t nb_unlinks)
560 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
564 for (unlink = 0; unlink < nb_unlinks; unlink++) {
566 struct otx2_ssogws_dual *ws = port;
569 sso_port_link_modify((struct otx2_ssogws *)
570 &ws->ws_state[0], queues[unlink],
572 sso_port_link_modify((struct otx2_ssogws *)
573 &ws->ws_state[1], queues[unlink],
576 struct otx2_ssogws *ws = port;
579 sso_port_link_modify(ws, queues[unlink], false);
582 sso_func_trace("Port=%d nb_unlinks=%d", port_id, nb_unlinks);
584 return (int)nb_unlinks;
/* Ask the AF to attach (@attach != 0) or detach @nb_lf LFs of the given
 * @type (SSO group or SSOW workslot) via the resource attach/detach
 * mailbox messages.
 * NOTE(review): the request-field setup and return paths are elided in
 * this view; presumably returns negative on mailbox failure.
 */
588 sso_hw_lf_cfg(struct otx2_mbox *mbox, enum otx2_sso_lf_type type,
589 uint16_t nb_lf, uint8_t attach)
592 struct rsrc_attach_req *req;
594 req = otx2_mbox_alloc_msg_attach_resources(mbox);
606 if (otx2_mbox_process(mbox) < 0)
609 struct rsrc_detach_req *req;
611 req = otx2_mbox_alloc_msg_detach_resources(mbox);
623 if (otx2_mbox_process(mbox) < 0)
/* Allocate (@alloc != 0) or free @nb_lf LFs of the given @type through
 * the SSO/SSOW LF alloc/free mailbox messages.  On a successful GGRP
 * allocation, caches XAQ geometry (buffer size, work queue entries per
 * XAQ, in-unit entries) reported by the AF for later XAQ pool sizing.
 */
631 sso_lf_cfg(struct otx2_sso_evdev *dev, struct otx2_mbox *mbox,
632 enum otx2_sso_lf_type type, uint16_t nb_lf, uint8_t alloc)
641 struct sso_lf_alloc_req *req_ggrp;
642 req_ggrp = otx2_mbox_alloc_msg_sso_lf_alloc(mbox);
643 req_ggrp->hwgrps = nb_lf;
648 struct ssow_lf_alloc_req *req_hws;
649 req_hws = otx2_mbox_alloc_msg_ssow_lf_alloc(mbox);
650 req_hws->hws = nb_lf;
660 struct sso_lf_free_req *req_ggrp;
661 req_ggrp = otx2_mbox_alloc_msg_sso_lf_free(mbox);
662 req_ggrp->hwgrps = nb_lf;
667 struct ssow_lf_free_req *req_hws;
668 req_hws = otx2_mbox_alloc_msg_ssow_lf_free(mbox);
669 req_hws->hws = nb_lf;
/* ~0 timeout: wait indefinitely for the AF response. */
677 rc = otx2_mbox_process_msg_tmo(mbox, (void **)&rsp, ~0);
681 if (alloc && type == SSO_LF_GGRP) {
682 struct sso_lf_alloc_rsp *rsp_ggrp = rsp;
/* Cache XAQ geometry reported by AF for XAQ pool sizing. */
684 dev->xaq_buf_size = rsp_ggrp->xaq_buf_size;
685 dev->xae_waes = rsp_ggrp->xaq_wq_entries;
686 dev->iue = rsp_ggrp->in_unit_entries;
/* eventdev op: release an event port.  If the port was configured,
 * unlink it from every event queue (both workslots in dual mode),
 * zero the port state, then free the cookie allocation that precedes
 * the port memory (the cookie is the true malloc'd base pointer).
 */
693 otx2_sso_port_release(void *port)
695 struct otx2_ssogws_cookie *gws_cookie = ssogws_get_cookie(port);
696 struct otx2_sso_evdev *dev;
/* Nothing to undo for a never-configured port. */
699 if (!gws_cookie->configured)
702 dev = sso_pmd_priv(gws_cookie->event_dev);
704 struct otx2_ssogws_dual *ws = port;
706 for (i = 0; i < dev->nb_event_queues; i++) {
707 sso_port_link_modify((struct otx2_ssogws *)
708 &ws->ws_state[0], i, false);
709 sso_port_link_modify((struct otx2_ssogws *)
710 &ws->ws_state[1], i, false);
712 memset(ws, 0, sizeof(*ws));
714 struct otx2_ssogws *ws = port;
716 for (i = 0; i < dev->nb_event_queues; i++)
717 sso_port_link_modify(ws, i, false);
718 memset(ws, 0, sizeof(*ws));
721 memset(gws_cookie, 0, sizeof(*gws_cookie));
/* Cookie is the allocation base (port sits one cache line after it). */
724 rte_free(gws_cookie);
/* eventdev op: queue release — intentionally a no-op; queue (GGRP LF)
 * teardown is handled at device reconfigure/close time instead.
 */
728 otx2_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
730 RTE_SET_USED(event_dev);
731 RTE_SET_USED(queue_id);
/* Re-apply the port->queue links recorded in event_dev->data->links_map
 * after a reconfigure (which tears down and re-creates the LFs).
 * Entries equal to 0xdead mark an unlinked slot and are skipped
 * (0xdead is the eventdev layer's "no link" sentinel in links_map).
 */
735 sso_restore_links(const struct rte_eventdev *event_dev)
737 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
741 for (i = 0; i < dev->nb_event_ports; i++) {
742 links_map = event_dev->data->links_map;
743 /* Point links_map to this port specific area */
744 links_map += (i * RTE_EVENT_MAX_QUEUES_PER_DEV);
746 struct otx2_ssogws_dual *ws;
748 ws = event_dev->data->ports[i];
749 for (j = 0; j < dev->nb_event_queues; j++) {
750 if (links_map[j] == 0xdead)
752 sso_port_link_modify((struct otx2_ssogws *)
753 &ws->ws_state[0], j, true);
754 sso_port_link_modify((struct otx2_ssogws *)
755 &ws->ws_state[1], j, true);
756 sso_func_trace("Restoring port %d queue %d "
760 struct otx2_ssogws *ws;
762 ws = event_dev->data->ports[i];
763 for (j = 0; j < dev->nb_event_queues; j++) {
764 if (links_map[j] == 0xdead)
766 sso_port_link_modify(ws, j, true);
767 sso_func_trace("Restoring port %d queue %d "
/* Pre-compute the per-workslot register addresses (tag, WQP, get-work
 * and the SWTAG op registers) from the LF's BAR2 @base so the fastpath
 * can issue raw reads/writes without recomputing offsets.
 */
775 sso_set_port_ops(struct otx2_ssogws *ws, uintptr_t base)
777 ws->tag_op = base + SSOW_LF_GWS_TAG;
778 ws->wqp_op = base + SSOW_LF_GWS_WQP;
779 ws->getwrk_op = base + SSOW_LF_GWS_OP_GET_WORK;
780 ws->swtag_flush_op = base + SSOW_LF_GWS_OP_SWTAG_FLUSH;
781 ws->swtag_norm_op = base + SSOW_LF_GWS_OP_SWTAG_NORM;
782 ws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
/* Set up event ports in dual-workslot mode: attach and initialize
 * 2 SSOW LFs per event port, allocate (or reuse) per-port memory with a
 * leading cookie cache line, and map each workslot's BAR2 registers.
 * NOTE(review): the `vws` increment between the two workslot mappings
 * and the error/cleanup labels are elided in this view.
 */
786 sso_configure_dual_ports(const struct rte_eventdev *event_dev)
788 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
789 struct otx2_mbox *mbox = dev->mbox;
794 otx2_sso_dbg("Configuring event ports %d", dev->nb_event_ports);
/* Two HW workslots back each dual-mode event port. */
796 nb_lf = dev->nb_event_ports * 2;
797 /* Ask AF to attach required LFs. */
798 rc = sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, true);
800 otx2_err("Failed to attach SSO GWS LF");
804 if (sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, true) < 0) {
805 sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
806 otx2_err("Failed to init SSO GWS LF");
810 for (i = 0; i < dev->nb_event_ports; i++) {
811 struct otx2_ssogws_cookie *gws_cookie;
812 struct otx2_ssogws_dual *ws;
/* Reuse previously allocated port memory on reconfigure. */
815 if (event_dev->data->ports[i] != NULL) {
816 ws = event_dev->data->ports[i];
818 /* Allocate event port memory */
819 ws = rte_zmalloc_socket("otx2_sso_ws",
820 sizeof(struct otx2_ssogws_dual) +
823 event_dev->data->socket_id);
825 otx2_err("Failed to alloc memory for port=%d",
831 /* First cache line is reserved for cookie */
832 ws = (struct otx2_ssogws_dual *)
833 ((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
/* Map both workslots' BAR2 register windows. */
837 base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | vws << 12);
838 sso_set_port_ops((struct otx2_ssogws *)&ws->ws_state[0], base);
842 base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | vws << 12);
843 sso_set_port_ops((struct otx2_ssogws *)&ws->ws_state[1], base);
847 gws_cookie = ssogws_get_cookie(ws);
848 gws_cookie->event_dev = event_dev;
849 gws_cookie->configured = 1;
851 event_dev->data->ports[i] = ws;
/* Error path: undo LF init and attach. */
855 sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, false);
856 sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
/* Set up event ports in single-workslot mode: one SSOW LF per event
 * port.  Same allocation/cookie/register-mapping scheme as
 * sso_configure_dual_ports() but with a single workslot per port.
 */
863 sso_configure_ports(const struct rte_eventdev *event_dev)
865 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
866 struct otx2_mbox *mbox = dev->mbox;
870 otx2_sso_dbg("Configuring event ports %d", dev->nb_event_ports);
872 nb_lf = dev->nb_event_ports;
873 /* Ask AF to attach required LFs. */
874 rc = sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, true);
876 otx2_err("Failed to attach SSO GWS LF");
880 if (sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, true) < 0) {
881 sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
882 otx2_err("Failed to init SSO GWS LF");
886 for (i = 0; i < nb_lf; i++) {
887 struct otx2_ssogws_cookie *gws_cookie;
888 struct otx2_ssogws *ws;
/* Reuse previously allocated port memory on reconfigure. */
891 if (event_dev->data->ports[i] != NULL) {
892 ws = event_dev->data->ports[i];
894 /* Allocate event port memory */
895 ws = rte_zmalloc_socket("otx2_sso_ws",
896 sizeof(struct otx2_ssogws) +
899 event_dev->data->socket_id);
901 otx2_err("Failed to alloc memory for port=%d",
907 /* First cache line is reserved for cookie */
908 ws = (struct otx2_ssogws *)
909 ((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
/* LF index i selects this port's BAR2 register window. */
913 base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | i << 12);
914 sso_set_port_ops(ws, base);
917 gws_cookie = ssogws_get_cookie(ws);
918 gws_cookie->event_dev = event_dev;
919 gws_cookie->configured = 1;
921 event_dev->data->ports[i] = ws;
/* Error path: undo LF init and attach. */
925 sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, false);
926 sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
/* Set up event queues: attach and initialize one SSO GGRP LF per
 * configured event queue.  Mirrors the attach-then-init pattern of the
 * port configuration helpers.
 */
933 sso_configure_queues(const struct rte_eventdev *event_dev)
935 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
936 struct otx2_mbox *mbox = dev->mbox;
940 otx2_sso_dbg("Configuring event queues %d", dev->nb_event_queues);
942 nb_lf = dev->nb_event_queues;
943 /* Ask AF to attach required LFs. */
944 rc = sso_hw_lf_cfg(mbox, SSO_LF_GGRP, nb_lf, true);
946 otx2_err("Failed to attach SSO GGRP LF");
950 if (sso_lf_cfg(dev, mbox, SSO_LF_GGRP, nb_lf, true) < 0) {
951 sso_hw_lf_cfg(mbox, SSO_LF_GGRP, nb_lf, false);
952 otx2_err("Failed to init SSO GGRP LF");
/* Allocate the XAQ (external admission queue) buffer pool and the
 * flow-control memory used to backpressure add-work (enqueue).
 * Sizing follows the HRM formula: per-queue cache buffers plus
 * buffers for user-requested XAE count (or the Rx adapter estimate),
 * plus slack.  The pool is an NPA-backed mempool; an aura descriptor
 * placed after fc_mem drives the HW flow-count updates.
 */
960 sso_xaq_allocate(struct otx2_sso_evdev *dev)
962 const struct rte_memzone *mz;
963 struct npa_aura_s *aura;
/* Monotonic counter so each reconfigure gets a unique pool name. */
964 static int reconfig_cnt;
965 char pool_name[RTE_MEMZONE_NAMESIZE];
/* Drop any pool left over from a previous configure. */
970 rte_mempool_free(dev->xaq_pool);
973 * Allocate memory for Add work backpressure.
975 mz = rte_memzone_lookup(OTX2_SSO_FC_NAME);
977 mz = rte_memzone_reserve_aligned(OTX2_SSO_FC_NAME,
979 sizeof(struct npa_aura_s),
981 RTE_MEMZONE_IOVA_CONTIG,
984 otx2_err("Failed to allocate mem for fcmem");
988 dev->fc_iova = mz->iova;
989 dev->fc_mem = mz->addr;
/* Aura descriptor lives just past the flow-control word. */
991 aura = (struct npa_aura_s *)((uintptr_t)dev->fc_mem + OTX2_ALIGN);
992 memset(aura, 0, sizeof(struct npa_aura_s));
995 aura->fc_addr = dev->fc_iova;
996 aura->fc_hyst_bits = 0; /* Store count on all updates */
998 /* Taken from HRM 14.3.3(4) */
999 xaq_cnt = dev->nb_event_queues * OTX2_SSO_XAQ_CACHE_CNT;
1001 xaq_cnt += dev->xae_cnt / dev->xae_waes;
1002 else if (dev->adptr_xae_cnt)
1003 xaq_cnt += (dev->adptr_xae_cnt / dev->xae_waes) +
1004 (OTX2_SSO_XAQ_SLACK * dev->nb_event_queues);
1006 xaq_cnt += (dev->iue / dev->xae_waes) +
1007 (OTX2_SSO_XAQ_SLACK * dev->nb_event_queues);
1009 otx2_sso_dbg("Configuring %d xaq buffers", xaq_cnt);
1010 /* Setup XAQ based on number of nb queues. */
1011 snprintf(pool_name, 30, "otx2_xaq_buf_pool_%d", reconfig_cnt);
1012 dev->xaq_pool = (void *)rte_mempool_create_empty(pool_name,
1013 xaq_cnt, dev->xaq_buf_size, 0, 0,
1014 rte_socket_id(), 0);
1016 if (dev->xaq_pool == NULL) {
1017 otx2_err("Unable to create empty mempool.");
1018 rte_memzone_free(mz);
/* Bind the pool to the NPA backend with our aura config. */
1022 rc = rte_mempool_set_ops_byname(dev->xaq_pool,
1023 rte_mbuf_platform_mempool_ops(), aura);
1025 otx2_err("Unable to set xaqpool ops.");
1029 rc = rte_mempool_populate_default(dev->xaq_pool);
1031 otx2_err("Unable to set populate xaqpool.");
1035 /* When SW does addwork (enqueue) check if there is space in XAQ by
1036 * comparing fc_addr above against the xaq_lmt calculated below.
1037 * There should be a minimum headroom (OTX2_SSO_XAQ_SLACK / 2) for SSO
1038 * to request XAQ to cache them even before enqueue is called.
1040 dev->xaq_lmt = xaq_cnt - (OTX2_SSO_XAQ_SLACK / 2 *
1041 dev->nb_event_queues);
1042 dev->nb_xaq_cfg = xaq_cnt;
/* Error path: release pool and flow-control memzone. */
1046 rte_mempool_free(dev->xaq_pool);
1047 rte_memzone_free(mz);
/* Tell the AF to point every GGRP at the XAQ aura created by
 * sso_xaq_allocate() so HW can draw XAQ buffers from the NPA pool.
 * Returns the mailbox result (0 on success, negative on failure).
 */
1052 sso_ggrp_alloc_xaq(struct otx2_sso_evdev *dev)
1054 struct otx2_mbox *mbox = dev->mbox;
1055 struct sso_hw_setconfig *req;
1057 otx2_sso_dbg("Configuring XAQ for GGRPs");
1058 req = otx2_mbox_alloc_msg_sso_hw_setconfig(mbox);
1059 req->npa_pf_func = otx2_npa_pf_func_get();
1060 req->npa_aura_id = npa_lf_aura_handle_to_aura(dev->xaq_pool->pool_id);
1061 req->hwgrps = dev->nb_event_queues;
1063 return otx2_mbox_process(mbox);
/* Ask the AF to release the XAQ aura binding for all GGRPs — the
 * inverse of sso_ggrp_alloc_xaq().  Returns the mailbox result.
 */
1067 sso_ggrp_free_xaq(struct otx2_sso_evdev *dev)
1069 struct otx2_mbox *mbox = dev->mbox;
1070 struct sso_release_xaq *req;
1072 otx2_sso_dbg("Freeing XAQ for GGRPs");
1073 req = otx2_mbox_alloc_msg_sso_hw_release_xaq_aura(mbox);
1074 req->hwgrps = dev->nb_event_queues;
1076 return otx2_mbox_process(mbox);
/* Free and detach all LFs of @lf_type: GGRP count comes from the queue
 * count, GWS count from the port count (doubled in dual-workslot mode).
 * NOTE(review): the switch/case selecting nb_lf is elided in this view.
 */
1080 sso_lf_teardown(struct otx2_sso_evdev *dev,
1081 enum otx2_sso_lf_type lf_type)
1087 nb_lf = dev->nb_event_queues;
1090 nb_lf = dev->nb_event_ports;
1091 nb_lf *= dev->dual_ws ? 2 : 1;
/* Free the LFs, then detach them from this PF/VF. */
1097 sso_lf_cfg(dev, dev->mbox, lf_type, nb_lf, false);
1098 sso_hw_lf_cfg(dev->mbox, lf_type, nb_lf, false);
/* eventdev op: (re)configure the device.
 * Validates the requested dequeue timeout and queue/port/depth limits,
 * tears down any previous configuration (IRQs, GGRP/GWS LFs), then
 * builds the new one: ports (single or dual workslot), queues, XAQ
 * pool, restored links, MSIX offsets and IRQs.  On any failure the
 * partially built state is unwound via the teardown labels (elided in
 * this view) and the device is left unconfigured.
 */
1102 otx2_sso_configure(const struct rte_eventdev *event_dev)
1104 struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
1105 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1106 uint32_t deq_tmo_ns;
1110 deq_tmo_ns = conf->dequeue_timeout_ns;
/* 0 means "use the device minimum". */
1112 if (deq_tmo_ns == 0)
1113 deq_tmo_ns = dev->min_dequeue_timeout_ns;
1115 if (deq_tmo_ns < dev->min_dequeue_timeout_ns ||
1116 deq_tmo_ns > dev->max_dequeue_timeout_ns) {
1117 otx2_err("Unsupported dequeue timeout requested");
1121 if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
1122 dev->is_timeout_deq = 1;
1124 dev->deq_tmo_ns = deq_tmo_ns;
1126 if (conf->nb_event_ports > dev->max_event_ports ||
1127 conf->nb_event_queues > dev->max_event_queues) {
1128 otx2_err("Unsupported event queues/ports requested");
1132 if (conf->nb_event_port_dequeue_depth > 1) {
1133 otx2_err("Unsupported event port deq depth requested");
1137 if (conf->nb_event_port_enqueue_depth > 1) {
1138 otx2_err("Unsupported event port enq depth requested");
/* Reconfigure: drop IRQs and previous LFs before rebuilding. */
1142 if (dev->configured)
1143 sso_unregister_irqs(event_dev);
1145 if (dev->nb_event_queues) {
1146 /* Finit any previous queues. */
1147 sso_lf_teardown(dev, SSO_LF_GGRP);
1149 if (dev->nb_event_ports) {
1150 /* Finit any previous ports. */
1151 sso_lf_teardown(dev, SSO_LF_GWS);
1154 dev->nb_event_queues = conf->nb_event_queues;
1155 dev->nb_event_ports = conf->nb_event_ports;
1158 rc = sso_configure_dual_ports(event_dev);
1160 rc = sso_configure_ports(event_dev);
1163 otx2_err("Failed to configure event ports");
1167 if (sso_configure_queues(event_dev) < 0) {
1168 otx2_err("Failed to configure event queues");
1173 if (sso_xaq_allocate(dev) < 0) {
1175 goto teardown_hwggrp;
1178 /* Restore any prior port-queue mapping. */
1179 sso_restore_links(event_dev);
1180 rc = sso_ggrp_alloc_xaq(dev);
1182 otx2_err("Failed to alloc xaq to ggrp %d", rc);
1183 goto teardown_hwggrp;
1186 rc = sso_get_msix_offsets(event_dev);
1188 otx2_err("Failed to get msix offsets %d", rc);
1189 goto teardown_hwggrp;
1192 rc = sso_register_irqs(event_dev);
1194 otx2_err("Failed to register irq %d", rc);
1195 goto teardown_hwggrp;
1198 dev->configured = 1;
/* Unwind labels: tear down queues then ports, reset counters. */
1203 sso_lf_teardown(dev, SSO_LF_GGRP);
1205 sso_lf_teardown(dev, SSO_LF_GWS);
1206 dev->nb_event_queues = 0;
1207 dev->nb_event_ports = 0;
1208 dev->configured = 0;
/* eventdev op: report the default configuration for an event queue —
 * maximum flow/order-sequence counts, all-types scheduling, and normal
 * priority.
 */
1213 otx2_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
1214 struct rte_event_queue_conf *queue_conf)
1216 RTE_SET_USED(event_dev);
1217 RTE_SET_USED(queue_id);
1219 queue_conf->nb_atomic_flows = (1ULL << 20);
1220 queue_conf->nb_atomic_order_sequences = (1ULL << 20);
1221 queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
1222 queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
/* eventdev op: set up an event queue by programming its GGRP priority
 * via the AF mailbox.  The eventdev 0-255 priority range is mapped onto
 * the SSO's 8 hardware levels; affinity 0xFF means no core affinity
 * restriction.
 */
1226 otx2_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
1227 const struct rte_event_queue_conf *queue_conf)
1229 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1230 struct otx2_mbox *mbox = dev->mbox;
1231 struct sso_grp_priority *req;
1234 sso_func_trace("Queue=%d prio=%d", queue_id, queue_conf->priority);
1236 req = otx2_mbox_alloc_msg_sso_grp_set_priority(dev->mbox);
1237 req->grp = queue_id;
1239 req->affinity = 0xFF;
1240 /* Normalize <0-255> to <0-7> */
1241 req->priority = queue_conf->priority / 32;
1243 rc = otx2_mbox_process(mbox);
1245 otx2_err("Failed to set priority queue=%d", queue_id);
/* Report the default event-port configuration: new-event threshold taken
 * from the device's max in-flight events, depth of 1 for both enqueue and
 * dequeue (this PMD processes one event at a time per workslot).
 */
1253 otx2_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
1254 struct rte_event_port_conf *port_conf)
1256 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1258 RTE_SET_USED(port_id);
1259 port_conf->new_event_threshold = dev->max_num_events;
1260 port_conf->dequeue_depth = 1;
1261 port_conf->enqueue_depth = 1;
/* Set up a workslot (event port): collect each group's BAR2 CSR base,
 * copy it into the port's private structure along with flow-control and
 * XAQ-limit state, and program the get_work NW_TIM timeout register.
 * Handles both dual-workslot and single-workslot modes.
 */
1265 otx2_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
1266 const struct rte_event_port_conf *port_conf)
1268 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1269 uintptr_t grps_base[OTX2_SSO_MAX_VHGRP] = {0};
1273 sso_func_trace("Port=%d", port_id);
1274 RTE_SET_USED(port_conf);
1276 if (event_dev->data->ports[port_id] == NULL) {
1277 otx2_err("Invalid port Id %d", port_id);
/* Compute the per-group CSR base: BAR2 + (SSO block id << 20 | grp << 12). */
1281 for (q = 0; q < dev->nb_event_queues; q++) {
1282 grps_base[q] = dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20 | q << 12);
1283 if (grps_base[q] == 0) {
1284 otx2_err("Failed to get grp[%d] base addr", q);
1289 /* Set get_work timeout for HWS */
1290 val = NSEC2USEC(dev->deq_tmo_ns) - 1;
/* Dual-workslot mode: program NW_TIM on both internal workslot states. */
1293 struct otx2_ssogws_dual *ws = event_dev->data->ports[port_id];
1295 rte_memcpy(ws->grps_base, grps_base,
1296 sizeof(uintptr_t) * OTX2_SSO_MAX_VHGRP);
1297 ws->fc_mem = dev->fc_mem;
1298 ws->xaq_lmt = dev->xaq_lmt;
1299 ws->tstamp = dev->tstamp;
1300 otx2_write64(val, OTX2_SSOW_GET_BASE_ADDR(
1301 ws->ws_state[0].getwrk_op) + SSOW_LF_GWS_NW_TIM);
1302 otx2_write64(val, OTX2_SSOW_GET_BASE_ADDR(
1303 ws->ws_state[1].getwrk_op) + SSOW_LF_GWS_NW_TIM);
/* Single-workslot mode: one NW_TIM write on the workslot base. */
1305 struct otx2_ssogws *ws = event_dev->data->ports[port_id];
1306 uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);
1308 rte_memcpy(ws->grps_base, grps_base,
1309 sizeof(uintptr_t) * OTX2_SSO_MAX_VHGRP);
1310 ws->fc_mem = dev->fc_mem;
1311 ws->xaq_lmt = dev->xaq_lmt;
1312 ws->tstamp = dev->tstamp;
1313 otx2_write64(val, base + SSOW_LF_GWS_NW_TIM);
1316 otx2_sso_dbg("Port=%d ws=%p", port_id, event_dev->data->ports[port_id]);
/* Convert a dequeue timeout in nanoseconds to timer ticks using the
 * current timer frequency; the device itself is not consulted.
 */
1322 otx2_sso_timeout_ticks(struct rte_eventdev *event_dev, uint64_t ns,
1323 uint64_t *tmo_ticks)
1325 RTE_SET_USED(event_dev);
1326 *tmo_ticks = NSEC2TICK(ns, rte_get_timer_hz());
/* Dump the SSOW (workslot) LF CSRs of @ws to @f for debugging. */
1332 ssogws_dump(struct otx2_ssogws *ws, FILE *f)
1334 uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);
1336 fprintf(f, "SSOW_LF_GWS Base addr 0x%" PRIx64 "\n", (uint64_t)base);
1337 fprintf(f, "SSOW_LF_GWS_LINKS 0x%" PRIx64 "\n",
1338 otx2_read64(base + SSOW_LF_GWS_LINKS));
1339 fprintf(f, "SSOW_LF_GWS_PENDWQP 0x%" PRIx64 "\n",
1340 otx2_read64(base + SSOW_LF_GWS_PENDWQP));
1341 fprintf(f, "SSOW_LF_GWS_PENDSTATE 0x%" PRIx64 "\n",
1342 otx2_read64(base + SSOW_LF_GWS_PENDSTATE));
1343 fprintf(f, "SSOW_LF_GWS_NW_TIM 0x%" PRIx64 "\n",
1344 otx2_read64(base + SSOW_LF_GWS_NW_TIM));
1345 fprintf(f, "SSOW_LF_GWS_TAG 0x%" PRIx64 "\n",
1346 otx2_read64(base + SSOW_LF_GWS_TAG));
1347 fprintf(f, "SSOW_LF_GWS_WQP 0x%" PRIx64 "\n",
/* Fix: the WQP line previously read SSOW_LF_GWS_TAG (copy-paste bug),
 * so the dump printed the tag register twice and never showed WQP.
 */
1348 otx2_read64(base + SSOW_LF_GWS_WQP));
1349 fprintf(f, "SSOW_LF_GWS_SWTP 0x%" PRIx64 "\n",
1350 otx2_read64(base + SSOW_LF_GWS_SWTP));
1351 fprintf(f, "SSOW_LF_GWS_PENDTAG 0x%" PRIx64 "\n",
1352 otx2_read64(base + SSOW_LF_GWS_PENDTAG));
/* Dump the SSO group (event queue) LF CSRs at @base to @f for debugging.
 * Consistency fix: three lines used PRIX64 (uppercase hex) while every
 * other register in both dump helpers prints with PRIx64; normalized to
 * lowercase hex throughout.
 */
1356 ssoggrp_dump(uintptr_t base, FILE *f)
1358 fprintf(f, "SSO_LF_GGRP Base addr 0x%" PRIx64 "\n", (uint64_t)base);
1359 fprintf(f, "SSO_LF_GGRP_QCTL 0x%" PRIx64 "\n",
1360 otx2_read64(base + SSO_LF_GGRP_QCTL));
1361 fprintf(f, "SSO_LF_GGRP_XAQ_CNT 0x%" PRIx64 "\n",
1362 otx2_read64(base + SSO_LF_GGRP_XAQ_CNT));
1363 fprintf(f, "SSO_LF_GGRP_INT_THR 0x%" PRIx64 "\n",
1364 otx2_read64(base + SSO_LF_GGRP_INT_THR));
1365 fprintf(f, "SSO_LF_GGRP_INT_CNT 0x%" PRIx64 "\n",
1366 otx2_read64(base + SSO_LF_GGRP_INT_CNT));
1367 fprintf(f, "SSO_LF_GGRP_AQ_CNT 0x%" PRIx64 "\n",
1368 otx2_read64(base + SSO_LF_GGRP_AQ_CNT));
1369 fprintf(f, "SSO_LF_GGRP_AQ_THR 0x%" PRIx64 "\n",
1370 otx2_read64(base + SSO_LF_GGRP_AQ_THR));
1371 fprintf(f, "SSO_LF_GGRP_MISC_CNT 0x%" PRIx64 "\n",
1372 otx2_read64(base + SSO_LF_GGRP_MISC_CNT));
/* eventdev .dump callback: print the operating mode, then every
 * workslot's SSOW registers and every group's SSO registers.  Group CSR
 * bases are taken from port 0's cached grps_base[] table.
 */
1376 otx2_sso_dump(struct rte_eventdev *event_dev, FILE *f)
1378 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1382 fprintf(f, "[%s] SSO running in [%s] mode\n", __func__, dev->dual_ws ?
1383 "dual_ws" : "single_ws");
1384 /* Dump SSOW registers */
1385 for (port = 0; port < dev->nb_event_ports; port++) {
/* Dual mode: each port holds two workslot states; dump both. */
1387 struct otx2_ssogws_dual *ws =
1388 event_dev->data->ports[port];
1390 fprintf(f, "[%s] SSO dual workslot[%d] vws[%d] dump\n",
1392 ssogws_dump((struct otx2_ssogws *)&ws->ws_state[0], f);
1393 fprintf(f, "[%s]SSO dual workslot[%d] vws[%d] dump\n",
1395 ssogws_dump((struct otx2_ssogws *)&ws->ws_state[1], f);
1397 fprintf(f, "[%s]SSO single workslot[%d] dump\n",
1399 ssogws_dump(event_dev->data->ports[port], f);
1403 /* Dump SSO registers */
1404 for (queue = 0; queue < dev->nb_event_queues; queue++) {
1405 fprintf(f, "[%s]SSO group[%d] dump\n", __func__, queue);
1407 struct otx2_ssogws_dual *ws = event_dev->data->ports[0];
1408 ssoggrp_dump(ws->grps_base[queue], f);
1410 struct otx2_ssogws *ws = event_dev->data->ports[0];
1411 ssoggrp_dump(ws->grps_base[queue], f);
/* Flush callback used while draining queues: forward each in-flight
 * event to the application's dev_stop_flush handler, if one is set.
 * @arg is the rte_eventdev passed through by ssogws_flush_events().
 */
1417 otx2_handle_event(void *arg, struct rte_event event)
1419 struct rte_eventdev *event_dev = arg;
1421 if (event_dev->dev_ops->dev_stop_flush != NULL)
1422 event_dev->dev_ops->dev_stop_flush(event_dev->data->dev_id,
1423 event, event_dev->data->dev_stop_flush_arg);
/* Apply per-group QoS limits parsed from the "qos" devarg.  Each entry
 * carries XAQ/IAQ/TAQ percentages; 0 means 100%.  Entries for queues
 * beyond the configured count are skipped.  All mbox requests are queued
 * in the loop and processed once at the end.
 */
1427 sso_qos_cfg(struct rte_eventdev *event_dev)
1429 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1430 struct sso_grp_qos_cfg *req;
1433 for (i = 0; i < dev->qos_queue_cnt; i++) {
1434 uint8_t xaq_prcnt = dev->qos_parse_data[i].xaq_prcnt;
1435 uint8_t iaq_prcnt = dev->qos_parse_data[i].iaq_prcnt;
1436 uint8_t taq_prcnt = dev->qos_parse_data[i].taq_prcnt;
1438 if (dev->qos_parse_data[i].queue >= dev->nb_event_queues)
1441 req = otx2_mbox_alloc_msg_sso_grp_qos_config(dev->mbox);
1442 req->xaq_limit = (dev->nb_xaq_cfg *
1443 (xaq_prcnt ? xaq_prcnt : 100)) / 100;
/* Fix: TAQ threshold was computed from the IAQ mask/percentage and
 * vice versa (cross-swapped); pair each threshold with its own mask
 * and its own devarg percentage.
 */
1444 req->taq_thr = (SSO_HWGRP_TAQ_MAX_THR_MASK *
1445 (taq_prcnt ? taq_prcnt : 100)) / 100;
1446 req->iaq_thr = (SSO_HWGRP_IAQ_MAX_THR_MASK *
1447 (iaq_prcnt ? iaq_prcnt : 100)) / 100;
1450 if (dev->qos_queue_cnt)
1451 otx2_mbox_process(dev->mbox);
/* Quiesce or re-arm the device around start/stop/reconfigure:
 * reset every workslot (both states in dual mode), refresh fc_mem and
 * xaq_lmt, then drain all groups through workslot 0 and write @enable
 * (0 = disable, 1 = enable) to each group's control register.  Finally
 * invalidate the SSO GWS cache via mailbox.
 */
1455 sso_cleanup(struct rte_eventdev *event_dev, uint8_t enable)
1457 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1460 for (i = 0; i < dev->nb_event_ports; i++) {
1462 struct otx2_ssogws_dual *ws;
1464 ws = event_dev->data->ports[i];
1465 ssogws_reset((struct otx2_ssogws *)&ws->ws_state[0]);
1466 ssogws_reset((struct otx2_ssogws *)&ws->ws_state[1]);
1469 ws->fc_mem = dev->fc_mem;
1470 ws->xaq_lmt = dev->xaq_lmt;
1472 struct otx2_ssogws *ws;
1474 ws = event_dev->data->ports[i];
1477 ws->fc_mem = dev->fc_mem;
1478 ws->xaq_lmt = dev->xaq_lmt;
/* Dual mode: flush through a temporary single-ws view of state[0]. */
1484 struct otx2_ssogws_dual *ws = event_dev->data->ports[0];
1485 struct otx2_ssogws temp_ws;
1487 memcpy(&temp_ws, &ws->ws_state[0],
1488 sizeof(struct otx2_ssogws_state));
1489 for (i = 0; i < dev->nb_event_queues; i++) {
1490 /* Consume all the events through HWS0 */
1491 ssogws_flush_events(&temp_ws, i, ws->grps_base[i],
1492 otx2_handle_event, event_dev);
1493 /* Enable/Disable SSO GGRP */
1494 otx2_write64(enable, ws->grps_base[i] +
1498 struct otx2_ssogws *ws = event_dev->data->ports[0];
1500 for (i = 0; i < dev->nb_event_queues; i++) {
1501 /* Consume all the events through HWS0 */
1502 ssogws_flush_events(ws, i, ws->grps_base[i],
1503 otx2_handle_event, event_dev);
1504 /* Enable/Disable SSO GGRP */
1505 otx2_write64(enable, ws->grps_base[i] +
1510 /* reset SSO GWS cache */
1511 otx2_mbox_alloc_msg_sso_ws_cache_inv(dev->mbox);
1512 otx2_mbox_process(dev->mbox);
/* Rebuild the XAQ pool after a change in required XAE count:
 * quiesce a running device, free the groups' XAQ, release and
 * re-allocate the mempool, re-attach XAQ to the groups, and re-enable
 * the device if it was started.
 */
1516 sso_xae_reconfigure(struct rte_eventdev *event_dev)
1518 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1521 if (event_dev->data->dev_started)
1522 sso_cleanup(event_dev, 0);
1524 rc = sso_ggrp_free_xaq(dev);
1526 otx2_err("Failed to free XAQ\n");
1530 rte_mempool_free(dev->xaq_pool);
1531 dev->xaq_pool = NULL;
1532 rc = sso_xaq_allocate(dev);
1534 otx2_err("Failed to alloc xaq pool %d", rc);
1537 rc = sso_ggrp_alloc_xaq(dev);
1539 otx2_err("Failed to alloc xaq to ggrp %d", rc);
1544 if (event_dev->data->dev_started)
1545 sso_cleanup(event_dev, 1);
/* eventdev .dev_start: apply QoS devargs, enable all groups/workslots,
 * and install the fast-path enqueue/dequeue function pointers.
 */
1551 otx2_sso_start(struct rte_eventdev *event_dev)
1554 sso_qos_cfg(event_dev);
1555 sso_cleanup(event_dev, 1);
1556 sso_fastpath_fns_set(event_dev);
/* eventdev .dev_stop: drain all queues and disable groups/workslots. */
1562 otx2_sso_stop(struct rte_eventdev *event_dev)
1565 sso_cleanup(event_dev, 0);
/* eventdev .dev_close: no-op if never configured; otherwise unregister
 * IRQs, unlink every queue from every port, tear down the GGRP and GWS
 * LFs, and release the XAQ mempool and flow-control memzone.
 */
1570 otx2_sso_close(struct rte_eventdev *event_dev)
1572 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1573 uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
1576 if (!dev->configured)
1579 sso_unregister_irqs(event_dev);
1581 for (i = 0; i < dev->nb_event_queues; i++)
1584 for (i = 0; i < dev->nb_event_ports; i++)
1585 otx2_sso_port_unlink(event_dev, event_dev->data->ports[i],
1586 all_queues, dev->nb_event_queues);
1588 sso_lf_teardown(dev, SSO_LF_GGRP);
1589 sso_lf_teardown(dev, SSO_LF_GWS);
1590 dev->nb_event_ports = 0;
1591 dev->nb_event_queues = 0;
1592 rte_mempool_free(dev->xaq_pool);
1593 rte_memzone_free(rte_memzone_lookup(OTX2_SSO_FC_NAME));
1598 /* Initialize and register event driver with DPDK Application */
1599 static struct eventdev_ops otx2_sso_ops = {
1600 .dev_infos_get = otx2_sso_info_get,
1601 .dev_configure = otx2_sso_configure,
1602 .queue_def_conf = otx2_sso_queue_def_conf,
1603 .queue_setup = otx2_sso_queue_setup,
1604 .queue_release = otx2_sso_queue_release,
1605 .port_def_conf = otx2_sso_port_def_conf,
1606 .port_setup = otx2_sso_port_setup,
1607 .port_release = otx2_sso_port_release,
1608 .port_link = otx2_sso_port_link,
1609 .port_unlink = otx2_sso_port_unlink,
1610 .timeout_ticks = otx2_sso_timeout_ticks,
1612 .eth_rx_adapter_caps_get = otx2_sso_rx_adapter_caps_get,
1613 .eth_rx_adapter_queue_add = otx2_sso_rx_adapter_queue_add,
1614 .eth_rx_adapter_queue_del = otx2_sso_rx_adapter_queue_del,
1615 .eth_rx_adapter_start = otx2_sso_rx_adapter_start,
1616 .eth_rx_adapter_stop = otx2_sso_rx_adapter_stop,
1618 .eth_tx_adapter_caps_get = otx2_sso_tx_adapter_caps_get,
1619 .eth_tx_adapter_queue_add = otx2_sso_tx_adapter_queue_add,
1620 .eth_tx_adapter_queue_del = otx2_sso_tx_adapter_queue_del,
1622 .timer_adapter_caps_get = otx2_tim_caps_get,
1624 .crypto_adapter_caps_get = otx2_ca_caps_get,
1625 .crypto_adapter_queue_pair_add = otx2_ca_qp_add,
1626 .crypto_adapter_queue_pair_del = otx2_ca_qp_del,
1628 .xstats_get = otx2_sso_xstats_get,
1629 .xstats_reset = otx2_sso_xstats_reset,
1630 .xstats_get_names = otx2_sso_xstats_get_names,
1632 .dump = otx2_sso_dump,
1633 .dev_start = otx2_sso_start,
1634 .dev_stop = otx2_sso_stop,
1635 .dev_close = otx2_sso_close,
1636 .dev_selftest = otx2_sso_selftest,
/* Devargs keys accepted by this PMD (see RTE_PMD_REGISTER_PARAM_STRING
 * at the bottom of the file for the expected value formats).
 */
1639 #define OTX2_SSO_XAE_CNT "xae_cnt"
1640 #define OTX2_SSO_SINGLE_WS "single_ws"
1641 #define OTX2_SSO_GGRP_QOS "qos"
1642 #define OTX2_SSO_FORCE_BP "force_rx_bp"
/* Parse one "Qx-XAQ-TAQ-IAQ" QoS tuple (fields separated by '-') into a
 * struct otx2_sso_qos, writing fields byte-by-byte via @val, then grow
 * dev->qos_parse_data with rte_realloc and append the entry.  On realloc
 * failure the old array pointer and count are restored.
 */
1645 parse_queue_param(char *value, void *opaque)
1647 struct otx2_sso_qos queue_qos = {0};
1648 uint8_t *val = (uint8_t *)&queue_qos;
1649 struct otx2_sso_evdev *dev = opaque;
1650 char *tok = strtok(value, "-");
1651 struct otx2_sso_qos *old_ptr;
1656 while (tok != NULL) {
1658 tok = strtok(NULL, "-");
/* All four fields must have been consumed or the tuple is malformed. */
1662 if (val != (&queue_qos.iaq_prcnt + 1)) {
1663 otx2_err("Invalid QoS parameter expected [Qx-XAQ-TAQ-IAQ]");
1667 dev->qos_queue_cnt++;
1668 old_ptr = dev->qos_parse_data;
1669 dev->qos_parse_data = rte_realloc(dev->qos_parse_data,
1670 sizeof(struct otx2_sso_qos) *
1671 dev->qos_queue_cnt, 0);
1672 if (dev->qos_parse_data == NULL) {
/* Roll back: keep the previous array and count on OOM. */
1673 dev->qos_parse_data = old_ptr;
1674 dev->qos_queue_cnt--;
1677 dev->qos_parse_data[dev->qos_queue_cnt - 1] = queue_qos;
/* Split a "[Qx-XAQ-TAQ-IAQ][...]" list on bracket pairs and hand each
 * bracketed tuple to parse_queue_param().  Works on a strdup'd copy
 * since parsing mutates the string.
 */
1681 parse_qos_list(const char *value, void *opaque)
1683 char *s = strdup(value);
1694 if (start && start < end) {
1696 parse_queue_param(start + 1, opaque);
/* kvargs callback for the "qos" key: delegate the bracketed dict value
 * to parse_qos_list().
 */
1707 parse_sso_kvargs_dict(const char *key, const char *value, void *opaque)
1711 /* Dict format [Qx-XAQ-TAQ-IAQ][Qz-XAQ-TAQ-IAQ] use '-' cause ','
1712 * isn't allowed. Everything is expressed in percentages, 0 represents
1715 parse_qos_list(value, opaque);
/* Parse device arguments: xae_cnt (int), single_ws (flag), qos (dict),
 * force_rx_bp (flag), plus the common otx2 devargs.  dual_ws defaults to
 * enabled and is cleared only when single_ws is given.
 */
1721 sso_parse_devargs(struct otx2_sso_evdev *dev, struct rte_devargs *devargs)
1723 struct rte_kvargs *kvlist;
1724 uint8_t single_ws = 0;
1726 if (devargs == NULL)
1728 kvlist = rte_kvargs_parse(devargs->args, NULL);
1732 rte_kvargs_process(kvlist, OTX2_SSO_XAE_CNT, &parse_kvargs_value,
1734 rte_kvargs_process(kvlist, OTX2_SSO_SINGLE_WS, &parse_kvargs_flag,
1736 rte_kvargs_process(kvlist, OTX2_SSO_GGRP_QOS, &parse_sso_kvargs_dict,
1738 rte_kvargs_process(kvlist, OTX2_SSO_FORCE_BP, &parse_kvargs_flag,
1740 otx2_parse_common_devargs(kvlist);
1741 dev->dual_ws = !single_ws;
1742 rte_kvargs_free(kvlist);
/* PCI probe: delegate to the generic eventdev PCI probe helper with our
 * private-data size.
 */
1746 otx2_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
1748 return rte_event_pmd_pci_probe(pci_drv, pci_dev,
1749 sizeof(struct otx2_sso_evdev),
/* PCI remove: delegate to the generic helper with otx2_sso_fini. */
1754 otx2_sso_remove(struct rte_pci_device *pci_dev)
1756 return rte_event_pmd_pci_remove(pci_dev, otx2_sso_fini);
/* PCI IDs matched by this driver: the OCTEON TX2 RVU SSO/TIM PF. */
1759 static const struct rte_pci_id pci_sso_map[] = {
1761 RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
1762 PCI_DEVID_OCTEONTX2_RVU_SSO_TIM_PF)
/* PCI driver descriptor; requires BAR mapping and IOVA-as-VA. */
1769 static struct rte_pci_driver pci_sso = {
1770 .id_table = pci_sso_map,
1771 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
1772 .probe = otx2_sso_probe,
1773 .remove = otx2_sso_remove,
/* Per-eventdev init (primary process): initialize the base otx2 device,
 * query free SSO/SSOW resource counts over mbox, attach the NPA LF,
 * parse devargs (halving port count in dual-workslot mode), and init the
 * TIM (timer adapter) companion device.  Secondary processes only install
 * the fast-path function pointers.  Error paths unwind via the labels
 * after the function body (otx2_npa_lf_uninit / otx2_dev_uninit).
 */
1777 otx2_sso_init(struct rte_eventdev *event_dev)
1779 struct free_rsrcs_rsp *rsrc_cnt;
1780 struct rte_pci_device *pci_dev;
1781 struct otx2_sso_evdev *dev;
1784 event_dev->dev_ops = &otx2_sso_ops;
1785 /* For secondary processes, the primary has done all the work */
1786 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1787 sso_fastpath_fns_set(event_dev);
1791 dev = sso_pmd_priv(event_dev);
1793 pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);
1795 /* Initialize the base otx2_dev object */
1796 rc = otx2_dev_init(pci_dev, dev);
1798 otx2_err("Failed to initialize otx2_dev rc=%d", rc);
1802 /* Get SSO and SSOW MSIX rsrc cnt */
1803 otx2_mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
1804 rc = otx2_mbox_process_msg(dev->mbox, (void *)&rsrc_cnt);
1806 otx2_err("Unable to get free rsrc count");
1807 goto otx2_dev_uninit;
1809 otx2_sso_dbg("SSO %d SSOW %d NPA %d provisioned", rsrc_cnt->sso,
1810 rsrc_cnt->ssow, rsrc_cnt->npa);
/* Clamp to the PMD's compile-time maximums. */
1812 dev->max_event_ports = RTE_MIN(rsrc_cnt->ssow, OTX2_SSO_MAX_VHWS);
1813 dev->max_event_queues = RTE_MIN(rsrc_cnt->sso, OTX2_SSO_MAX_VHGRP);
1814 /* Grab the NPA LF if required */
1815 rc = otx2_npa_lf_init(pci_dev, dev);
1817 otx2_err("Unable to init NPA lf. It might not be provisioned");
1818 goto otx2_dev_uninit;
1821 dev->drv_inited = true;
1822 dev->is_timeout_deq = 0;
1823 dev->min_dequeue_timeout_ns = USEC2NSEC(1);
1824 dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);
1825 dev->max_num_events = -1;
1826 dev->nb_event_queues = 0;
1827 dev->nb_event_ports = 0;
1829 if (!dev->max_event_ports || !dev->max_event_queues) {
1830 otx2_err("Not enough eventdev resource queues=%d ports=%d",
1831 dev->max_event_queues, dev->max_event_ports);
1833 goto otx2_npa_lf_uninit;
1837 sso_parse_devargs(dev, pci_dev->device.devargs);
/* Dual workslot mode consumes two SSOW LFs per event port. */
1839 otx2_sso_dbg("Using dual workslot mode");
1840 dev->max_event_ports = dev->max_event_ports / 2;
1842 otx2_sso_dbg("Using single workslot mode");
1845 otx2_sso_pf_func_set(dev->pf_func);
1846 otx2_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
1847 event_dev->data->name, dev->max_event_queues,
1848 dev->max_event_ports);
1850 otx2_tim_init(pci_dev, (struct otx2_dev *)dev);
1857 otx2_dev_fini(pci_dev, dev);
/* Per-eventdev teardown (primary process only): clear drv_inited and
 * release the base otx2 device, but defer if the shared NPA LF is still
 * in use by sibling devices.
 */
1863 otx2_sso_fini(struct rte_eventdev *event_dev)
1865 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1866 struct rte_pci_device *pci_dev;
1868 /* For secondary processes, nothing to be done */
1869 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1872 pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);
1874 if (!dev->drv_inited)
1877 dev->drv_inited = false;
1881 if (otx2_npa_lf_active(dev)) {
1882 otx2_info("Common resource in use by other devices");
1887 otx2_dev_fini(pci_dev, dev);
/* Register the PMD with EAL: PCI driver, ID table, kernel module
 * dependency (vfio-pci), and the accepted devargs with their formats.
 */
1892 RTE_PMD_REGISTER_PCI(event_octeontx2, pci_sso);
1893 RTE_PMD_REGISTER_PCI_TABLE(event_octeontx2, pci_sso_map);
1894 RTE_PMD_REGISTER_KMOD_DEP(event_octeontx2, "vfio-pci");
1895 RTE_PMD_REGISTER_PARAM_STRING(event_octeontx2, OTX2_SSO_XAE_CNT "=<int>"
1896 OTX2_SSO_SINGLE_WS "=1"
1897 OTX2_SSO_GGRP_QOS "=<string>"
1898 OTX2_SSO_FORCE_BP "=1"
1899 OTX2_NPA_LOCK_MASK "=<1-65535>");