drivers/event/cnxk/cnxk_eventdev_adptr.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cnxk_ethdev.h"
#include "cnxk_eventdev.h"

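/* Track the mempools and timer rings that feed events into the SSO and
 * accumulate their sizes into adptr_xae_cnt, which the subsequent
 * cnxk_sso_xae_reconfigure() call takes into account. Pools and rings
 * already accounted for are skipped; if growing a tracking array fails,
 * the old array is kept but the event count is still updated.
 */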
void
cnxk_sso_updt_xae_cnt(struct cnxk_sso_evdev *dev, void *data,
                      uint32_t event_type)
{
        int i;

        switch (event_type) {
        case RTE_EVENT_TYPE_ETHDEV: {
                struct cnxk_eth_rxq_sp *rxq = data;
                uint64_t *old_ptr;

                for (i = 0; i < dev->rx_adptr_pool_cnt; i++) {
                        if ((uint64_t)rxq->qconf.mp == dev->rx_adptr_pools[i])
                                return;
                }

                dev->rx_adptr_pool_cnt++;
                old_ptr = dev->rx_adptr_pools;
                dev->rx_adptr_pools = rte_realloc(
                        dev->rx_adptr_pools,
                        sizeof(uint64_t) * dev->rx_adptr_pool_cnt, 0);
                if (dev->rx_adptr_pools == NULL) {
                        dev->adptr_xae_cnt += rxq->qconf.mp->size;
                        dev->rx_adptr_pools = old_ptr;
                        dev->rx_adptr_pool_cnt--;
                        return;
                }
                dev->rx_adptr_pools[dev->rx_adptr_pool_cnt - 1] =
                        (uint64_t)rxq->qconf.mp;

                dev->adptr_xae_cnt += rxq->qconf.mp->size;
                break;
        }
        case RTE_EVENT_TYPE_ETHDEV_VECTOR: {
                struct rte_mempool *mp = data;
                uint64_t *old_ptr;

                for (i = 0; i < dev->vec_pool_cnt; i++) {
                        if ((uint64_t)mp == dev->vec_pools[i])
                                return;
                }

                dev->vec_pool_cnt++;
                old_ptr = dev->vec_pools;
                dev->vec_pools =
                        rte_realloc(dev->vec_pools,
                                    sizeof(uint64_t) * dev->vec_pool_cnt, 0);
                if (dev->vec_pools == NULL) {
                        dev->adptr_xae_cnt += mp->size;
                        dev->vec_pools = old_ptr;
                        dev->vec_pool_cnt--;
                        return;
                }
                dev->vec_pools[dev->vec_pool_cnt - 1] = (uint64_t)mp;

                dev->adptr_xae_cnt += mp->size;
                break;
        }
        case RTE_EVENT_TYPE_TIMER: {
                struct cnxk_tim_ring *timr = data;
                uint16_t *old_ring_ptr;
                uint64_t *old_sz_ptr;

                for (i = 0; i < dev->tim_adptr_ring_cnt; i++) {
                        if (timr->ring_id != dev->timer_adptr_rings[i])
                                continue;
                        if (timr->nb_timers == dev->timer_adptr_sz[i])
                                return;
                        dev->adptr_xae_cnt -= dev->timer_adptr_sz[i];
                        dev->adptr_xae_cnt += timr->nb_timers;
                        dev->timer_adptr_sz[i] = timr->nb_timers;

                        return;
                }

                dev->tim_adptr_ring_cnt++;
                old_ring_ptr = dev->timer_adptr_rings;
                old_sz_ptr = dev->timer_adptr_sz;

                dev->timer_adptr_rings = rte_realloc(
                        dev->timer_adptr_rings,
                        sizeof(uint16_t) * dev->tim_adptr_ring_cnt, 0);
                if (dev->timer_adptr_rings == NULL) {
                        dev->adptr_xae_cnt += timr->nb_timers;
                        dev->timer_adptr_rings = old_ring_ptr;
                        dev->tim_adptr_ring_cnt--;
                        return;
                }

                dev->timer_adptr_sz = rte_realloc(
                        dev->timer_adptr_sz,
                        sizeof(uint64_t) * dev->tim_adptr_ring_cnt, 0);

                if (dev->timer_adptr_sz == NULL) {
                        dev->adptr_xae_cnt += timr->nb_timers;
                        dev->timer_adptr_sz = old_sz_ptr;
                        dev->tim_adptr_ring_cnt--;
                        return;
                }

                dev->timer_adptr_rings[dev->tim_adptr_ring_cnt - 1] =
                        timr->ring_id;
                dev->timer_adptr_sz[dev->tim_adptr_ring_cnt - 1] =
                        timr->nb_timers;

                dev->adptr_xae_cnt += timr->nb_timers;
                break;
        }
        default:
                break;
        }
}

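/* Point a NIX RQ at the SSO: enable SSO dispatch, program the scheduling
 * type, HWGRP (event queue) and a tag mask derived from the ethdev port id,
 * and optionally use the application-supplied flow id. For RQ 0 with inline
 * inbound IPsec enabled, the security tag constant is updated to match.
 */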
static int
cnxk_sso_rxq_enable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id,
                    uint16_t port_id, const struct rte_event *ev,
                    uint8_t custom_flowid)
{
        struct roc_nix *nix = &cnxk_eth_dev->nix;
        struct roc_nix_rq *rq;
        int rc;

        rq = &cnxk_eth_dev->rqs[rq_id];
        rq->sso_ena = 1;
        rq->tt = ev->sched_type;
        rq->hwgrp = ev->queue_id;
        rq->flow_tag_width = 20;
        rq->wqe_skip = 1;
        rq->tag_mask = (port_id & 0xF) << 20;
        rq->tag_mask |= (((port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV << 4))
                        << 24;

        if (custom_flowid) {
                rq->flow_tag_width = 0;
                rq->tag_mask |= ev->flow_id;
        }

        rc = roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
        if (rc)
                return rc;

        if (rq_id == 0 && roc_nix_inl_inb_is_enabled(nix)) {
                uint32_t sec_tag_const;

                /* The inline IPsec tag constant applies only to bits 32:8 of
                 * the tag (i.e. hardware left-shifts it by 8), so program it
                 * as tag_mask right-shifted by 8.
                 */
                sec_tag_const = rq->tag_mask >> 8;
                rc = roc_nix_inl_inb_tag_update(nix, sec_tag_const,
                                                ev->sched_type);
                if (rc)
                        plt_err("Failed to set tag conf for ipsec, rc=%d", rc);
        }

        return rc;
}

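/* Detach an RQ from the SSO and restore plain CQ/poll mode defaults. */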
static int
cnxk_sso_rxq_disable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id)
{
        struct roc_nix_rq *rq;

        rq = &cnxk_eth_dev->rqs[rq_id];
        rq->sso_ena = 0;
        rq->flow_tag_width = 32;
        rq->tag_mask = 0;

        return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
}

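/* Enable vector WQE (event vectorization) on an RQ that is already in SSO
 * mode: set the vector aura, maximum vector size (stored as a log2
 * exponent) and wait timeout, and retag events as
 * RTE_EVENT_TYPE_ETHDEV_VECTOR.
 */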
static int
cnxk_sso_rx_adapter_vwqe_enable(struct cnxk_eth_dev *cnxk_eth_dev,
                                uint16_t port_id, uint16_t rq_id, uint16_t sz,
                                uint64_t tmo_ns, struct rte_mempool *vmp)
{
        struct roc_nix_rq *rq;

        rq = &cnxk_eth_dev->rqs[rq_id];

        if (!rq->sso_ena)
                return -EINVAL;
        if (rq->flow_tag_width == 0)
                return -EINVAL;

        rq->vwqe_ena = 1;
        rq->vwqe_first_skip = 0;
        rq->vwqe_aura_handle = roc_npa_aura_handle_to_aura(vmp->pool_id);
        rq->vwqe_max_sz_exp = rte_log2_u32(sz);
        rq->vwqe_wait_tmo =
                tmo_ns /
                ((roc_nix_get_vwqe_interval(&cnxk_eth_dev->nix) + 1) * 100);
        rq->tag_mask = (port_id & 0xF) << 20;
        rq->tag_mask |=
                (((port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV_VECTOR << 4))
                << 24;

        return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
}

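/* Rx adapter queue add op. A negative rx_queue_id adds every Rx queue of
 * the ethdev; otherwise the single queue is switched to event mode, the
 * XAE count is updated for the new mempool(s), vector WQE is enabled when
 * requested and NPA backpressure is configured for the queue's pool.
 */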
int
cnxk_sso_rx_adapter_queue_add(
        const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
        int32_t rx_queue_id,
        const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
        struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t port = eth_dev->data->port_id;
        struct cnxk_eth_rxq_sp *rxq_sp;
        int i, rc = 0;

        if (rx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
                        rc |= cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev,
                                                            i, queue_conf);
        } else {
                rxq_sp = cnxk_eth_rxq_to_sp(
                        eth_dev->data->rx_queues[rx_queue_id]);
                cnxk_sso_updt_xae_cnt(dev, rxq_sp, RTE_EVENT_TYPE_ETHDEV);
                rc = cnxk_sso_xae_reconfigure(
                        (struct rte_eventdev *)(uintptr_t)event_dev);
                rc |= cnxk_sso_rxq_enable(
                        cnxk_eth_dev, (uint16_t)rx_queue_id, port,
                        &queue_conf->ev,
                        !!(queue_conf->rx_queue_flags &
                           RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID));
                if (queue_conf->rx_queue_flags &
                    RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
                        cnxk_sso_updt_xae_cnt(dev, queue_conf->vector_mp,
                                              RTE_EVENT_TYPE_ETHDEV_VECTOR);
                        rc |= cnxk_sso_xae_reconfigure(
                                (struct rte_eventdev *)(uintptr_t)event_dev);
                        rc |= cnxk_sso_rx_adapter_vwqe_enable(
                                cnxk_eth_dev, port, rx_queue_id,
                                queue_conf->vector_sz,
                                queue_conf->vector_timeout_ns,
                                queue_conf->vector_mp);

                        if (cnxk_eth_dev->vec_drop_re_dis)
                                rc |= roc_nix_rx_drop_re_set(&cnxk_eth_dev->nix,
                                                             false);
                }
                rox_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
                                      rxq_sp->qconf.mp->pool_id, true,
                                      dev->force_ena_bp);
                cnxk_eth_dev->nb_rxq_sso++;
        }

        if (rc < 0) {
                plt_err("Failed to configure Rx adapter port=%d, q=%d", port,
                        queue_conf->ev.queue_id);
                return rc;
        }

        dev->rx_offloads |= cnxk_eth_dev->rx_offload_flags;

        /* Switch inbound traffic to the PF/VF's NIX LF instead of the inline
         * device once all RQs are in event dev mode. This is done only when
         * the no_inl_dev=1 devarg is selected.
         */
        if (cnxk_eth_dev->inb.no_inl_dev &&
            cnxk_eth_dev->nb_rxq_sso == cnxk_eth_dev->nb_rxq)
                cnxk_nix_inb_mode_set(cnxk_eth_dev, false);

        return 0;
}

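/* Rx adapter queue delete op: restore the RQ(s) to CQ/poll mode, release
 * the NPA backpressure configuration applied at queue add time and
 * re-enable drop_re if it was turned off for vectorization.
 */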
int
cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t rx_queue_id)
{
        struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        struct cnxk_eth_rxq_sp *rxq_sp;
        int i, rc = 0;

        RTE_SET_USED(event_dev);
        if (rx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
                        cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, i);
        } else {
                rxq_sp = cnxk_eth_rxq_to_sp(
                        eth_dev->data->rx_queues[rx_queue_id]);
                rc = cnxk_sso_rxq_disable(cnxk_eth_dev, (uint16_t)rx_queue_id);
                rox_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
                                      rxq_sp->qconf.mp->pool_id, false,
                                      dev->force_ena_bp);
                cnxk_eth_dev->nb_rxq_sso--;

                /* Enable drop_re if it was disabled earlier */
                if (cnxk_eth_dev->vec_drop_re_dis && !cnxk_eth_dev->nb_rxq_sso)
                        rc |= roc_nix_rx_drop_re_set(&cnxk_eth_dev->nix, true);
        }

        if (rc < 0)
                plt_err("Failed to clear Rx adapter config port=%d, q=%d",
                        eth_dev->data->port_id, rx_queue_id);

        /* Removing an RQ from the Rx adapter means the inline device must be
         * used again for CQ/poll mode.
         */
        cnxk_nix_inb_mode_set(cnxk_eth_dev, true);

        return rc;
}

int
cnxk_sso_rx_adapter_start(const struct rte_eventdev *event_dev,
                          const struct rte_eth_dev *eth_dev)
{
        RTE_SET_USED(event_dev);
        RTE_SET_USED(eth_dev);

        return 0;
}

int
cnxk_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
                         const struct rte_eth_dev *eth_dev)
{
        RTE_SET_USED(event_dev);
        RTE_SET_USED(eth_dev);

        return 0;
}

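/* Adjust the SQB aura limit so the SQ cannot consume more SQB buffers than
 * requested, capped at the aura size.
 */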
static int
cnxk_sso_sqb_aura_limit_edit(struct roc_nix_sq *sq, uint16_t nb_sqb_bufs)
{
        int rc;

        if (sq->nb_sqb_bufs != nb_sqb_bufs) {
                rc = roc_npa_aura_limit_modify(
                        sq->aura_handle,
                        RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs));
                if (rc < 0)
                        return rc;

                sq->nb_sqb_bufs = RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs);
        }
        return 0;
}

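/* The Tx adapter data is a flat lookup table: entry [port] keeps the row
 * offset of that port's queues in its upper 16 bits and entry
 * [offset + queue] holds the txq pointer. Initialize the per-port offsets
 * of a freshly allocated table and record the new maxima.
 */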
static void
cnxk_sso_tx_queue_data_init(struct cnxk_sso_evdev *dev, uint64_t *txq_data,
                            uint16_t eth_port_id, uint16_t tx_queue_id)
{
        uint64_t offset = 0;
        int i;

        dev->max_queue_id[0] = RTE_MAX(dev->max_queue_id[0], eth_port_id);
        for (i = 1; i < eth_port_id; i++) {
                offset += (dev->max_queue_id[i - 1] + 1);
                txq_data[i] |= offset << 48;
        }
        dev->max_port_id = RTE_MAX(dev->max_port_id, eth_port_id);
        dev->max_queue_id[eth_port_id] =
                RTE_MAX(dev->max_queue_id[eth_port_id], tx_queue_id);
}

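/* Copy existing txq entries into a grown table when a port id beyond the
 * previous maximum is added, recomputing each port's row offset on the way.
 */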
static void
cnxk_sso_tx_queue_data_cpy(struct cnxk_sso_evdev *dev, uint64_t *txq_data,
                           uint64_t *otxq_data, uint16_t eth_port_id)
{
        uint64_t offset = 0;
        int i, j;

        for (i = 1; i < eth_port_id; i++) {
                offset += (dev->max_queue_id[i - 1] + 1);
                txq_data[i] |= offset << 48;
                for (j = 0;
                     (i < dev->max_port_id) && (j < dev->max_queue_id[i] + 1);
                     j++)
                        txq_data[offset + j] =
                                otxq_data[(otxq_data[i] >> 48) + j];
        }
}

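/* Copy existing txq entries when the table grows because a queue id beyond
 * the previous maximum is added for an already known port.
 */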
static void
cnxk_sso_tx_queue_data_cpy_max(struct cnxk_sso_evdev *dev, uint64_t *txq_data,
                               uint64_t *otxq_data, uint16_t eth_port_id,
                               uint16_t max_port_id, uint16_t max_queue_id)
{
        uint64_t offset = 0;
        int i, j;

        for (i = 1; i < max_port_id + 1; i++) {
                offset += (dev->max_queue_id[i - 1] + 1);
                txq_data[i] |= offset << 48;
                for (j = 0; j < dev->max_queue_id[i] + 1; j++) {
                        if (i == eth_port_id && j > max_queue_id)
                                continue;
                        txq_data[offset + j] =
                                otxq_data[(otxq_data[i] >> 48) + j];
                }
        }
}

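/* Migrate the old lookup table into the reallocated one, choosing the copy
 * routine based on whether a new port or a new queue caused the resize, and
 * update the tracked maxima.
 */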
static void
cnxk_sso_tx_queue_data_rewrite(struct cnxk_sso_evdev *dev, uint64_t *txq_data,
                               uint16_t eth_port_id, uint16_t tx_queue_id,
                               uint64_t *otxq_data, uint16_t max_port_id,
                               uint16_t max_queue_id)
{
        int i;

        for (i = 0; i < dev->max_queue_id[0] + 1; i++)
                txq_data[i] |= (otxq_data[i] & ~((BIT_ULL(16) - 1) << 48));

        if (eth_port_id > max_port_id) {
                dev->max_queue_id[0] =
                        RTE_MAX(dev->max_queue_id[0], eth_port_id);
                dev->max_port_id = RTE_MAX(dev->max_port_id, eth_port_id);

                cnxk_sso_tx_queue_data_cpy(dev, txq_data, otxq_data,
                                           eth_port_id);
                dev->max_queue_id[eth_port_id] =
                        RTE_MAX(dev->max_queue_id[eth_port_id], tx_queue_id);
        } else if (tx_queue_id > max_queue_id) {
                dev->max_queue_id[eth_port_id] =
                        RTE_MAX(dev->max_queue_id[eth_port_id], tx_queue_id);
                dev->max_port_id = RTE_MAX(max_port_id, eth_port_id);
                cnxk_sso_tx_queue_data_cpy_max(dev, txq_data, otxq_data,
                                               eth_port_id, max_port_id,
                                               max_queue_id);
        }
}

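/* Compute the required lookup table size and the row offset for the port
 * being added; a resulting size of zero means the existing table already
 * covers this port/queue pair.
 */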
static void
cnxk_sso_tx_queue_data_sz(struct cnxk_sso_evdev *dev, uint16_t eth_port_id,
                          uint16_t tx_queue_id, uint16_t max_port_id,
                          uint16_t max_queue_id, uint64_t *r, size_t *sz)
{
        uint64_t row = 0;
        size_t size = 0;
        int i;

        if (dev->tx_adptr_data == NULL) {
                size = (eth_port_id + 1);
                size += (eth_port_id + tx_queue_id);
                row = 2 * eth_port_id;
                *r = row;
                *sz = size;
                return;
        }

        if (eth_port_id > max_port_id) {
                size = (RTE_MAX(eth_port_id, dev->max_queue_id[0]) + 1);
                for (i = 1; i < eth_port_id; i++)
                        size += (dev->max_queue_id[i] + 1);
                row = size;
                size += (tx_queue_id + 1);
        } else if (tx_queue_id > max_queue_id) {
                size = !eth_port_id ?
                               tx_queue_id + 1 :
                               RTE_MAX(max_port_id, dev->max_queue_id[0]) + 1;
                for (i = 1; i < max_port_id + 1; i++) {
                        if (i == eth_port_id) {
                                row = size;
                                size += tx_queue_id + 1;
                        } else {
                                size += dev->max_queue_id[i] + 1;
                        }
                }
        }
        *r = row;
        *sz = size;
}

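/* Install (or clear, when txq is NULL) a Tx queue pointer in the adapter
 * lookup table, growing and rewriting the table first when the port or
 * queue id exceeds its current coverage. The upper 16 bits of each entry
 * carry row offsets, so a txq pointer using them is rejected.
 */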
static int
cnxk_sso_updt_tx_queue_data(const struct rte_eventdev *event_dev,
                            uint16_t eth_port_id, uint16_t tx_queue_id,
                            void *txq)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t max_queue_id = dev->max_queue_id[eth_port_id];
        uint16_t max_port_id = dev->max_port_id;
        uint64_t *txq_data = NULL;
        uint64_t row = 0;
        size_t size = 0;

        if (((uint64_t)txq) & 0xFFFF000000000000)
                return -EINVAL;

        cnxk_sso_tx_queue_data_sz(dev, eth_port_id, tx_queue_id, max_port_id,
                                  max_queue_id, &row, &size);

        size *= sizeof(uint64_t);

        if (size) {
                uint64_t *otxq_data = dev->tx_adptr_data;

                txq_data = malloc(size);
                if (txq_data == NULL)
                        return -ENOMEM;
                memset(txq_data, 0, size);
                txq_data[eth_port_id] = ((uint64_t)row) << 48;
                txq_data[row + tx_queue_id] = (uint64_t)txq;

                if (otxq_data != NULL)
                        cnxk_sso_tx_queue_data_rewrite(
                                dev, txq_data, eth_port_id, tx_queue_id,
                                otxq_data, max_port_id, max_queue_id);
                else
                        cnxk_sso_tx_queue_data_init(dev, txq_data, eth_port_id,
                                                    tx_queue_id);
                dev->tx_adptr_data_sz = size;
                free(otxq_data);
                dev->tx_adptr_data = txq_data;
        } else {
                txq_data = dev->tx_adptr_data;
                row = txq_data[eth_port_id] >> 48;
                txq_data[row + tx_queue_id] &= ~(BIT_ULL(48) - 1);
                txq_data[row + tx_queue_id] |= (uint64_t)txq;
        }

        return 0;
}

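/* Tx adapter queue add op. A negative tx_queue_id adds every Tx queue of
 * the ethdev; otherwise the SQB aura limit is synced and the queue is
 * registered in the adapter lookup table.
 */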
int
cnxk_sso_tx_adapter_queue_add(const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t tx_queue_id)
{
        struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
        struct roc_nix_sq *sq;
        int i, ret = 0;
        void *txq;

        if (tx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
                        ret |= cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev,
                                                             i);
        } else {
                txq = eth_dev->data->tx_queues[tx_queue_id];
                sq = &cnxk_eth_dev->sqs[tx_queue_id];
                cnxk_sso_sqb_aura_limit_edit(sq, sq->nb_sqb_bufs);
                ret = cnxk_sso_updt_tx_queue_data(
                        event_dev, eth_dev->data->port_id, tx_queue_id, txq);
                if (ret < 0)
                        return ret;
        }

        if (ret < 0) {
                plt_err("Failed to configure Tx adapter port=%d, q=%d",
                        eth_dev->data->port_id, tx_queue_id);
                return ret;
        }

        return 0;
}

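/* Tx adapter queue delete op: sync the SQB aura limit and clear the
 * queue's entry in the adapter lookup table.
 */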
int
cnxk_sso_tx_adapter_queue_del(const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t tx_queue_id)
{
        struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
        struct roc_nix_sq *sq;
        int i, ret = 0;

        RTE_SET_USED(event_dev);
        if (tx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
                        ret |= cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev,
                                                             i);
        } else {
                sq = &cnxk_eth_dev->sqs[tx_queue_id];
                cnxk_sso_sqb_aura_limit_edit(sq, sq->nb_sqb_bufs);
                ret = cnxk_sso_updt_tx_queue_data(
                        event_dev, eth_dev->data->port_id, tx_queue_id, NULL);
                if (ret < 0)
                        return ret;
        }

        if (ret < 0) {
                plt_err("Failed to clear Tx adapter config port=%d, q=%d",
                        eth_dev->data->port_id, tx_queue_id);
                return ret;
        }

        return 0;
}