1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation.
3 */
4 #include <rte_spinlock.h>
5 #include <rte_service_component.h>
6 #include <rte_ethdev.h>
8 #include "rte_eventdev_pmd.h"
9 #include "rte_event_eth_tx_adapter.h"
/* Max mbufs dequeued and transmitted per service-function loop iteration */
11 #define TXA_BATCH_SIZE 32
/* Buffer sizes for the per-adapter service name and memory allocation name */
12 #define TXA_SERVICE_NAME_LEN 32
13 #define TXA_MEM_NAME_LEN 32
/* Tx buffers are flushed once every TXA_FLUSH_THRESHOLD service invocations */
14 #define TXA_FLUSH_THRESHOLD 1024
/* Max re-transmission attempts made by the tx-buffer error callback */
15 #define TXA_RETRY_CNT 100
/* Default upper bound on events processed per service-function call */
16 #define TXA_MAX_NB_TX 128
/* Sentinels: adapter slot has no event device / no service component */
17 #define TXA_INVALID_DEV_ID INT32_C(-1)
18 #define TXA_INVALID_SERVICE_ID INT64_C(-1)
/* Look up the event device backing adapter "id" */
20 #define txa_evdev(id) (&rte_eventdevs[txa_dev_id_array[(id)]])
/* Shorthand accessors for the optional PMD Tx adapter ops; each
 * expands to a function pointer that may be NULL when the PMD does
 * not implement the op (callers must check before invoking).
 */
22 #define txa_dev_caps_get(id) txa_evdev((id))->dev_ops->eth_tx_adapter_caps_get
24 #define txa_dev_adapter_create(t) txa_evdev(t)->dev_ops->eth_tx_adapter_create
26 #define txa_dev_adapter_create_ext(t) \
27 txa_evdev(t)->dev_ops->eth_tx_adapter_create
29 #define txa_dev_adapter_free(t) txa_evdev(t)->dev_ops->eth_tx_adapter_free
31 #define txa_dev_queue_add(id) txa_evdev(id)->dev_ops->eth_tx_adapter_queue_add
33 #define txa_dev_queue_del(t) txa_evdev(t)->dev_ops->eth_tx_adapter_queue_del
35 #define txa_dev_start(t) txa_evdev(t)->dev_ops->eth_tx_adapter_start
37 #define txa_dev_stop(t) txa_evdev(t)->dev_ops->eth_tx_adapter_stop
39 #define txa_dev_stats_reset(t) txa_evdev(t)->dev_ops->eth_tx_adapter_stats_reset
41 #define txa_dev_stats_get(t) txa_evdev(t)->dev_ops->eth_tx_adapter_stats_get
/*
 * Validate a Tx adapter id; on failure log the error and return
 * "retval" from the calling function.  Wrapped in do/while(0) so it
 * behaves as a single statement.
 */
#define RTE_EVENT_ETH_TX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) \
do { \
	if (!txa_valid_id(id)) { \
		/* This is the Tx adapter: log "Tx", not "Rx" */ \
		RTE_EDEV_LOG_ERR("Invalid eth Tx adapter id = %d", id); \
		return retval; \
	} \
} while (0)
/* Validate the adapter id and verify the adapter has been created;
 * returns -EINVAL from the calling function otherwise.  Used at the
 * top of every public API function.
 */
51 #define TXA_CHECK_OR_ERR_RET(id) \
54 RTE_EVENT_ETH_TX_ADAPTER_ID_VALID_OR_ERR_RET((id), -EINVAL); \
58 if (!txa_adapter_exist((id))) \
62 /* Tx retry callback structure */
64 /* Ethernet port id */
72 /* Per queue structure */
73 struct txa_service_queue_info {
74 /* Queue has been added */
76 /* Retry callback argument */
77 struct txa_retry txa_retry;
/* Buffer of mbufs pending transmission on this Tx queue */
79 struct rte_eth_dev_tx_buffer *tx_buf;
82 /* PMD private structure */
83 struct txa_service_data {
84 /* Max mbufs processed in any service function invocation */
86 /* Number of Tx queues in adapter */
88 /* Synchronization with data path */
89 rte_spinlock_t tx_lock;
92 /* Event device identifier */
94 /* Highest port id supported + 1 */
96 /* Loop count to flush Tx buffers */
98 /* Per ethernet device structure */
99 struct txa_service_ethdev *txa_ethdev;
/* Adapter statistics, updated from the service function */
101 struct rte_event_eth_tx_adapter_stats stats;
102 /* Adapter Identifier */
104 /* Conf arg must be freed */
106 /* Configuration callback */
107 rte_event_eth_tx_adapter_conf_cb conf_cb;
108 /* Configuration callback argument */
112 /* Per adapter EAL service */
114 /* Memory allocation name */
115 char mem_name[TXA_MEM_NAME_LEN];
116 } __rte_cache_aligned;
118 /* Per eth device structure */
119 struct txa_service_ethdev {
120 /* Pointer to ethernet device */
121 struct rte_eth_dev *dev;
122 /* Number of queues added */
/* Presumably an array of txa_service_queue_info indexed by Tx queue
 * id follows — accessed elsewhere as txa_ethdev[port].queues.
 */
124 /* PMD specific queue data */
128 /* Array of adapter instances, initialized with event device id
129 * when adapter is created
131 static int *txa_dev_id_array;
133 /* Array of pointers to service implementation data */
134 static struct txa_service_data **txa_service_data_array;
/* Forward declarations for the service-based implementation */
136 static int32_t txa_service_func(void *args);
137 static int txa_service_adapter_create_ext(uint8_t id,
138 struct rte_eventdev *dev,
139 rte_event_eth_tx_adapter_conf_cb conf_cb,
141 static int txa_service_queue_del(uint8_t id,
142 const struct rte_eth_dev *dev,
143 int32_t tx_queue_id);
/* True if adapter "id" has been created, i.e. its slot in the dev id
 * array holds a valid event device id.
 */
146 txa_adapter_exist(uint8_t id)
148 return txa_dev_id_array[id] != TXA_INVALID_DEV_ID;
/* Range check on the adapter id */
152 txa_valid_id(uint8_t id)
154 return id < RTE_EVENT_ETH_TX_ADAPTER_MAX_INSTANCE;
/* Return (reserving on first use) a cache-line aligned memzone of
 * nb_elems * elt_size bytes; a named memzone is shared across
 * primary/secondary processes.
 */
158 txa_memzone_array_get(const char *name, unsigned int elt_size, int nb_elems)
160 const struct rte_memzone *mz;
163 sz = elt_size * nb_elems;
164 sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
/* Reuse the memzone if it was already reserved */
166 mz = rte_memzone_lookup(name);
168 mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
169 RTE_CACHE_LINE_SIZE);
171 RTE_EDEV_LOG_ERR("failed to reserve memzone"
173 PRId32, name, rte_errno);
/* Lazily allocate the adapter-id -> event-device-id array and mark
 * every slot invalid.
 */
182 txa_dev_id_array_init(void)
184 if (txa_dev_id_array == NULL) {
187 txa_dev_id_array = txa_memzone_array_get("txa_adapter_array",
189 RTE_EVENT_ETH_TX_ADAPTER_MAX_INSTANCE);
190 if (txa_dev_id_array == NULL)
193 for (i = 0; i < RTE_EVENT_ETH_TX_ADAPTER_MAX_INSTANCE; i++)
194 txa_dev_id_array[i] = TXA_INVALID_DEV_ID;
/* txa_init(): module init amounts to setting up the dev id array */
203 return txa_dev_id_array_init();
/* Lazily allocate the array of per-adapter service data pointers */
207 txa_service_data_init(void)
209 if (txa_service_data_array == NULL) {
210 txa_service_data_array =
211 txa_memzone_array_get("txa_service_data_array",
213 RTE_EVENT_ETH_TX_ADAPTER_MAX_INSTANCE);
214 if (txa_service_data_array == NULL)
/* Map an adapter id to its service implementation data */
221 static inline struct txa_service_data *
222 txa_service_id_to_data(uint8_t id)
224 return txa_service_data_array[id];
/* Return the queue info for (port_id, tx_queue_id), or NULL when the
 * port is out of range or no queue array has been allocated yet.
 */
227 static inline struct txa_service_queue_info *
228 txa_service_queue(struct txa_service_data *txa, uint16_t port_id,
229 uint16_t tx_queue_id)
231 struct txa_service_queue_info *tqi;
233 if (unlikely(txa->txa_ethdev == NULL || txa->dev_count < port_id + 1))
236 tqi = txa->txa_ethdev[port_id].queues;
238 return likely(tqi != NULL) ? tqi + tx_queue_id : NULL;
242 txa_service_conf_cb(uint8_t __rte_unused id, uint8_t dev_id,
243 struct rte_event_eth_tx_adapter_conf *conf, void *arg)
246 struct rte_eventdev *dev;
247 struct rte_event_port_conf *pc;
248 struct rte_event_dev_config dev_conf;
253 dev = &rte_eventdevs[dev_id];
254 dev_conf = dev->data->dev_conf;
256 started = dev->data->dev_started;
258 rte_event_dev_stop(dev_id);
260 port_id = dev_conf.nb_event_ports;
261 dev_conf.nb_event_ports += 1;
263 ret = rte_event_dev_configure(dev_id, &dev_conf);
265 RTE_EDEV_LOG_ERR("failed to configure event dev %u",
268 if (rte_event_dev_start(dev_id))
274 pc->disable_implicit_release = 0;
275 ret = rte_event_port_setup(dev_id, port_id, pc);
277 RTE_EDEV_LOG_ERR("failed to setup event port %u\n",
280 if (rte_event_dev_start(dev_id))
286 conf->event_port_id = port_id;
287 conf->max_nb_tx = TXA_MAX_NB_TX;
289 ret = rte_event_dev_start(dev_id);
/* Grow (or create) the per-eth-device array so it covers every
 * available port; previously recorded per-device state is preserved.
 */
294 txa_service_ethdev_alloc(struct txa_service_data *txa)
296 struct txa_service_ethdev *txa_ethdev;
297 uint16_t i, dev_count;
299 dev_count = rte_eth_dev_count_avail();
/* Nothing to do if the array already covers all ports */
300 if (txa->txa_ethdev && dev_count == txa->dev_count)
303 txa_ethdev = rte_zmalloc_socket(txa->mem_name,
304 dev_count * sizeof(*txa_ethdev),
307 if (txa_ethdev == NULL) {
308 RTE_EDEV_LOG_ERR("Failed to alloc txa::txa_ethdev ");
/* Copy over the state of previously known devices */
313 memcpy(txa_ethdev, txa->txa_ethdev,
314 txa->dev_count * sizeof(*txa_ethdev));
316 RTE_ETH_FOREACH_DEV(i) {
319 txa_ethdev[i].dev = &rte_eth_devices[i];
322 txa->txa_ethdev = txa_ethdev;
323 txa->dev_count = dev_count;
/* Allocate the per-queue info array for an eth port on first queue
 * add, sized by the port's configured nb_tx_queues.
 */
328 txa_service_queue_array_alloc(struct txa_service_data *txa,
331 struct txa_service_queue_info *tqi;
335 ret = txa_service_ethdev_alloc(txa);
/* Already allocated for this port */
339 if (txa->txa_ethdev[port_id].queues)
342 nb_queue = txa->txa_ethdev[port_id].dev->data->nb_tx_queues;
343 tqi = rte_zmalloc_socket(txa->mem_name,
345 sizeof(struct txa_service_queue_info), 0,
349 txa->txa_ethdev[port_id].queues = tqi;
/* Free the queue array once the port's last queue has been deleted;
 * also releases the eth device array when the adapter has no queues
 * left at all.
 */
354 txa_service_queue_array_free(struct txa_service_data *txa,
357 struct txa_service_ethdev *txa_ethdev;
358 struct txa_service_queue_info *tqi;
/* NOTE(review): &txa->txa_ethdev[port_id] is computed before the
 * NULL check on txa->txa_ethdev below; pointer arithmetic on NULL is
 * undefined behavior — confirm callers never reach here with a NULL
 * array, or hoist the check.
 */
360 txa_ethdev = &txa->txa_ethdev[port_id];
361 if (txa->txa_ethdev == NULL || txa_ethdev->nb_queues != 0)
364 tqi = txa_ethdev->queues;
365 txa_ethdev->queues = NULL;
368 if (txa->nb_queues == 0) {
369 rte_free(txa->txa_ethdev);
370 txa->txa_ethdev = NULL;
/* Disable and unregister the adapter's EAL service component; waits
 * for any in-flight service invocation to finish first.
 */
375 txa_service_unregister(struct txa_service_data *txa)
377 if (txa->service_id != TXA_INVALID_SERVICE_ID) {
378 rte_service_component_runstate_set(txa->service_id, 0);
379 while (rte_service_may_be_active(txa->service_id))
381 rte_service_component_unregister(txa->service_id);
383 txa->service_id = TXA_INVALID_SERVICE_ID;
/* Register the adapter as an EAL service (idempotent) and invoke the
 * configuration callback to obtain the event port and max_nb_tx.
 */
387 txa_service_register(struct txa_service_data *txa)
390 struct rte_service_spec service;
391 struct rte_event_eth_tx_adapter_conf conf;
/* Already registered */
393 if (txa->service_id != TXA_INVALID_SERVICE_ID)
396 memset(&service, 0, sizeof(service));
397 snprintf(service.name, TXA_SERVICE_NAME_LEN, "txa_%d", txa->id);
398 service.socket_id = txa->socket_id;
399 service.callback = txa_service_func;
400 service.callback_userdata = txa;
/* The service function serializes on tx_lock, so MT_SAFE is valid */
401 service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
402 ret = rte_service_component_register(&service,
403 (uint32_t *)&txa->service_id);
405 RTE_EDEV_LOG_ERR("failed to register service %s err = %"
406 PRId32, service.name, ret);
/* Run the user/default conf callback; undo registration on failure */
410 ret = txa->conf_cb(txa->id, txa->eventdev_id, &conf, txa->conf_arg);
412 txa_service_unregister(txa);
416 rte_service_component_runstate_set(txa->service_id, 1);
417 txa->port_id = conf.event_port_id;
418 txa->max_nb_tx = conf.max_nb_tx;
/* Allocate a TXA_BATCH_SIZE-deep rte_eth_dev_tx_buffer on the eth
 * device's NUMA socket.
 */
422 static struct rte_eth_dev_tx_buffer *
423 txa_service_tx_buf_alloc(struct txa_service_data *txa,
424 const struct rte_eth_dev *dev)
426 struct rte_eth_dev_tx_buffer *tb;
429 port_id = dev->data->port_id;
430 tb = rte_zmalloc_socket(txa->mem_name,
431 RTE_ETH_TX_BUFFER_SIZE(TXA_BATCH_SIZE),
433 rte_eth_dev_socket_id(port_id));
435 RTE_EDEV_LOG_ERR("Failed to allocate memory for tx buffer");
/* True if (dev, tx_queue_id) has already been added to the adapter */
440 txa_service_is_queue_added(struct txa_service_data *txa,
441 const struct rte_eth_dev *dev,
442 uint16_t tx_queue_id)
444 struct txa_service_queue_info *tqi;
446 tqi = txa_service_queue(txa, dev->data->port_id, tx_queue_id);
447 return tqi && tqi->added;
/* Set the adapter service's run state; when stopping, wait for the
 * service function to quiesce before returning.
 */
451 txa_service_ctrl(uint8_t id, int start)
454 struct txa_service_data *txa;
456 txa = txa_service_id_to_data(id);
/* No service registered yet — nothing to start or stop */
457 if (txa->service_id == TXA_INVALID_SERVICE_ID)
460 ret = rte_service_runstate_set(txa->service_id, start);
461 if (ret == 0 && !start) {
462 while (rte_service_may_be_active(txa->service_id))
/* Error callback for rte_eth_tx_buffer: retry unsent mbufs up to
 * TXA_RETRY_CNT times, free whatever still cannot be transmitted, and
 * account retries/sent/dropped in the adapter statistics.
 */
469 txa_service_buffer_retry(struct rte_mbuf **pkts, uint16_t unsent,
472 struct txa_retry *tr;
473 struct txa_service_data *data;
474 struct rte_event_eth_tx_adapter_stats *stats;
476 unsigned int retry = 0;
/* userdata carries the (adapter, port, queue) retry context */
479 tr = (struct txa_retry *)(uintptr_t)userdata;
480 data = txa_service_id_to_data(tr->id);
481 stats = &data->stats;
484 n = rte_eth_tx_burst(tr->port_id, tr->tx_queue,
485 &pkts[sent], unsent - sent);
488 } while (sent != unsent && retry++ < TXA_RETRY_CNT);
/* Drop the mbufs that could not be transmitted */
490 for (i = sent; i < unsent; i++)
491 rte_pktmbuf_free(pkts[i]);
493 stats->tx_retry += retry;
494 stats->tx_packets += sent;
495 stats->tx_dropped += unsent - sent;
/* Buffer each event's mbuf onto its destination (port, queue) Tx
 * buffer; mbufs destined for queues not added to the adapter are
 * skipped (see the tqi->added check).
 */
499 txa_service_tx(struct txa_service_data *txa, struct rte_event *ev,
504 struct rte_event_eth_tx_adapter_stats *stats;
509 for (i = 0; i < n; i++) {
513 struct txa_service_queue_info *tqi;
/* Destination Tx queue is carried in the mbuf (txq metadata) */
517 queue = rte_event_eth_tx_adapter_txq_get(m);
519 tqi = txa_service_queue(txa, port, queue);
520 if (unlikely(tqi == NULL || !tqi->added)) {
525 nb_tx += rte_eth_tx_buffer(port, queue, tqi->tx_buf, m);
528 stats->tx_packets += nb_tx;
/* Adapter service function: dequeue up to max_nb_tx events from the
 * adapter's event port and transmit them; every TXA_FLUSH_THRESHOLD
 * invocations, flush all per-queue Tx buffers so partially filled
 * buffers do not linger.  Serialized against the control path and
 * other lcores via tx_lock.
 */
532 txa_service_func(void *args)
534 struct txa_service_data *txa = args;
538 uint32_t nb_tx, max_nb_tx;
539 struct rte_event ev[TXA_BATCH_SIZE];
541 dev_id = txa->eventdev_id;
542 max_nb_tx = txa->max_nb_tx;
/* Nothing to do while no queues are attached */
545 if (txa->nb_queues == 0)
/* Skip this invocation if another lcore holds the lock */
548 if (!rte_spinlock_trylock(&txa->tx_lock))
551 for (nb_tx = 0; nb_tx < max_nb_tx; nb_tx += n) {
553 n = rte_event_dequeue_burst(dev_id, port, ev, RTE_DIM(ev), 0);
556 txa_service_tx(txa, ev, n);
/* Periodic flush of partially filled Tx buffers */
559 if ((txa->loop_cnt++ & (TXA_FLUSH_THRESHOLD - 1)) == 0) {
561 struct txa_service_ethdev *tdi;
562 struct txa_service_queue_info *tqi;
563 struct rte_eth_dev *dev;
566 tdi = txa->txa_ethdev;
569 RTE_ETH_FOREACH_DEV(i) {
572 if (i == txa->dev_count)
/* Skip ports with no queues in the adapter */
576 if (tdi[i].nb_queues == 0)
578 for (q = 0; q < dev->data->nb_tx_queues; q++) {
580 tqi = txa_service_queue(txa, i, q)
581 if (unlikely(tqi == NULL || !tqi->added))
584 nb_tx += rte_eth_tx_buffer_flush(i, q,
589 txa->stats.tx_packets += nb_tx;
591 rte_spinlock_unlock(&txa->tx_lock);
/* Create a service-based adapter with the default conf callback; the
 * caller's port conf is copied into an allocation the adapter owns
 * (and must free), so the caller's copy need not persist.
 */
596 txa_service_adapter_create(uint8_t id, struct rte_eventdev *dev,
597 struct rte_event_port_conf *port_conf)
599 struct txa_service_data *txa;
600 struct rte_event_port_conf *cb_conf;
603 cb_conf = rte_malloc(NULL, sizeof(*cb_conf), 0);
607 *cb_conf = *port_conf;
608 ret = txa_service_adapter_create_ext(id, dev, txa_service_conf_cb,
615 txa = txa_service_id_to_data(id);
621 txa_service_adapter_create_ext(uint8_t id, struct rte_eventdev *dev,
622 rte_event_eth_tx_adapter_conf_cb conf_cb,
625 struct txa_service_data *txa;
627 char mem_name[TXA_SERVICE_NAME_LEN];
633 socket_id = dev->data->socket_id;
634 snprintf(mem_name, TXA_MEM_NAME_LEN,
635 "rte_event_eth_txa_%d",
638 ret = txa_service_data_init();
642 txa = rte_zmalloc_socket(mem_name,
644 RTE_CACHE_LINE_SIZE, socket_id);
646 RTE_EDEV_LOG_ERR("failed to get mem for tx adapter");
651 txa->eventdev_id = dev->data->dev_id;
652 txa->socket_id = socket_id;
653 strncpy(txa->mem_name, mem_name, TXA_SERVICE_NAME_LEN);
654 txa->conf_cb = conf_cb;
655 txa->conf_arg = conf_arg;
656 txa->service_id = TXA_INVALID_SERVICE_ID;
657 rte_spinlock_init(&txa->tx_lock);
658 txa_service_data_array[id] = txa;
/* Return the adapter's event port; fails until the service has been
 * registered (which happens on the first queue add).
 */
664 txa_service_event_port_get(uint8_t id, uint8_t *port)
666 struct txa_service_data *txa;
668 txa = txa_service_id_to_data(id);
669 if (txa->service_id == TXA_INVALID_SERVICE_ID)
672 *port = txa->port_id;
/* Free a service-based adapter; refuses while Tx queues remain added.
 * The adapter owns conf_arg (the copied port conf) and frees it here.
 */
677 txa_service_adapter_free(uint8_t id)
679 struct txa_service_data *txa;
681 txa = txa_service_id_to_data(id);
682 if (txa->nb_queues) {
683 RTE_EDEV_LOG_ERR("%" PRIu16 " Tx queues not deleted",
689 rte_free(txa->conf_arg);
/* Add a Tx queue (or, when tx_queue_id == -1, every Tx queue) of
 * eth_dev to the service-based adapter.  Registers the EAL service on
 * first use and installs the retry error callback on the queue's Tx
 * buffer.
 */
695 txa_service_queue_add(uint8_t id,
696 __rte_unused struct rte_eventdev *dev,
697 const struct rte_eth_dev *eth_dev,
700 struct txa_service_data *txa;
701 struct txa_service_ethdev *tdi;
702 struct txa_service_queue_info *tqi;
703 struct rte_eth_dev_tx_buffer *tb;
704 struct txa_retry *txa_retry;
707 txa = txa_service_id_to_data(id);
/* -1 means add all Tx queues of the device, recursing per queue */
709 if (tx_queue_id == -1) {
714 nb_queues = eth_dev->data->nb_tx_queues;
715 if (txa->dev_count > eth_dev->data->port_id) {
716 tdi = &txa->txa_ethdev[eth_dev->data->port_id];
717 nb_queues -= tdi->nb_queues;
/* Track queues added here so a partial failure can be rolled back */
720 qdone = rte_zmalloc(txa->mem_name,
721 nb_queues * sizeof(*qdone), 0);
723 for (i = 0; i < nb_queues; i++) {
724 if (txa_service_is_queue_added(txa, eth_dev, i))
726 ret = txa_service_queue_add(id, dev, eth_dev, i);
/* Roll back the queues added above on partial failure */
733 if (i != nb_queues) {
734 for (i = 0; i < j; i++)
735 txa_service_queue_del(id, eth_dev, qdone[i]);
/* Single-queue path: lazily register the service first */
741 ret = txa_service_register(txa);
745 rte_spinlock_lock(&txa->tx_lock);
747 if (txa_service_is_queue_added(txa, eth_dev, tx_queue_id)) {
748 rte_spinlock_unlock(&txa->tx_lock);
752 ret = txa_service_queue_array_alloc(txa, eth_dev->data->port_id);
756 tb = txa_service_tx_buf_alloc(txa, eth_dev);
760 tdi = &txa->txa_ethdev[eth_dev->data->port_id];
761 tqi = txa_service_queue(txa, eth_dev->data->port_id, tx_queue_id);
/* Context consumed by txa_service_buffer_retry() on Tx failure */
763 txa_retry = &tqi->txa_retry;
764 txa_retry->id = txa->id;
765 txa_retry->port_id = eth_dev->data->port_id;
766 txa_retry->tx_queue = tx_queue_id;
768 rte_eth_tx_buffer_init(tb, TXA_BATCH_SIZE);
769 rte_eth_tx_buffer_set_err_callback(tb,
770 txa_service_buffer_retry, txa_retry);
/* Error path: undo allocations if nothing ended up added */
778 if (txa->nb_queues == 0) {
779 txa_service_queue_array_free(txa,
780 eth_dev->data->port_id);
781 txa_service_unregister(txa);
784 rte_spinlock_unlock(&txa->tx_lock);
/* Delete a Tx queue (or all queues when tx_queue_id == -1) from the
 * service-based adapter; frees the port's queue array when its last
 * queue is removed.
 */
789 txa_service_queue_del(uint8_t id,
790 const struct rte_eth_dev *dev,
793 struct txa_service_data *txa;
794 struct txa_service_queue_info *tqi;
795 struct rte_eth_dev_tx_buffer *tb;
/* -1 means delete every Tx queue of the device */
798 if (tx_queue_id == -1) {
802 for (i = 0; i < dev->data->nb_tx_queues; i++) {
803 ret = txa_service_queue_del(id, dev, i);
810 txa = txa_service_id_to_data(id);
811 port_id = dev->data->port_id;
/* Nothing to do if the queue was never added */
813 tqi = txa_service_queue(txa, port_id, tx_queue_id);
814 if (tqi == NULL || !tqi->added)
822 txa->txa_ethdev[port_id].nb_queues--;
824 txa_service_queue_array_free(txa, port_id);
/* Return the EAL service id; only valid after the service has been
 * registered (first queue add).
 */
829 txa_service_id_get(uint8_t id, uint32_t *service_id)
831 struct txa_service_data *txa;
833 txa = txa_service_id_to_data(id);
834 if (txa->service_id == TXA_INVALID_SERVICE_ID)
/* NULL service_id is a pure existence probe (see stats_get caller) */
837 if (service_id == NULL)
840 *service_id = txa->service_id;
/* Start the adapter's EAL service */
845 txa_service_start(uint8_t id)
847 return txa_service_ctrl(id, 1);
/* Copy out the adapter statistics */
851 txa_service_stats_get(uint8_t id,
852 struct rte_event_eth_tx_adapter_stats *stats)
854 struct txa_service_data *txa;
856 txa = txa_service_id_to_data(id);
/* Zero the adapter statistics */
862 txa_service_stats_reset(uint8_t id)
864 struct txa_service_data *txa;
866 txa = txa_service_id_to_data(id);
867 memset(&txa->stats, 0, sizeof(txa->stats));
/* Stop the adapter's EAL service */
872 txa_service_stop(uint8_t id)
874 return txa_service_ctrl(id, 0);
/* Public API: create a Tx adapter using the default configuration
 * callback.  Uses the event PMD's create op when implemented,
 * otherwise falls back to the service-based implementation.
 */
878 int __rte_experimental
879 rte_event_eth_tx_adapter_create(uint8_t id, uint8_t dev_id,
880 struct rte_event_port_conf *port_conf)
882 struct rte_eventdev *dev;
885 if (port_conf == NULL)
888 RTE_EVENT_ETH_TX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
889 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
891 dev = &rte_eventdevs[dev_id];
897 if (txa_adapter_exist(id))
/* Map the adapter id to the event device before invoking the PMD,
 * since the txa_dev_* macros resolve the device through this array.
 */
900 txa_dev_id_array[id] = dev_id;
901 if (txa_dev_adapter_create(id))
902 ret = txa_dev_adapter_create(id)(id, dev);
/* Undo the mapping on failure */
905 txa_dev_id_array[id] = TXA_INVALID_DEV_ID;
909 ret = txa_service_adapter_create(id, dev, port_conf);
911 if (txa_dev_adapter_free(id))
912 txa_dev_adapter_free(id)(id, dev);
913 txa_dev_id_array[id] = TXA_INVALID_DEV_ID;
917 txa_dev_id_array[id] = dev_id;
/* Public API: create a Tx adapter with a caller-supplied
 * configuration callback; same PMD-op/service fallback structure as
 * rte_event_eth_tx_adapter_create().
 */
921 int __rte_experimental
922 rte_event_eth_tx_adapter_create_ext(uint8_t id, uint8_t dev_id,
923 rte_event_eth_tx_adapter_conf_cb conf_cb,
926 struct rte_eventdev *dev;
929 RTE_EVENT_ETH_TX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
930 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
936 if (txa_adapter_exist(id))
939 dev = &rte_eventdevs[dev_id];
/* Map id -> event device before invoking the PMD op */
941 txa_dev_id_array[id] = dev_id;
942 if (txa_dev_adapter_create_ext(id))
943 ret = txa_dev_adapter_create_ext(id)(id, dev);
946 txa_dev_id_array[id] = TXA_INVALID_DEV_ID;
950 ret = txa_service_adapter_create_ext(id, dev, conf_cb, conf_arg);
952 if (txa_dev_adapter_free(id))
953 txa_dev_adapter_free(id)(id, dev);
954 txa_dev_id_array[id] = TXA_INVALID_DEV_ID;
958 txa_dev_id_array[id] = dev_id;
/* Public API: return the adapter's event port id */
963 int __rte_experimental
964 rte_event_eth_tx_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
966 TXA_CHECK_OR_ERR_RET(id);
968 return txa_service_event_port_get(id, event_port_id);
/* Public API: free an adapter — PMD op first when present, then the
 * service data; finally clears the id -> event device mapping.
 */
971 int __rte_experimental
972 rte_event_eth_tx_adapter_free(uint8_t id)
976 TXA_CHECK_OR_ERR_RET(id);
978 ret = txa_dev_adapter_free(id) ?
979 txa_dev_adapter_free(id)(id, txa_evdev(id)) :
983 ret = txa_service_adapter_free(id);
984 txa_dev_id_array[id] = TXA_INVALID_DEV_ID;
/* Public API: add a Tx queue to the adapter.  Dispatches to the PMD
 * op when the device reports the INTERNAL_PORT capability, otherwise
 * to the service implementation.  queue == -1 adds all Tx queues of
 * the port.
 */
989 int __rte_experimental
990 rte_event_eth_tx_adapter_queue_add(uint8_t id,
994 struct rte_eth_dev *eth_dev;
998 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
999 TXA_CHECK_OR_ERR_RET(id);
1001 eth_dev = &rte_eth_devices[eth_dev_id];
1002 if (queue != -1 && (uint16_t)queue >= eth_dev->data->nb_tx_queues) {
1003 RTE_EDEV_LOG_ERR("Invalid tx queue_id %" PRIu16,
/* Query the per-(evdev, ethdev) adapter capabilities */
1009 if (txa_dev_caps_get(id))
1010 txa_dev_caps_get(id)(txa_evdev(id), eth_dev, &caps);
1012 if (caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)
1013 ret = txa_dev_queue_add(id) ?
1014 txa_dev_queue_add(id)(id,
1019 ret = txa_service_queue_add(id, txa_evdev(id), eth_dev, queue);
/* Public API: delete a Tx queue from the adapter; mirrors the
 * PMD-op/service dispatch of queue_add.  queue == -1 deletes all Tx
 * queues of the port.
 */
1024 int __rte_experimental
1025 rte_event_eth_tx_adapter_queue_del(uint8_t id,
1026 uint16_t eth_dev_id,
1029 struct rte_eth_dev *eth_dev;
1033 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
1034 TXA_CHECK_OR_ERR_RET(id);
1036 eth_dev = &rte_eth_devices[eth_dev_id];
1037 if (queue != -1 && (uint16_t)queue >= eth_dev->data->nb_tx_queues) {
1038 RTE_EDEV_LOG_ERR("Invalid tx queue_id %" PRIu16,
/* Query capabilities to select the PMD or service path */
1045 if (txa_dev_caps_get(id))
1046 txa_dev_caps_get(id)(txa_evdev(id), eth_dev, &caps);
1048 if (caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)
1049 ret = txa_dev_queue_del(id) ?
1050 txa_dev_queue_del(id)(id, txa_evdev(id),
1054 ret = txa_service_queue_del(id, eth_dev, queue);
/* Public API: return the adapter's EAL service id (service-based
 * implementation only).
 */
1059 int __rte_experimental
1060 rte_event_eth_tx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
1062 TXA_CHECK_OR_ERR_RET(id);
1064 return txa_service_id_get(id, service_id);
/* Public API: start the adapter — PMD op first, then the service */
1067 int __rte_experimental
1068 rte_event_eth_tx_adapter_start(uint8_t id)
1072 TXA_CHECK_OR_ERR_RET(id);
1074 ret = txa_dev_start(id) ? txa_dev_start(id)(id, txa_evdev(id)) : 0;
1076 ret = txa_service_start(id);
/* Public API: get statistics; sums the PMD-reported counters with the
 * service-based counters when both implementations are active.
 */
1080 int __rte_experimental
1081 rte_event_eth_tx_adapter_stats_get(uint8_t id,
1082 struct rte_event_eth_tx_adapter_stats *stats)
1086 TXA_CHECK_OR_ERR_RET(id);
1091 *stats = (struct rte_event_eth_tx_adapter_stats){0};
1093 ret = txa_dev_stats_get(id) ?
1094 txa_dev_stats_get(id)(id, txa_evdev(id), stats) : 0;
/* Fold in service stats if the adapter also has a service component
 * (txa_service_id_get(id, NULL) probes for its existence).
 */
1096 if (ret == 0 && txa_service_id_get(id, NULL) != ESRCH) {
1097 if (txa_dev_stats_get(id)) {
1098 struct rte_event_eth_tx_adapter_stats service_stats;
1100 ret = txa_service_stats_get(id, &service_stats);
1102 stats->tx_retry += service_stats.tx_retry;
1103 stats->tx_packets += service_stats.tx_packets;
1104 stats->tx_dropped += service_stats.tx_dropped;
1107 ret = txa_service_stats_get(id, stats);
/* Public API: reset PMD statistics, then the service statistics */
1113 int __rte_experimental
1114 rte_event_eth_tx_adapter_stats_reset(uint8_t id)
1118 TXA_CHECK_OR_ERR_RET(id);
1120 ret = txa_dev_stats_reset(id) ?
1121 txa_dev_stats_reset(id)(id, txa_evdev(id)) : 0;
1123 ret = txa_service_stats_reset(id);
/* Public API: stop the adapter — PMD op first, then the service */
1127 int __rte_experimental
1128 rte_event_eth_tx_adapter_stop(uint8_t id)
1132 TXA_CHECK_OR_ERR_RET(id);
1134 ret = txa_dev_stop(id) ? txa_dev_stop(id)(id, txa_evdev(id)) : 0;
1136 ret = txa_service_stop(id);