/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation.
 */
4 #include <rte_spinlock.h>
5 #include <rte_service_component.h>
6 #include <rte_ethdev.h>
8 #include "rte_eventdev_pmd.h"
9 #include "rte_eventdev_trace.h"
10 #include "rte_event_eth_tx_adapter.h"
12 #define TXA_BATCH_SIZE 32
13 #define TXA_SERVICE_NAME_LEN 32
14 #define TXA_MEM_NAME_LEN 32
15 #define TXA_FLUSH_THRESHOLD 1024
16 #define TXA_RETRY_CNT 100
17 #define TXA_MAX_NB_TX 128
18 #define TXA_INVALID_DEV_ID INT32_C(-1)
19 #define TXA_INVALID_SERVICE_ID INT64_C(-1)
21 #define txa_evdev(id) (&rte_eventdevs[txa_dev_id_array[(id)]])
23 #define txa_dev_caps_get(id) txa_evdev((id))->dev_ops->eth_tx_adapter_caps_get
25 #define txa_dev_adapter_create(t) txa_evdev(t)->dev_ops->eth_tx_adapter_create
27 #define txa_dev_adapter_create_ext(t) \
28 txa_evdev(t)->dev_ops->eth_tx_adapter_create
30 #define txa_dev_adapter_free(t) txa_evdev(t)->dev_ops->eth_tx_adapter_free
32 #define txa_dev_queue_add(id) txa_evdev(id)->dev_ops->eth_tx_adapter_queue_add
34 #define txa_dev_queue_del(t) txa_evdev(t)->dev_ops->eth_tx_adapter_queue_del
36 #define txa_dev_start(t) txa_evdev(t)->dev_ops->eth_tx_adapter_start
38 #define txa_dev_stop(t) txa_evdev(t)->dev_ops->eth_tx_adapter_stop
40 #define txa_dev_stats_reset(t) txa_evdev(t)->dev_ops->eth_tx_adapter_stats_reset
42 #define txa_dev_stats_get(t) txa_evdev(t)->dev_ops->eth_tx_adapter_stats_get
44 #define RTE_EVENT_ETH_TX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) \
46 if (!txa_valid_id(id)) { \
47 RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d", id); \
52 #define TXA_CHECK_OR_ERR_RET(id) \
55 RTE_EVENT_ETH_TX_ADAPTER_ID_VALID_OR_ERR_RET((id), -EINVAL); \
59 if (!txa_adapter_exist((id))) \
63 #define TXA_CHECK_TXQ(dev, queue) \
65 if ((dev)->data->nb_tx_queues == 0) { \
66 RTE_EDEV_LOG_ERR("No tx queues configured"); \
69 if ((queue) != -1 && \
70 (uint16_t)(queue) >= (dev)->data->nb_tx_queues) { \
71 RTE_EDEV_LOG_ERR("Invalid tx queue_id %" PRIu16, \
77 /* Tx retry callback structure */
79 /* Ethernet port id */
87 /* Per queue structure */
88 struct txa_service_queue_info {
89 /* Queue has been added */
91 /* Retry callback argument */
92 struct txa_retry txa_retry;
94 struct rte_eth_dev_tx_buffer *tx_buf;
97 /* PMD private structure */
98 struct txa_service_data {
99 /* Max mbufs processed in any service function invocation */
101 /* Number of Tx queues in adapter */
103 /* Synchronization with data path */
104 rte_spinlock_t tx_lock;
107 /* Event device identifier */
109 /* Highest port id supported + 1 */
111 /* Loop count to flush Tx buffers */
113 /* Per ethernet device structure */
114 struct txa_service_ethdev *txa_ethdev;
116 struct rte_event_eth_tx_adapter_stats stats;
117 /* Adapter Identifier */
119 /* Conf arg must be freed */
121 /* Configuration callback */
122 rte_event_eth_tx_adapter_conf_cb conf_cb;
123 /* Configuration callback argument */
127 /* Per adapter EAL service */
129 /* Memory allocation name */
130 char mem_name[TXA_MEM_NAME_LEN];
131 } __rte_cache_aligned;
133 /* Per eth device structure */
134 struct txa_service_ethdev {
135 /* Pointer to ethernet device */
136 struct rte_eth_dev *dev;
137 /* Number of queues added */
139 /* PMD specific queue data */
143 /* Array of adapter instances, initialized with event device id
144 * when adapter is created
146 static int *txa_dev_id_array;
148 /* Array of pointers to service implementation data */
149 static struct txa_service_data **txa_service_data_array;
151 static int32_t txa_service_func(void *args);
152 static int txa_service_adapter_create_ext(uint8_t id,
153 struct rte_eventdev *dev,
154 rte_event_eth_tx_adapter_conf_cb conf_cb,
156 static int txa_service_queue_del(uint8_t id,
157 const struct rte_eth_dev *dev,
158 int32_t tx_queue_id);
161 txa_adapter_exist(uint8_t id)
163 return txa_dev_id_array[id] != TXA_INVALID_DEV_ID;
167 txa_valid_id(uint8_t id)
169 return id < RTE_EVENT_ETH_TX_ADAPTER_MAX_INSTANCE;
173 txa_memzone_array_get(const char *name, unsigned int elt_size, int nb_elems)
175 const struct rte_memzone *mz;
178 sz = elt_size * nb_elems;
179 sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
181 mz = rte_memzone_lookup(name);
183 mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
184 RTE_CACHE_LINE_SIZE);
186 RTE_EDEV_LOG_ERR("failed to reserve memzone"
188 PRId32, name, rte_errno);
197 txa_dev_id_array_init(void)
199 if (txa_dev_id_array == NULL) {
202 txa_dev_id_array = txa_memzone_array_get("txa_adapter_array",
204 RTE_EVENT_ETH_TX_ADAPTER_MAX_INSTANCE);
205 if (txa_dev_id_array == NULL)
208 for (i = 0; i < RTE_EVENT_ETH_TX_ADAPTER_MAX_INSTANCE; i++)
209 txa_dev_id_array[i] = TXA_INVALID_DEV_ID;
218 return txa_dev_id_array_init();
222 txa_service_data_init(void)
224 if (txa_service_data_array == NULL) {
225 txa_service_data_array =
226 txa_memzone_array_get("txa_service_data_array",
228 RTE_EVENT_ETH_TX_ADAPTER_MAX_INSTANCE);
229 if (txa_service_data_array == NULL)
236 static inline struct txa_service_data *
237 txa_service_id_to_data(uint8_t id)
239 return txa_service_data_array[id];
242 static inline struct txa_service_queue_info *
243 txa_service_queue(struct txa_service_data *txa, uint16_t port_id,
244 uint16_t tx_queue_id)
246 struct txa_service_queue_info *tqi;
248 if (unlikely(txa->txa_ethdev == NULL || txa->dev_count < port_id + 1))
251 tqi = txa->txa_ethdev[port_id].queues;
253 return likely(tqi != NULL) ? tqi + tx_queue_id : NULL;
257 txa_service_conf_cb(uint8_t __rte_unused id, uint8_t dev_id,
258 struct rte_event_eth_tx_adapter_conf *conf, void *arg)
261 struct rte_eventdev *dev;
262 struct rte_event_port_conf *pc;
263 struct rte_event_dev_config dev_conf;
268 dev = &rte_eventdevs[dev_id];
269 dev_conf = dev->data->dev_conf;
271 started = dev->data->dev_started;
273 rte_event_dev_stop(dev_id);
275 port_id = dev_conf.nb_event_ports;
276 dev_conf.nb_event_ports += 1;
278 ret = rte_event_dev_configure(dev_id, &dev_conf);
280 RTE_EDEV_LOG_ERR("failed to configure event dev %u",
283 if (rte_event_dev_start(dev_id))
289 pc->disable_implicit_release = 0;
290 ret = rte_event_port_setup(dev_id, port_id, pc);
292 RTE_EDEV_LOG_ERR("failed to setup event port %u\n",
295 if (rte_event_dev_start(dev_id))
301 conf->event_port_id = port_id;
302 conf->max_nb_tx = TXA_MAX_NB_TX;
304 ret = rte_event_dev_start(dev_id);
309 txa_service_ethdev_alloc(struct txa_service_data *txa)
311 struct txa_service_ethdev *txa_ethdev;
312 uint16_t i, dev_count;
314 dev_count = rte_eth_dev_count_avail();
315 if (txa->txa_ethdev && dev_count == txa->dev_count)
318 txa_ethdev = rte_zmalloc_socket(txa->mem_name,
319 dev_count * sizeof(*txa_ethdev),
322 if (txa_ethdev == NULL) {
323 RTE_EDEV_LOG_ERR("Failed to alloc txa::txa_ethdev ");
328 memcpy(txa_ethdev, txa->txa_ethdev,
329 txa->dev_count * sizeof(*txa_ethdev));
331 RTE_ETH_FOREACH_DEV(i) {
334 txa_ethdev[i].dev = &rte_eth_devices[i];
337 txa->txa_ethdev = txa_ethdev;
338 txa->dev_count = dev_count;
343 txa_service_queue_array_alloc(struct txa_service_data *txa,
346 struct txa_service_queue_info *tqi;
350 ret = txa_service_ethdev_alloc(txa);
354 if (txa->txa_ethdev[port_id].queues)
357 nb_queue = txa->txa_ethdev[port_id].dev->data->nb_tx_queues;
358 tqi = rte_zmalloc_socket(txa->mem_name,
360 sizeof(struct txa_service_queue_info), 0,
364 txa->txa_ethdev[port_id].queues = tqi;
369 txa_service_queue_array_free(struct txa_service_data *txa,
372 struct txa_service_ethdev *txa_ethdev;
373 struct txa_service_queue_info *tqi;
375 txa_ethdev = &txa->txa_ethdev[port_id];
376 if (txa->txa_ethdev == NULL || txa_ethdev->nb_queues != 0)
379 tqi = txa_ethdev->queues;
380 txa_ethdev->queues = NULL;
383 if (txa->nb_queues == 0) {
384 rte_free(txa->txa_ethdev);
385 txa->txa_ethdev = NULL;
390 txa_service_unregister(struct txa_service_data *txa)
392 if (txa->service_id != TXA_INVALID_SERVICE_ID) {
393 rte_service_component_runstate_set(txa->service_id, 0);
394 while (rte_service_may_be_active(txa->service_id))
396 rte_service_component_unregister(txa->service_id);
398 txa->service_id = TXA_INVALID_SERVICE_ID;
402 txa_service_register(struct txa_service_data *txa)
405 struct rte_service_spec service;
406 struct rte_event_eth_tx_adapter_conf conf;
408 if (txa->service_id != TXA_INVALID_SERVICE_ID)
411 memset(&service, 0, sizeof(service));
412 snprintf(service.name, TXA_SERVICE_NAME_LEN, "txa_%d", txa->id);
413 service.socket_id = txa->socket_id;
414 service.callback = txa_service_func;
415 service.callback_userdata = txa;
416 service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
417 ret = rte_service_component_register(&service,
418 (uint32_t *)&txa->service_id);
420 RTE_EDEV_LOG_ERR("failed to register service %s err = %"
421 PRId32, service.name, ret);
425 ret = txa->conf_cb(txa->id, txa->eventdev_id, &conf, txa->conf_arg);
427 txa_service_unregister(txa);
431 rte_service_component_runstate_set(txa->service_id, 1);
432 txa->port_id = conf.event_port_id;
433 txa->max_nb_tx = conf.max_nb_tx;
437 static struct rte_eth_dev_tx_buffer *
438 txa_service_tx_buf_alloc(struct txa_service_data *txa,
439 const struct rte_eth_dev *dev)
441 struct rte_eth_dev_tx_buffer *tb;
444 port_id = dev->data->port_id;
445 tb = rte_zmalloc_socket(txa->mem_name,
446 RTE_ETH_TX_BUFFER_SIZE(TXA_BATCH_SIZE),
448 rte_eth_dev_socket_id(port_id));
450 RTE_EDEV_LOG_ERR("Failed to allocate memory for tx buffer");
455 txa_service_is_queue_added(struct txa_service_data *txa,
456 const struct rte_eth_dev *dev,
457 uint16_t tx_queue_id)
459 struct txa_service_queue_info *tqi;
461 tqi = txa_service_queue(txa, dev->data->port_id, tx_queue_id);
462 return tqi && tqi->added;
466 txa_service_ctrl(uint8_t id, int start)
469 struct txa_service_data *txa;
471 txa = txa_service_id_to_data(id);
472 if (txa->service_id == TXA_INVALID_SERVICE_ID)
475 ret = rte_service_runstate_set(txa->service_id, start);
476 if (ret == 0 && !start) {
477 while (rte_service_may_be_active(txa->service_id))
484 txa_service_buffer_retry(struct rte_mbuf **pkts, uint16_t unsent,
487 struct txa_retry *tr;
488 struct txa_service_data *data;
489 struct rte_event_eth_tx_adapter_stats *stats;
491 unsigned int retry = 0;
494 tr = (struct txa_retry *)(uintptr_t)userdata;
495 data = txa_service_id_to_data(tr->id);
496 stats = &data->stats;
499 n = rte_eth_tx_burst(tr->port_id, tr->tx_queue,
500 &pkts[sent], unsent - sent);
503 } while (sent != unsent && retry++ < TXA_RETRY_CNT);
505 for (i = sent; i < unsent; i++)
506 rte_pktmbuf_free(pkts[i]);
508 stats->tx_retry += retry;
509 stats->tx_packets += sent;
510 stats->tx_dropped += unsent - sent;
514 txa_service_tx(struct txa_service_data *txa, struct rte_event *ev,
519 struct rte_event_eth_tx_adapter_stats *stats;
524 for (i = 0; i < n; i++) {
528 struct txa_service_queue_info *tqi;
532 queue = rte_event_eth_tx_adapter_txq_get(m);
534 tqi = txa_service_queue(txa, port, queue);
535 if (unlikely(tqi == NULL || !tqi->added)) {
540 nb_tx += rte_eth_tx_buffer(port, queue, tqi->tx_buf, m);
543 stats->tx_packets += nb_tx;
547 txa_service_func(void *args)
549 struct txa_service_data *txa = args;
553 uint32_t nb_tx, max_nb_tx;
554 struct rte_event ev[TXA_BATCH_SIZE];
556 dev_id = txa->eventdev_id;
557 max_nb_tx = txa->max_nb_tx;
560 if (txa->nb_queues == 0)
563 if (!rte_spinlock_trylock(&txa->tx_lock))
566 for (nb_tx = 0; nb_tx < max_nb_tx; nb_tx += n) {
568 n = rte_event_dequeue_burst(dev_id, port, ev, RTE_DIM(ev), 0);
571 txa_service_tx(txa, ev, n);
574 if ((txa->loop_cnt++ & (TXA_FLUSH_THRESHOLD - 1)) == 0) {
576 struct txa_service_ethdev *tdi;
577 struct txa_service_queue_info *tqi;
578 struct rte_eth_dev *dev;
581 tdi = txa->txa_ethdev;
584 RTE_ETH_FOREACH_DEV(i) {
587 if (i == txa->dev_count)
591 if (tdi[i].nb_queues == 0)
593 for (q = 0; q < dev->data->nb_tx_queues; q++) {
595 tqi = txa_service_queue(txa, i, q);
596 if (unlikely(tqi == NULL || !tqi->added))
599 nb_tx += rte_eth_tx_buffer_flush(i, q,
604 txa->stats.tx_packets += nb_tx;
606 rte_spinlock_unlock(&txa->tx_lock);
611 txa_service_adapter_create(uint8_t id, struct rte_eventdev *dev,
612 struct rte_event_port_conf *port_conf)
614 struct txa_service_data *txa;
615 struct rte_event_port_conf *cb_conf;
618 cb_conf = rte_malloc(NULL, sizeof(*cb_conf), 0);
622 *cb_conf = *port_conf;
623 ret = txa_service_adapter_create_ext(id, dev, txa_service_conf_cb,
630 txa = txa_service_id_to_data(id);
636 txa_service_adapter_create_ext(uint8_t id, struct rte_eventdev *dev,
637 rte_event_eth_tx_adapter_conf_cb conf_cb,
640 struct txa_service_data *txa;
642 char mem_name[TXA_SERVICE_NAME_LEN];
648 socket_id = dev->data->socket_id;
649 snprintf(mem_name, TXA_MEM_NAME_LEN,
650 "rte_event_eth_txa_%d",
653 ret = txa_service_data_init();
657 txa = rte_zmalloc_socket(mem_name,
659 RTE_CACHE_LINE_SIZE, socket_id);
661 RTE_EDEV_LOG_ERR("failed to get mem for tx adapter");
666 txa->eventdev_id = dev->data->dev_id;
667 txa->socket_id = socket_id;
668 strncpy(txa->mem_name, mem_name, TXA_SERVICE_NAME_LEN);
669 txa->conf_cb = conf_cb;
670 txa->conf_arg = conf_arg;
671 txa->service_id = TXA_INVALID_SERVICE_ID;
672 rte_spinlock_init(&txa->tx_lock);
673 txa_service_data_array[id] = txa;
679 txa_service_event_port_get(uint8_t id, uint8_t *port)
681 struct txa_service_data *txa;
683 txa = txa_service_id_to_data(id);
684 if (txa->service_id == TXA_INVALID_SERVICE_ID)
687 *port = txa->port_id;
692 txa_service_adapter_free(uint8_t id)
694 struct txa_service_data *txa;
696 txa = txa_service_id_to_data(id);
697 if (txa->nb_queues) {
698 RTE_EDEV_LOG_ERR("%" PRIu16 " Tx queues not deleted",
704 rte_free(txa->conf_arg);
710 txa_service_queue_add(uint8_t id,
711 __rte_unused struct rte_eventdev *dev,
712 const struct rte_eth_dev *eth_dev,
715 struct txa_service_data *txa;
716 struct txa_service_ethdev *tdi;
717 struct txa_service_queue_info *tqi;
718 struct rte_eth_dev_tx_buffer *tb;
719 struct txa_retry *txa_retry;
722 txa = txa_service_id_to_data(id);
724 if (tx_queue_id == -1) {
729 nb_queues = eth_dev->data->nb_tx_queues;
730 if (txa->dev_count > eth_dev->data->port_id) {
731 tdi = &txa->txa_ethdev[eth_dev->data->port_id];
732 nb_queues -= tdi->nb_queues;
735 qdone = rte_zmalloc(txa->mem_name,
736 nb_queues * sizeof(*qdone), 0);
738 for (i = 0; i < nb_queues; i++) {
739 if (txa_service_is_queue_added(txa, eth_dev, i))
741 ret = txa_service_queue_add(id, dev, eth_dev, i);
748 if (i != nb_queues) {
749 for (i = 0; i < j; i++)
750 txa_service_queue_del(id, eth_dev, qdone[i]);
756 ret = txa_service_register(txa);
760 rte_spinlock_lock(&txa->tx_lock);
762 if (txa_service_is_queue_added(txa, eth_dev, tx_queue_id)) {
763 rte_spinlock_unlock(&txa->tx_lock);
767 ret = txa_service_queue_array_alloc(txa, eth_dev->data->port_id);
771 tb = txa_service_tx_buf_alloc(txa, eth_dev);
775 tdi = &txa->txa_ethdev[eth_dev->data->port_id];
776 tqi = txa_service_queue(txa, eth_dev->data->port_id, tx_queue_id);
778 txa_retry = &tqi->txa_retry;
779 txa_retry->id = txa->id;
780 txa_retry->port_id = eth_dev->data->port_id;
781 txa_retry->tx_queue = tx_queue_id;
783 rte_eth_tx_buffer_init(tb, TXA_BATCH_SIZE);
784 rte_eth_tx_buffer_set_err_callback(tb,
785 txa_service_buffer_retry, txa_retry);
793 if (txa->nb_queues == 0) {
794 txa_service_queue_array_free(txa,
795 eth_dev->data->port_id);
796 txa_service_unregister(txa);
799 rte_spinlock_unlock(&txa->tx_lock);
804 txa_service_queue_del(uint8_t id,
805 const struct rte_eth_dev *dev,
808 struct txa_service_data *txa;
809 struct txa_service_queue_info *tqi;
810 struct rte_eth_dev_tx_buffer *tb;
813 txa = txa_service_id_to_data(id);
814 port_id = dev->data->port_id;
816 if (tx_queue_id == -1) {
817 uint16_t i, q, nb_queues;
820 nb_queues = txa->nb_queues;
826 tqi = txa->txa_ethdev[port_id].queues;
828 while (i < nb_queues) {
831 ret = txa_service_queue_del(id, dev, q);
841 txa = txa_service_id_to_data(id);
843 tqi = txa_service_queue(txa, port_id, tx_queue_id);
844 if (tqi == NULL || !tqi->added)
852 txa->txa_ethdev[port_id].nb_queues--;
854 txa_service_queue_array_free(txa, port_id);
859 txa_service_id_get(uint8_t id, uint32_t *service_id)
861 struct txa_service_data *txa;
863 txa = txa_service_id_to_data(id);
864 if (txa->service_id == TXA_INVALID_SERVICE_ID)
867 if (service_id == NULL)
870 *service_id = txa->service_id;
875 txa_service_start(uint8_t id)
877 return txa_service_ctrl(id, 1);
881 txa_service_stats_get(uint8_t id,
882 struct rte_event_eth_tx_adapter_stats *stats)
884 struct txa_service_data *txa;
886 txa = txa_service_id_to_data(id);
892 txa_service_stats_reset(uint8_t id)
894 struct txa_service_data *txa;
896 txa = txa_service_id_to_data(id);
897 memset(&txa->stats, 0, sizeof(txa->stats));
902 txa_service_stop(uint8_t id)
904 return txa_service_ctrl(id, 0);
909 rte_event_eth_tx_adapter_create(uint8_t id, uint8_t dev_id,
910 struct rte_event_port_conf *port_conf)
912 struct rte_eventdev *dev;
915 if (port_conf == NULL)
918 RTE_EVENT_ETH_TX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
919 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
921 dev = &rte_eventdevs[dev_id];
927 if (txa_adapter_exist(id))
930 txa_dev_id_array[id] = dev_id;
931 if (txa_dev_adapter_create(id))
932 ret = txa_dev_adapter_create(id)(id, dev);
935 txa_dev_id_array[id] = TXA_INVALID_DEV_ID;
939 ret = txa_service_adapter_create(id, dev, port_conf);
941 if (txa_dev_adapter_free(id))
942 txa_dev_adapter_free(id)(id, dev);
943 txa_dev_id_array[id] = TXA_INVALID_DEV_ID;
946 rte_eventdev_trace_eth_tx_adapter_create(id, dev_id, NULL, port_conf,
948 txa_dev_id_array[id] = dev_id;
953 rte_event_eth_tx_adapter_create_ext(uint8_t id, uint8_t dev_id,
954 rte_event_eth_tx_adapter_conf_cb conf_cb,
957 struct rte_eventdev *dev;
960 RTE_EVENT_ETH_TX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
961 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
967 if (txa_adapter_exist(id))
970 dev = &rte_eventdevs[dev_id];
972 txa_dev_id_array[id] = dev_id;
973 if (txa_dev_adapter_create_ext(id))
974 ret = txa_dev_adapter_create_ext(id)(id, dev);
977 txa_dev_id_array[id] = TXA_INVALID_DEV_ID;
981 ret = txa_service_adapter_create_ext(id, dev, conf_cb, conf_arg);
983 if (txa_dev_adapter_free(id))
984 txa_dev_adapter_free(id)(id, dev);
985 txa_dev_id_array[id] = TXA_INVALID_DEV_ID;
989 rte_eventdev_trace_eth_tx_adapter_create(id, dev_id, conf_cb, conf_arg,
991 txa_dev_id_array[id] = dev_id;
997 rte_event_eth_tx_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
999 TXA_CHECK_OR_ERR_RET(id);
1001 return txa_service_event_port_get(id, event_port_id);
1005 rte_event_eth_tx_adapter_free(uint8_t id)
1009 TXA_CHECK_OR_ERR_RET(id);
1011 ret = txa_dev_adapter_free(id) ?
1012 txa_dev_adapter_free(id)(id, txa_evdev(id)) :
1016 ret = txa_service_adapter_free(id);
1017 txa_dev_id_array[id] = TXA_INVALID_DEV_ID;
1019 rte_eventdev_trace_eth_tx_adapter_free(id, ret);
1024 rte_event_eth_tx_adapter_queue_add(uint8_t id,
1025 uint16_t eth_dev_id,
1028 struct rte_eth_dev *eth_dev;
1032 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
1033 TXA_CHECK_OR_ERR_RET(id);
1035 eth_dev = &rte_eth_devices[eth_dev_id];
1036 TXA_CHECK_TXQ(eth_dev, queue);
1039 if (txa_dev_caps_get(id))
1040 txa_dev_caps_get(id)(txa_evdev(id), eth_dev, &caps);
1042 if (caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)
1043 ret = txa_dev_queue_add(id) ?
1044 txa_dev_queue_add(id)(id,
1049 ret = txa_service_queue_add(id, txa_evdev(id), eth_dev, queue);
1051 rte_eventdev_trace_eth_tx_adapter_queue_add(id, eth_dev_id, queue,
1057 rte_event_eth_tx_adapter_queue_del(uint8_t id,
1058 uint16_t eth_dev_id,
1061 struct rte_eth_dev *eth_dev;
1065 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
1066 TXA_CHECK_OR_ERR_RET(id);
1068 eth_dev = &rte_eth_devices[eth_dev_id];
1072 if (txa_dev_caps_get(id))
1073 txa_dev_caps_get(id)(txa_evdev(id), eth_dev, &caps);
1075 if (caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)
1076 ret = txa_dev_queue_del(id) ?
1077 txa_dev_queue_del(id)(id, txa_evdev(id),
1081 ret = txa_service_queue_del(id, eth_dev, queue);
1083 rte_eventdev_trace_eth_tx_adapter_queue_del(id, eth_dev_id, queue,
1089 rte_event_eth_tx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
1091 TXA_CHECK_OR_ERR_RET(id);
1093 return txa_service_id_get(id, service_id);
1097 rte_event_eth_tx_adapter_start(uint8_t id)
1101 TXA_CHECK_OR_ERR_RET(id);
1103 ret = txa_dev_start(id) ? txa_dev_start(id)(id, txa_evdev(id)) : 0;
1105 ret = txa_service_start(id);
1106 rte_eventdev_trace_eth_tx_adapter_start(id, ret);
1111 rte_event_eth_tx_adapter_stats_get(uint8_t id,
1112 struct rte_event_eth_tx_adapter_stats *stats)
1116 TXA_CHECK_OR_ERR_RET(id);
1121 *stats = (struct rte_event_eth_tx_adapter_stats){0};
1123 ret = txa_dev_stats_get(id) ?
1124 txa_dev_stats_get(id)(id, txa_evdev(id), stats) : 0;
1126 if (ret == 0 && txa_service_id_get(id, NULL) != ESRCH) {
1127 if (txa_dev_stats_get(id)) {
1128 struct rte_event_eth_tx_adapter_stats service_stats;
1130 ret = txa_service_stats_get(id, &service_stats);
1132 stats->tx_retry += service_stats.tx_retry;
1133 stats->tx_packets += service_stats.tx_packets;
1134 stats->tx_dropped += service_stats.tx_dropped;
1137 ret = txa_service_stats_get(id, stats);
1144 rte_event_eth_tx_adapter_stats_reset(uint8_t id)
1148 TXA_CHECK_OR_ERR_RET(id);
1150 ret = txa_dev_stats_reset(id) ?
1151 txa_dev_stats_reset(id)(id, txa_evdev(id)) : 0;
1153 ret = txa_service_stats_reset(id);
1158 rte_event_eth_tx_adapter_stop(uint8_t id)
1162 TXA_CHECK_OR_ERR_RET(id);
1164 ret = txa_dev_stop(id) ? txa_dev_stop(id)(id, txa_evdev(id)) : 0;
1166 ret = txa_service_stop(id);
1167 rte_eventdev_trace_eth_tx_adapter_stop(id, ret);