/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_ether.h"
#include "rte_ethdev.h"
#include "rte_ethdev_driver.h"
#include "ethdev_profile.h"

static int ethdev_logtype;

#define ethdev_log(level, fmt, ...) \
	rte_log(RTE_LOG_ ## level, ethdev_logtype, fmt "\n", ## __VA_ARGS__)
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static uint8_t eth_dev_last_created_port;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
	uint64_t next_owner_id;
	rte_spinlock_t ownership_lock;
	struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *rte_eth_dev_shared_data;
static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) / \
		sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) / \
		sizeof(rte_txq_stats_strings[0]))
#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} rte_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
	RTE_RX_OFFLOAD_BIT2STR(CRC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} rte_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
};

#undef RTE_TX_OFFLOAD_BIT2STR
/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * a pointer to the callback's parameters, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	void *ret_param;                        /**< Return parameter */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};
uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
	       rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
	       rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}
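
/*
 * Usage sketch (illustrative only, not part of this file): applications
 * normally consume rte_eth_find_next() through the RTE_ETH_FOREACH_DEV()
 * iteration macro rather than calling it directly, e.g.:
 *
 *	uint16_t port;
 *
 *	RTE_ETH_FOREACH_DEV(port)
 *		printf("port %u is usable\n", port);
 */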
static void
rte_eth_dev_shared_data_prepare(void)
{
	const unsigned flags = 0;
	const struct rte_memzone *mz;

	rte_spinlock_lock(&rte_eth_shared_data_lock);

	if (rte_eth_dev_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate port data and ownership shared memory. */
			mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
					sizeof(*rte_eth_dev_shared_data),
					rte_socket_id(), flags);
		} else
			mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
		if (mz == NULL)
			rte_panic("Cannot allocate ethdev shared data\n");

		rte_eth_dev_shared_data = mz->addr;
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			rte_eth_dev_shared_data->next_owner_id =
					RTE_ETH_DEV_NO_OWNER + 1;
			rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
			memset(rte_eth_dev_shared_data->data, 0,
			       sizeof(rte_eth_dev_shared_data->data));
		}
	}

	rte_spinlock_unlock(&rte_eth_shared_data_lock);
}
struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}
static uint16_t
rte_eth_dev_find_free_port(void)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Using shared name field to find a free port. */
		if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
			RTE_ASSERT(rte_eth_devices[i].state ==
				   RTE_ETH_DEV_UNUSED);
			return i;
		}
	}
	return RTE_MAX_ETHPORTS;
}
static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &rte_eth_dev_shared_data->data[port_id];
	eth_dev->state = RTE_ETH_DEV_ATTACHED;

	eth_dev_last_created_port = port_id;

	return eth_dev;
}
struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev = NULL;

	rte_eth_dev_shared_data_prepare();

	/* Synchronize port creation between primary and secondary threads. */
	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	port_id = rte_eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		ethdev_log(ERR, "Reached maximum number of Ethernet ports");
		goto unlock;
	}

	if (rte_eth_dev_allocated(name) != NULL) {
		ethdev_log(ERR,
			"Ethernet Device with name %s already allocated!",
			name);
		goto unlock;
	}

	eth_dev = eth_dev_get(port_id);
	snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
	eth_dev->data->port_id = port_id;
	eth_dev->data->mtu = ETHER_MTU;

unlock:
	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	if (eth_dev != NULL)
		_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_NEW, NULL);

	return eth_dev;
}
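
/*
 * Usage sketch (illustrative only): a PMD probe function typically pairs
 * rte_eth_dev_allocate() in the primary process with
 * rte_eth_dev_attach_secondary() (below) in secondary processes:
 *
 *	struct rte_eth_dev *eth_dev;
 *
 *	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
 *		eth_dev = rte_eth_dev_allocate(name);
 *	else
 *		eth_dev = rte_eth_dev_attach_secondary(name);
 *	if (eth_dev == NULL)
 *		return -ENODEV;
 */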
/*
 * Attach to a port already registered by the primary process, which
 * makes sure that the same device would have the same port id both
 * in the primary and secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev = NULL;

	rte_eth_dev_shared_data_prepare();

	/* Synchronize port attachment to primary port creation and release. */
	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_PMD_DEBUG_TRACE(
			"device %s is not driven by the primary process\n",
			name);
	} else {
		eth_dev = eth_dev_get(i);
		RTE_ASSERT(eth_dev->data->port_id == i);
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return eth_dev;
}
int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	eth_dev->state = RTE_ETH_DEV_UNUSED;

	memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_DESTROY, NULL);

	return 0;
}
int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}
static int
rte_eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    rte_eth_dev_shared_data->next_owner_id <= owner_id) {
		RTE_PMD_DEBUG_TRACE("Invalid owner_id=%016lX.\n", owner_id);
		return 0;
	}
	return 1;
}
uint64_t __rte_experimental
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
	       ((rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
	       rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED) ||
	       rte_eth_devices[port_id].data->owner.id != owner_id))
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}
int __rte_experimental
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	*owner_id = rte_eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return 0;
}
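
/*
 * Usage sketch (illustrative only): a component that wants exclusive
 * control of a port first claims an owner id, then tags the port:
 *
 *	struct rte_eth_dev_owner owner = { .name = "my_app" };
 *
 *	if (rte_eth_dev_owner_new(&owner.id) == 0)
 *		rte_eth_dev_owner_set(port_id, &owner);
 *
 * The owner name "my_app" is just an example value.
 */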
static int
_rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		       const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev_owner *port_owner;
	int sret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (!rte_eth_is_valid_owner_id(new_owner->id) &&
	    !rte_eth_is_valid_owner_id(old_owner_id))
		return -EINVAL;

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_PMD_DEBUG_TRACE("Cannot set owner to port %d already owned"
				    " by %s_%016lX.\n", port_id,
				    port_owner->name, port_owner->id);
		return -EPERM;
	}

	sret = snprintf(port_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN, "%s",
			new_owner->name);
	if (sret < 0 || sret >= RTE_ETH_MAX_OWNER_NAME_LEN)
		RTE_PMD_DEBUG_TRACE("Port %d owner name was truncated.\n",
				    port_id);

	port_owner->id = new_owner->id;

	RTE_PMD_DEBUG_TRACE("Port %d owner is %s_%016lX.\n", port_id,
			    new_owner->name, new_owner->id);

	return 0;
}
int __rte_experimental
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return ret;
}
int __rte_experimental
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return ret;
}
void __rte_experimental
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	if (rte_eth_is_valid_owner_id(owner_id)) {
		RTE_ETH_FOREACH_DEV_OWNED_BY(port_id, owner_id)
			memset(&rte_eth_devices[port_id].data->owner, 0,
			       sizeof(struct rte_eth_dev_owner));
		RTE_PMD_DEBUG_TRACE("All port owners owned by %016lX identifier"
				    " have been removed.\n", owner_id);
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
}
int __rte_experimental
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	int ret = 0;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	if (!rte_eth_dev_is_valid_port(port_id)) {
		RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		ret = -ENODEV;
	} else {
		rte_memcpy(owner, &rte_eth_devices[port_id].data->owner,
			   sizeof(*owner));
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return ret;
}
int
rte_eth_dev_socket_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}
uint16_t
rte_eth_dev_count(void)
{
	return rte_eth_dev_count_avail();
}

uint16_t __rte_experimental
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	return count;
}

uint16_t __rte_experimental
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	for (port = 0; port < RTE_MAX_ETHPORTS; port++)
		if (rte_eth_devices[port].state != RTE_ETH_DEV_UNUSED)
			count++;

	return count;
}
int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (name == NULL) {
		RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[i].data',
	 * because it might be overwritten by VDEV PMD */
	tmp = rte_eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);
	return 0;
}
int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint32_t pid;

	if (name == NULL) {
		RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
		return -EINVAL;
	}

	for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
		if (rte_eth_devices[pid].state != RTE_ETH_DEV_UNUSED &&
		    !strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;
			return 0;
		}
	}

	return -ENODEV;
}
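
/*
 * Usage sketch (illustrative only): resolving a port id from a device
 * name and back again; "0000:01:00.0" is just an example name.
 *
 *	uint16_t pid;
 *	char name[RTE_ETH_NAME_MAX_LEN];
 *
 *	if (rte_eth_dev_get_port_by_name("0000:01:00.0", &pid) == 0)
 *		rte_eth_dev_get_name_by_port(pid, name);
 */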
static int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}
/* attach the new device, then store port_id of the device */
int
rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
{
	int ret = -1;
	int current = rte_eth_dev_count_total();
	char *name = NULL;
	char *args = NULL;

	if ((devargs == NULL) || (port_id == NULL)) {
		ret = -EINVAL;
		goto err;
	}

	/* parse devargs, then retrieve device name and args */
	if (rte_eal_parse_devargs_str(devargs, &name, &args))
		goto err;

	ret = rte_eal_dev_attach(name, args);
	if (ret < 0)
		goto err;

	/* no point looking at the port count if no port exists */
	if (!rte_eth_dev_count_total()) {
		ethdev_log(ERR, "No port found for device (%s)", name);
		ret = -1;
		goto err;
	}

	/* if nothing happened, there is a bug here, since some driver told us
	 * it did attach a device, but did not create a port.
	 * FIXME: race condition in case of plug-out of another device
	 */
	if (current == rte_eth_dev_count_total()) {
		ret = -1;
		goto err;
	}

	*port_id = eth_dev_last_created_port;
	ret = 0;

err:
	free(name);
	free(args);
	return ret;
}
/* detach the device, then store the name of the device */
int
rte_eth_dev_detach(uint16_t port_id, char *name)
{
	uint32_t dev_flags;
	int ret = -1;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (name == NULL) {
		ret = -EINVAL;
		goto err;
	}

	dev_flags = rte_eth_devices[port_id].data->dev_flags;
	if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
		ethdev_log(ERR,
			"Port %" PRIu16 " is bonded, cannot detach", port_id);
		ret = -ENOTSUP;
		goto err;
	}

	snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
		 "%s", rte_eth_devices[port_id].data->name);

	ret = rte_eal_dev_detach(rte_eth_devices[port_id].device);
	if (ret < 0)
		goto err;

	rte_eth_dev_release_port(&rte_eth_devices[port_id]);
	return 0;

err:
	return ret;
}
static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	void **rxq;
	unsigned i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);
		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (rxq == NULL)
			return -(ENOMEM);
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(rxq + old_nb_queues, 0,
			       sizeof(rxq[0]) * new_qs);
		}

		dev->data->rx_queues = rxq;

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);

		rte_free(dev->data->rx_queues);
		dev->data->rx_queues = NULL;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}
int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (!dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
		    "port %d must be started before starting any queue\n",
		    port_id);
		return -EINVAL;
	}

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
			" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
							     rx_queue_id));
}
int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
			" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}
int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (!dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
		    "port %d must be started before starting any queue\n",
		    port_id);
		return -EINVAL;
	}

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
			" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev,
							     tx_queue_id));
}
int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
			" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}
static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	void **txq;
	unsigned i;

	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
				sizeof(dev->data->tx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);
		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (txq == NULL)
			return -(ENOMEM);
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(txq + old_nb_queues, 0,
			       sizeof(txq[0]) * new_qs);
		}

		dev->data->tx_queues = txq;

	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);

		rte_free(dev->data->tx_queues);
		dev->data->tx_queues = NULL;
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}
uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case ETH_SPEED_NUM_10M:
		return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
	case ETH_SPEED_NUM_100M:
		return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
	case ETH_SPEED_NUM_1G:
		return ETH_LINK_SPEED_1G;
	case ETH_SPEED_NUM_2_5G:
		return ETH_LINK_SPEED_2_5G;
	case ETH_SPEED_NUM_5G:
		return ETH_LINK_SPEED_5G;
	case ETH_SPEED_NUM_10G:
		return ETH_LINK_SPEED_10G;
	case ETH_SPEED_NUM_20G:
		return ETH_LINK_SPEED_20G;
	case ETH_SPEED_NUM_25G:
		return ETH_LINK_SPEED_25G;
	case ETH_SPEED_NUM_40G:
		return ETH_LINK_SPEED_40G;
	case ETH_SPEED_NUM_50G:
		return ETH_LINK_SPEED_50G;
	case ETH_SPEED_NUM_56G:
		return ETH_LINK_SPEED_56G;
	case ETH_SPEED_NUM_100G:
		return ETH_LINK_SPEED_100G;
	default:
		return 0;
	}
}
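
/*
 * Usage sketch (illustrative only): building a link_speeds mask for
 * struct rte_eth_conf from numeric speeds, e.g. fixing the link to
 * 1G or 10G full duplex:
 *
 *	uint32_t speeds =
 *		rte_eth_speed_bitflag(ETH_SPEED_NUM_1G, ETH_LINK_FULL_DUPLEX) |
 *		rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX);
 */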
/**
 * A conversion function from the rxmode bitfield API.
 */
static void
rte_eth_convert_rx_offload_bitfield(const struct rte_eth_rxmode *rxmode,
				    uint64_t *rx_offloads)
{
	uint64_t offloads = 0;

	if (rxmode->header_split == 1)
		offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
	if (rxmode->hw_ip_checksum == 1)
		offloads |= DEV_RX_OFFLOAD_CHECKSUM;
	if (rxmode->hw_vlan_filter == 1)
		offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
	if (rxmode->hw_vlan_strip == 1)
		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
	if (rxmode->hw_vlan_extend == 1)
		offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
	if (rxmode->jumbo_frame == 1)
		offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	if (rxmode->hw_strip_crc == 1)
		offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
	if (rxmode->enable_scatter == 1)
		offloads |= DEV_RX_OFFLOAD_SCATTER;
	if (rxmode->enable_lro == 1)
		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
	if (rxmode->hw_timestamp == 1)
		offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
	if (rxmode->security == 1)
		offloads |= DEV_RX_OFFLOAD_SECURITY;

	*rx_offloads = offloads;
}
/**
 * A conversion function from the rxmode offloads API.
 */
static void
rte_eth_convert_rx_offloads(const uint64_t rx_offloads,
			    struct rte_eth_rxmode *rxmode)
{
	if (rx_offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
		rxmode->header_split = 1;
	else
		rxmode->header_split = 0;
	if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
		rxmode->hw_ip_checksum = 1;
	else
		rxmode->hw_ip_checksum = 0;
	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
		rxmode->hw_vlan_filter = 1;
	else
		rxmode->hw_vlan_filter = 0;
	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
		rxmode->hw_vlan_strip = 1;
	else
		rxmode->hw_vlan_strip = 0;
	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
		rxmode->hw_vlan_extend = 1;
	else
		rxmode->hw_vlan_extend = 0;
	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
		rxmode->jumbo_frame = 1;
	else
		rxmode->jumbo_frame = 0;
	if (rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)
		rxmode->hw_strip_crc = 1;
	else
		rxmode->hw_strip_crc = 0;
	if (rx_offloads & DEV_RX_OFFLOAD_SCATTER)
		rxmode->enable_scatter = 1;
	else
		rxmode->enable_scatter = 0;
	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
		rxmode->enable_lro = 1;
	else
		rxmode->enable_lro = 0;
	if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
		rxmode->hw_timestamp = 1;
	else
		rxmode->hw_timestamp = 0;
	if (rx_offloads & DEV_RX_OFFLOAD_SECURITY)
		rxmode->security = 1;
	else
		rxmode->security = 0;
}
const char * __rte_experimental
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
		if (offload == rte_rx_offload_names[i].offload) {
			name = rte_rx_offload_names[i].name;
			break;
		}
	}

	return name;
}
const char * __rte_experimental
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
		if (offload == rte_tx_offload_names[i].offload) {
			name = rte_tx_offload_names[i].name;
			break;
		}
	}

	return name;
}
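
/*
 * Usage sketch (illustrative only): these helpers take a single offload
 * bit, so a mask has to be walked bit by bit when logging, e.g.:
 *
 *	uint64_t offloads = dev_conf.rxmode.offloads;
 *
 *	while (offloads != 0) {
 *		uint64_t bit = offloads & ~(offloads - 1); // lowest set bit
 *
 *		printf("%s\n", rte_eth_dev_rx_offload_name(bit));
 *		offloads &= ~bit;
 *	}
 */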
int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf local_conf = *dev_conf;
	int diag;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	/* If number of queues specified by application for both Rx and Tx is
	 * zero, use driver preferred values. This cannot be done individually
	 * as it is valid for either Tx or Rx (but not both) to be zero.
	 * If driver does not provide any preferred values, fall back on
	 * EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_PMD_DEBUG_TRACE(
			"Number of RX queues requested (%u) is greater than max supported(%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		return -EINVAL;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_PMD_DEBUG_TRACE(
			"Number of TX queues requested (%u) is greater than max supported(%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n",
		    port_id);
		return -EBUSY;
	}

	/*
	 * Convert between the two rxmode APIs so PMDs only need to
	 * support one of them.
	 */
	if (dev_conf->rxmode.ignore_offload_bitfield == 0) {
		rte_eth_convert_rx_offload_bitfield(
			&dev_conf->rxmode, &local_conf.rxmode.offloads);
	} else {
		rte_eth_convert_rx_offloads(dev_conf->rxmode.offloads,
					    &local_conf.rxmode);
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
				port_id, nb_rx_q, dev_info.max_rx_queues);
		return -EINVAL;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
				port_id, nb_tx_q, dev_info.max_tx_queues);
		return -EINVAL;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
				    dev->device->driver->name);
		return -EINVAL;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n",
				    dev->device->driver->name);
		return -EINVAL;
	}

	/*
	 * If jumbo frames are enabled, check that the maximum RX packet
	 * length is supported by the configured device.
	 */
	if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
			RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" > max valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)dev_info.max_rx_pktlen);
			return -EINVAL;
		} else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
			RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" < min valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)ETHER_MIN_LEN);
			return -EINVAL;
		}
	} else {
		if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
		    dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
			/* Use default value */
			dev->data->dev_conf.rxmode.max_rx_pkt_len =
							ETHER_MAX_LEN;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
				port_id, diag);
		return diag;
	}

	diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
				port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		return diag;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
				port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		return eth_err(port_id, diag);
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_profile_rx_init(port_id, dev);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d __rte_eth_profile_rx_init = %d\n",
				port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		return eth_err(port_id, diag);
	}

	return 0;
}
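
/*
 * Usage sketch (illustrative only): the minimal configure/setup/start
 * sequence an application goes through, error handling omitted. The
 * mbuf pool "mp" is assumed to exist already:
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = { .ignore_offload_bitfield = 1 },
 *	};
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL, mp);
 *	rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
 *	rte_eth_dev_start(port_id);
 */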
void
_rte_eth_dev_reset(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
			"port %d must be stopped to allow reset\n",
			dev->data->port_id);
		return;
	}

	rte_eth_dev_rx_queue_config(dev, 0);
	rte_eth_dev_tx_queue_config(dev, 0);

	memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}
static void
rte_eth_dev_config_restore(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	dev = &rte_eth_devices[port_id];

	rte_eth_dev_info_get(port_id, &dev_info);

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info.max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & 1ULL)
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}

	/* replay promiscuous configuration */
	if (rte_eth_promiscuous_get(port_id) == 1)
		rte_eth_promiscuous_enable(port_id);
	else if (rte_eth_promiscuous_get(port_id) == 0)
		rte_eth_promiscuous_disable(port_id);

	/* replay all multicast configuration */
	if (rte_eth_allmulticast_get(port_id) == 1)
		rte_eth_allmulticast_enable(port_id);
	else if (rte_eth_allmulticast_get(port_id) == 0)
		rte_eth_allmulticast_disable(port_id);
}
int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int diag;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
			" already started\n",
			port_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	rte_eth_dev_config_restore(port_id);

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
		(*dev->dev_ops->link_update)(dev, 0);
	}
	return 0;
}
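
/*
 * Usage sketch (illustrative only): teardown mirrors the start sequence
 * shown above, in the inverse order:
 *
 *	rte_eth_dev_stop(port_id);
 *	rte_eth_dev_close(port_id);
 */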
void
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
			" already stopped\n",
			port_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
}
int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}
void
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_close)(dev);

	dev->data->nb_rx_queues = 0;
	rte_free(dev->data->rx_queues);
	dev->data->rx_queues = NULL;
	dev->data->nb_tx_queues = 0;
	rte_free(dev->data->tx_queues);
	dev->data->tx_queues = NULL;
}
int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);

	rte_eth_dev_stop(port_id);
	ret = dev->dev_ops->dev_reset(dev);

	return eth_err(port_id, ret);
}
int __rte_experimental
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);

	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	return ret;
}
int
rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint32_t mbp_buf_size;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf local_conf;
	void **rxq;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n",
		    port_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

	/*
	 * Check the size of the mbuf data buffer.
	 * This value must be provided in the private data of the memory pool.
	 * First check that the memory pool has a valid private data.
	 */
	rte_eth_dev_info_get(port_id, &dev_info);
	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
				mp->name, (int) mp->private_data_size,
				(int) sizeof(struct rte_pktmbuf_pool_private));
		return -ENOSPC;
	}
	mbp_buf_size = rte_pktmbuf_data_room_size(mp);

	if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
		RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
				"(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
				"=%d)\n",
				mp->name,
				(int)mbp_buf_size,
				(int)(RTE_PKTMBUF_HEADROOM +
				      dev_info.min_rx_bufsize),
				(int)RTE_PKTMBUF_HEADROOM,
				(int)dev_info.min_rx_bufsize);
		return -EINVAL;
	}

	/* Use default specified by driver, if nb_rx_desc is zero */
	if (nb_rx_desc == 0) {
		nb_rx_desc = dev_info.default_rxportconf.ring_size;
		/* If driver default is also zero, fall back on EAL default */
		if (nb_rx_desc == 0)
			nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
	}

	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
	    nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
	    nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
		RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
			"should be: <= %hu, >= %hu, and a multiple of %hu\n",
			nb_rx_desc,
			dev_info.rx_desc_lim.nb_max,
			dev_info.rx_desc_lim.nb_min,
			dev_info.rx_desc_lim.nb_align);
		return -EINVAL;
	}

	rxq = dev->data->rx_queues;
	if (rxq[rx_queue_id]) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
					-ENOTSUP);
		(*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
		rxq[rx_queue_id] = NULL;
	}

	if (rx_conf == NULL)
		rx_conf = &dev_info.default_rxconf;

	local_conf = *rx_conf;
	if (dev->data->dev_conf.rxmode.ignore_offload_bitfield == 0) {
		/**
		 * Reflect port offloads to queue offloads in order for
		 * offloads to not be discarded.
		 */
		rte_eth_convert_rx_offload_bitfield(&dev->data->dev_conf.rxmode,
						    &local_conf.offloads);
	}

	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
					      socket_id, &local_conf, mp);
	if (!ret) {
		if (!dev->data->min_rx_buf_size ||
		    dev->data->min_rx_buf_size > mbp_buf_size)
			dev->data->min_rx_buf_size = mbp_buf_size;
	}

	return eth_err(port_id, ret);
}
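
/*
 * Usage sketch (illustrative only): a per-queue setup loop with one mbuf
 * pool per NUMA socket; "pools[]" and "nb_rx_queues" are assumed
 * application-side names:
 *
 *	uint16_t q;
 *	int sock = rte_eth_dev_socket_id(port_id);
 *
 *	for (q = 0; q < nb_rx_queues; q++)
 *		rte_eth_rx_queue_setup(port_id, q, 512, sock, NULL,
 *				       pools[sock == -1 ? 0 : sock]);
 */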
/**
 * A conversion function from the txq_flags API.
 */
static void
rte_eth_convert_txq_flags(const uint32_t txq_flags, uint64_t *tx_offloads)
{
	uint64_t offloads = 0;

	if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
		offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
	if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
		offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
		offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
		offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
		offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
	if ((txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT) &&
	    (txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP))
		offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	*tx_offloads = offloads;
}
/**
 * A conversion function from the Tx offloads API to txq_flags.
 */
static void
rte_eth_convert_txq_offloads(const uint64_t tx_offloads, uint32_t *txq_flags)
{
	uint32_t flags = 0;

	if (!(tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
		flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
	if (!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
		flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
	if (!(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
		flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
	if (!(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
		flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
	if (!(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
		flags |= ETH_TXQ_FLAGS_NOXSUMTCP;
	if (tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		flags |= (ETH_TXQ_FLAGS_NOREFCOUNT | ETH_TXQ_FLAGS_NOMULTMEMP);

	*txq_flags = flags;
}
int
rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
		       uint16_t nb_tx_desc, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf local_conf;
	void **txq;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n",
		    port_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Use default specified by driver, if nb_tx_desc is zero */
	if (nb_tx_desc == 0) {
		nb_tx_desc = dev_info.default_txportconf.ring_size;
		/* If driver default is zero, fall back on EAL default */
		if (nb_tx_desc == 0)
			nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
	}
	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
		RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
			"should be: <= %hu, >= %hu, and a multiple of %hu\n",
			nb_tx_desc,
			dev_info.tx_desc_lim.nb_max,
			dev_info.tx_desc_lim.nb_min,
			dev_info.tx_desc_lim.nb_align);
		return -EINVAL;
	}

	txq = dev->data->tx_queues;
	if (txq[tx_queue_id]) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
					-ENOTSUP);
		(*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
		txq[tx_queue_id] = NULL;
	}

	if (tx_conf == NULL)
		tx_conf = &dev_info.default_txconf;

	/*
	 * Convert between the two Tx queue APIs so PMDs only need to
	 * support one of them.
	 */
	local_conf = *tx_conf;
	if (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) {
		rte_eth_convert_txq_offloads(tx_conf->offloads,
					     &local_conf.txq_flags);
		/* Keep the ignore flag. */
		local_conf.txq_flags |= ETH_TXQ_FLAGS_IGNORE;
	} else {
		rte_eth_convert_txq_flags(tx_conf->txq_flags,
					  &local_conf.offloads);
	}

	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
}
void
rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata __rte_unused)
{
	unsigned i;

	for (i = 0; i < unsent; i++)
		rte_pktmbuf_free(pkts[i]);
}

void
rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata)
{
	uint64_t *count = userdata;
	unsigned i;

	for (i = 0; i < unsent; i++)
		rte_pktmbuf_free(pkts[i]);

	*count += unsent;
}
int
rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
		buffer_tx_error_fn cbfn, void *userdata)
{
	buffer->error_callback = cbfn;
	buffer->error_userdata = userdata;
	return 0;
}
int
rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
{
	int ret = 0;

	if (buffer == NULL)
		return -EINVAL;

	buffer->size = size;
	if (buffer->error_callback == NULL) {
		ret = rte_eth_tx_buffer_set_err_callback(
			buffer, rte_eth_tx_buffer_drop_callback, NULL);
	}

	return ret;
}
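
/*
 * Usage sketch (illustrative only): buffering packets for TX and
 * flushing the remainder; drop semantics come from the default error
 * callback installed by rte_eth_tx_buffer_init():
 *
 *	struct rte_eth_dev_tx_buffer *buf =
 *		rte_zmalloc("tx_buf", RTE_ETH_TX_BUFFER_SIZE(32), 0);
 *
 *	rte_eth_tx_buffer_init(buf, 32);
 *	rte_eth_tx_buffer(port_id, 0, buf, mbuf);
 *	rte_eth_tx_buffer_flush(port_id, 0, buf);
 */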
int
rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

	/* Validate Input Data. Bail if not valid or not supported. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);

	/* Call driver to free pending mbufs. */
	ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
					       free_cnt);
	return eth_err(port_id, ret);
}
void
rte_eth_promiscuous_enable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
	(*dev->dev_ops->promiscuous_enable)(dev);
	dev->data->promiscuous = 1;
}

void
rte_eth_promiscuous_disable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
	dev->data->promiscuous = 0;
	(*dev->dev_ops->promiscuous_disable)(dev);
}

int
rte_eth_promiscuous_get(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	return dev->data->promiscuous;
}
void
rte_eth_allmulticast_enable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
	(*dev->dev_ops->allmulticast_enable)(dev);
	dev->data->all_multicast = 1;
}

void
rte_eth_allmulticast_disable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
	dev->data->all_multicast = 0;
	(*dev->dev_ops->allmulticast_disable)(dev);
}

int
rte_eth_allmulticast_get(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	return dev->data->all_multicast;
}
void
rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc &&
	    dev->data->dev_started)
		rte_eth_linkstatus_get(dev, eth_link);
	else {
		RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 1);
		*eth_link = dev->data->dev_link;
	}
}

void
rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc &&
	    dev->data->dev_started)
		rte_eth_linkstatus_get(dev, eth_link);
	else {
		RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 0);
		*eth_link = dev->data->dev_link;
	}
}
int
rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
	return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
}
int
rte_eth_stats_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
	(*dev->dev_ops->stats_reset)(dev);
	dev->data->rx_mbuf_alloc_failed = 0;

	return 0;
}
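
/*
 * Usage sketch (illustrative only): polling basic counters and resetting
 * the baseline afterwards:
 *
 *	struct rte_eth_stats st;
 *
 *	if (rte_eth_stats_get(port_id, &st) == 0) {
 *		printf("rx=%" PRIu64 " tx=%" PRIu64 "\n",
 *		       st.ipackets, st.opackets);
 *		rte_eth_stats_reset(port_id);
 *	}
 */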
static inline int
get_xstats_basic_count(struct rte_eth_dev *dev)
{
	uint16_t nb_rxqs, nb_txqs;
	int count;

	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	count = RTE_NB_STATS;
	count += nb_rxqs * RTE_NB_RXQ_STATS;
	count += nb_txqs * RTE_NB_TXQ_STATS;

	return count;
}

static int
get_xstats_count(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int count;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	dev = &rte_eth_devices[port_id];
	if (dev->dev_ops->xstats_get_names_by_id != NULL) {
		count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
				NULL, 0);
		if (count < 0)
			return eth_err(port_id, count);
	}
	if (dev->dev_ops->xstats_get_names != NULL) {
		count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
		if (count < 0)
			return eth_err(port_id, count);
	} else
		count = 0;

	count += get_xstats_basic_count(dev);

	return count;
}
int
rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
		uint64_t *id)
{
	int cnt_xstats, idx_xstat;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (!id) {
		RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n");
		return -ENOMEM;
	}

	if (!xstat_name) {
		RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n");
		return -ENOMEM;
	}

	/* Get count */
	cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
	if (cnt_xstats < 0) {
		RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n");
		return -ENODEV;
	}

	/* Get id-name lookup table */
	struct rte_eth_xstat_name xstats_names[cnt_xstats];

	if (cnt_xstats != rte_eth_xstats_get_names_by_id(
			port_id, xstats_names, cnt_xstats, NULL)) {
		RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n");
		return -1;
	}

	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
		if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
			*id = idx_xstat;
			return 0;
		}
	}

	return -EINVAL;
}
/* retrieve basic stats names */
static int
rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names)
{
	int cnt_used_entries = 0;
	uint32_t idx, id_queue;
	uint16_t num_q;

	for (idx = 0; idx < RTE_NB_STATS; idx++) {
		snprintf(xstats_names[cnt_used_entries].name,
			sizeof(xstats_names[0].name),
			"%s", rte_stats_strings[idx].name);
		cnt_used_entries++;
	}
	num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (id_queue = 0; id_queue < num_q; id_queue++) {
		for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
			snprintf(xstats_names[cnt_used_entries].name,
				sizeof(xstats_names[0].name),
				"rx_q%u%s",
				id_queue, rte_rxq_stats_strings[idx].name);
			cnt_used_entries++;
		}
	}
	num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (id_queue = 0; id_queue < num_q; id_queue++) {
		for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
			snprintf(xstats_names[cnt_used_entries].name,
				sizeof(xstats_names[0].name),
				"tx_q%u%s",
				id_queue, rte_txq_stats_strings[idx].name);
			cnt_used_entries++;
		}
	}
	return cnt_used_entries;
}
/* retrieve ethdev extended statistics names */
int
rte_eth_xstats_get_names_by_id(uint16_t port_id,
	struct rte_eth_xstat_name *xstats_names, unsigned int size,
	uint64_t *ids)
{
	struct rte_eth_xstat_name *xstats_names_copy;
	unsigned int no_basic_stat_requested = 1;
	unsigned int no_ext_stat_requested = 1;
	unsigned int expected_entries;
	unsigned int basic_count;
	struct rte_eth_dev *dev;
	unsigned int i;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	basic_count = get_xstats_basic_count(dev);
	ret = get_xstats_count(port_id);
	if (ret < 0)
		return ret;
	expected_entries = (unsigned int)ret;

	/* Return max number of stats if no ids given */
	if (!ids) {
		if (!xstats_names)
			return expected_entries;
		else if (xstats_names && size < expected_entries)
			return expected_entries;
	}

	if (ids && !xstats_names)
		return -EINVAL;

	if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
		uint64_t ids_copy[size];

		for (i = 0; i < size; i++) {
			if (ids[i] < basic_count) {
				no_basic_stat_requested = 0;
				break;
			}

			/*
			 * Convert ids to xstats ids that PMD knows.
			 * ids known by user are basic + extended stats.
			 */
			ids_copy[i] = ids[i] - basic_count;
		}

		if (no_basic_stat_requested)
			return (*dev->dev_ops->xstats_get_names_by_id)(dev,
					xstats_names, ids_copy, size);
	}

	/* Retrieve all stats */
	if (!ids) {
		int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
				expected_entries);
		if (num_stats < 0 || num_stats > (int)expected_entries)
			return num_stats;
		else
			return expected_entries;
	}

	xstats_names_copy = calloc(expected_entries,
		sizeof(struct rte_eth_xstat_name));

	if (!xstats_names_copy) {
		RTE_PMD_DEBUG_TRACE("ERROR: can't allocate memory");
		return -ENOMEM;
	}

	if (ids) {
		for (i = 0; i < size; i++) {
			if (ids[i] >= basic_count) {
				no_ext_stat_requested = 0;
				break;
			}
		}
	}

	/* Fill xstats_names_copy structure */
	if (ids && no_ext_stat_requested) {
		rte_eth_basic_stats_get_names(dev, xstats_names_copy);
	} else {
		ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
			expected_entries);
		if (ret < 0) {
			free(xstats_names_copy);
			return ret;
		}
	}

	/* Filter stats */
	for (i = 0; i < size; i++) {
		if (ids[i] >= expected_entries) {
			RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
			free(xstats_names_copy);
			return -1;
		}
		xstats_names[i] = xstats_names_copy[ids[i]];
	}

	free(xstats_names_copy);
	return size;
}
int
rte_eth_xstats_get_names(uint16_t port_id,
	struct rte_eth_xstat_name *xstats_names,
	unsigned int size)
{
	struct rte_eth_dev *dev;
	int cnt_used_entries;
	int cnt_expected_entries;
	int cnt_driver_entries;

	cnt_expected_entries = get_xstats_count(port_id);
	if (xstats_names == NULL || cnt_expected_entries < 0 ||
			(int)size < cnt_expected_entries)
		return cnt_expected_entries;

	/* port_id checked in get_xstats_count() */
	dev = &rte_eth_devices[port_id];

	cnt_used_entries = rte_eth_basic_stats_get_names(
		dev, xstats_names);

	if (dev->dev_ops->xstats_get_names != NULL) {
		/* If there are any driver-specific xstats, append them
		 * to end of list.
		 */
		cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
			dev,
			xstats_names + cnt_used_entries,
			size - cnt_used_entries);
		if (cnt_driver_entries < 0)
			return eth_err(port_id, cnt_driver_entries);
		cnt_used_entries += cnt_driver_entries;
	}

	return cnt_used_entries;
}
static int
rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
{
	struct rte_eth_dev *dev;
	struct rte_eth_stats eth_stats;
	unsigned int count = 0, i, q;
	uint64_t val, *stats_ptr;
	uint16_t nb_rxqs, nb_txqs;
	int ret;

	ret = rte_eth_stats_get(port_id, &eth_stats);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];

	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	/* global stats */
	for (i = 0; i < RTE_NB_STATS; i++) {
		stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_stats_strings[i].offset);
		val = *stats_ptr;
		xstats[count++].value = val;
	}

	/* per-rxq stats */
	for (q = 0; q < nb_rxqs; q++) {
		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_rxq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}

	/* per-txq stats */
	for (q = 0; q < nb_txqs; q++) {
		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_txq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}
	return count;
}
/* retrieve ethdev extended statistics */
int
rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
			 uint64_t *values, unsigned int size)
{
	unsigned int no_basic_stat_requested = 1;
	unsigned int no_ext_stat_requested = 1;
	unsigned int num_xstats_filled;
	unsigned int basic_count;
	uint16_t expected_entries;
	struct rte_eth_dev *dev;
	unsigned int i;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ret = get_xstats_count(port_id);
	if (ret < 0)
		return ret;
	expected_entries = (uint16_t)ret;
	struct rte_eth_xstat xstats[expected_entries];
	dev = &rte_eth_devices[port_id];
	basic_count = get_xstats_basic_count(dev);

	/* Return max number of stats if no ids given */
	if (!ids) {
		if (!values)
			return expected_entries;
		else if (values && size < expected_entries)
			return expected_entries;
	}

	if (ids && !values)
		return -EINVAL;

	if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
		unsigned int basic_count = get_xstats_basic_count(dev);
		uint64_t ids_copy[size];

		for (i = 0; i < size; i++) {
			if (ids[i] < basic_count) {
				no_basic_stat_requested = 0;
				break;
			}

			/*
			 * Convert ids to xstats ids that PMD knows.
			 * ids known by user are basic + extended stats.
			 */
			ids_copy[i] = ids[i] - basic_count;
		}

		if (no_basic_stat_requested)
			return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
					values, size);
	}

	if (ids) {
		for (i = 0; i < size; i++) {
			if (ids[i] >= basic_count) {
				no_ext_stat_requested = 0;
				break;
			}
		}
	}

	/* Fill the xstats structure */
	if (ids && no_ext_stat_requested)
		ret = rte_eth_basic_stats_get(port_id, xstats);
	else
		ret = rte_eth_xstats_get(port_id, xstats, expected_entries);

	if (ret < 0)
		return ret;
	num_xstats_filled = (unsigned int)ret;

	/* Return all stats */
	if (!ids) {
		for (i = 0; i < num_xstats_filled; i++)
			values[i] = xstats[i].value;
		return expected_entries;
	}

	/* Filter stats */
	for (i = 0; i < size; i++) {
		if (ids[i] >= expected_entries) {
			RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
			return -1;
		}
		values[i] = xstats[ids[i]].value;
	}
	return size;
}
int
rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
	unsigned int n)
{
	struct rte_eth_dev *dev;
	unsigned int count = 0, i;
	signed int xcount = 0;
	uint16_t nb_rxqs, nb_txqs;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	/* Return generic statistics */
	count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
		(nb_txqs * RTE_NB_TXQ_STATS);

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL) {
		/* Retrieve the xstats from the driver at the end of the
		 * xstats struct.
		 */
		xcount = (*dev->dev_ops->xstats_get)(dev,
				     xstats ? xstats + count : NULL,
				     (n > count) ? n - count : 0);

		if (xcount < 0)
			return eth_err(port_id, xcount);
	}

	if (n < count + xcount || xstats == NULL)
		return count + xcount;

	/* now fill the xstats structure */
	ret = rte_eth_basic_stats_get(port_id, xstats);
	if (ret < 0)
		return ret;
	count = ret;

	for (i = 0; i < count; i++)
		xstats[i].id = i;
	/* add an offset to driver-specific stats */
	for ( ; i < count + xcount; i++)
		xstats[i].id += count;

	return count + xcount;
}
/* reset ethdev extended statistics */
void
rte_eth_xstats_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_reset != NULL) {
		(*dev->dev_ops->xstats_reset)(dev);
		return;
	}

	/* fallback to default */
	rte_eth_stats_reset(port_id);
}
static int
set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
		uint8_t is_rx)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
	return (*dev->dev_ops->queue_stats_mapping_set)
			(dev, queue_id, stat_idx, is_rx);
}

int
rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
		uint8_t stat_idx)
{
	return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
						stat_idx, STAT_QMAP_TX));
}

int
rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
		uint8_t stat_idx)
{
	return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
						stat_idx, STAT_QMAP_RX));
}

int
rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
							fw_version, fw_size));
}

void
rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
{
	struct rte_eth_dev *dev;
	const struct rte_eth_desc_lim lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
	};

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
	dev_info->rx_desc_lim = lim;
	dev_info->tx_desc_lim = lim;
	dev_info->device = dev->device;

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
	dev_info->driver_name = dev->device->driver->name;
	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
}

int
rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
				 uint32_t *ptypes, int num)
{
	int i, j;
	struct rte_eth_dev *dev;
	const uint32_t *all_ptypes;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);

	if (!all_ptypes)
		return 0;

	for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
		if (all_ptypes[i] & ptype_mask) {
			if (j < num)
				ptypes[j] = all_ptypes[i];
			j++;
		}

	return j;
}

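/*
 * Usage sketch (editor's illustration): the same two-call pattern works
 * here; a NULL array queries how many packet types match the mask.
 *
 *	int n = rte_eth_dev_get_supported_ptypes(port_id,
 *			RTE_PTYPE_L3_MASK, NULL, 0);
 *	if (n > 0) {
 *		uint32_t ptypes[n];
 *		rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L3_MASK,
 *				ptypes, n);
 *	}
 */
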
void
rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];
	ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
}

int
rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	*mtu = dev->data->mtu;
	return 0;
}

int
rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
{
	int ret;
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);

	ret = (*dev->dev_ops->mtu_set)(dev, mtu);
	if (!ret)
		dev->data->mtu = mtu;

	return eth_err(port_id, ret);
}

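/*
 * Usage sketch (editor's illustration): because the cached MTU is only
 * updated on driver success, a read-back confirms the change took
 * effect. The value 9000 is an arbitrary jumbo-frame example.
 *
 *	uint16_t mtu;
 *	if (rte_eth_dev_set_mtu(port_id, 9000) == 0 &&
 *	    rte_eth_dev_get_mtu(port_id, &mtu) == 0)
 *		assert(mtu == 9000);
 */
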
int
rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
{
	int ret;
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	if (!(dev->data->dev_conf.rxmode.offloads &
	      DEV_RX_OFFLOAD_VLAN_FILTER)) {
		RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
		return -ENOSYS;
	}
	if (vlan_id > 4095) {
		RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
				port_id, (unsigned) vlan_id);
		return -EINVAL;
	}
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);

	ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
	if (ret == 0) {
		struct rte_vlan_filter_conf *vfc;
		int vidx;
		int vbit;

		vfc = &dev->data->vlan_filter_conf;
		vidx = vlan_id / 64;
		vbit = vlan_id % 64;

		if (on)
			vfc->ids[vidx] |= UINT64_C(1) << vbit;
		else
			vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
	}

	return eth_err(port_id, ret);
}

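/*
 * Worked example for the bitmap above (editor's note): vlan_id 100
 * gives vidx = 100 / 64 = 1 and vbit = 100 % 64 = 36, so enabling the
 * filter sets bit 36 of vlan_filter_conf.ids[1]; disabling clears it.
 */
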
int
rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
				    int on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
	(*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);

	return 0;
}

int
rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
				enum rte_vlan_type vlan_type,
				uint16_t tpid)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);

	return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
							       tpid));
}

int
rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
{
	struct rte_eth_dev *dev;
	int ret = 0;
	int mask = 0;
	int cur, org = 0;
	uint64_t orig_offloads;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	/* save original values in case of failure */
	orig_offloads = dev->data->dev_conf.rxmode.offloads;

	/* check which options were changed by the application */
	cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.offloads &
		 DEV_RX_OFFLOAD_VLAN_STRIP);
	if (cur != org) {
		if (cur)
			dev->data->dev_conf.rxmode.offloads |=
				DEV_RX_OFFLOAD_VLAN_STRIP;
		else
			dev->data->dev_conf.rxmode.offloads &=
				~DEV_RX_OFFLOAD_VLAN_STRIP;
		mask |= ETH_VLAN_STRIP_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.offloads &
		 DEV_RX_OFFLOAD_VLAN_FILTER);
	if (cur != org) {
		if (cur)
			dev->data->dev_conf.rxmode.offloads |=
				DEV_RX_OFFLOAD_VLAN_FILTER;
		else
			dev->data->dev_conf.rxmode.offloads &=
				~DEV_RX_OFFLOAD_VLAN_FILTER;
		mask |= ETH_VLAN_FILTER_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.offloads &
		 DEV_RX_OFFLOAD_VLAN_EXTEND);
	if (cur != org) {
		if (cur)
			dev->data->dev_conf.rxmode.offloads |=
				DEV_RX_OFFLOAD_VLAN_EXTEND;
		else
			dev->data->dev_conf.rxmode.offloads &=
				~DEV_RX_OFFLOAD_VLAN_EXTEND;
		mask |= ETH_VLAN_EXTEND_MASK;
	}

	/* no change */
	if (mask == 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);

	/*
	 * Convert to the offload bitfield API just in case the underlying PMD
	 * still supports it.
	 */
	rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
				    &dev->data->dev_conf.rxmode);
	ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
	if (ret) {
		/* hit an error, restore the original values */
		dev->data->dev_conf.rxmode.offloads = orig_offloads;
		rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
					    &dev->data->dev_conf.rxmode);
	}

	return eth_err(port_id, ret);
}

int
rte_eth_dev_get_vlan_offload(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret = 0;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_VLAN_STRIP)
		ret |= ETH_VLAN_STRIP_OFFLOAD;

	if (dev->data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_VLAN_FILTER)
		ret |= ETH_VLAN_FILTER_OFFLOAD;

	if (dev->data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_VLAN_EXTEND)
		ret |= ETH_VLAN_EXTEND_OFFLOAD;

	return ret;
}

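/*
 * Usage sketch (editor's illustration): a read-modify-write of the
 * VLAN offload flags, enabling stripping without disturbing the filter
 * and extend settings.
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
 *	mask |= ETH_VLAN_STRIP_OFFLOAD;
 *	rte_eth_dev_set_vlan_offload(port_id, mask);
 */
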
int
rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);

	return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
}

int
rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
	memset(fc_conf, 0, sizeof(*fc_conf));
	return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
}

int
rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
		RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
}

int
rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
				   struct rte_eth_pfc_conf *pfc_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
		RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	/* High water, low water validation are device-specific */
	if (*dev->dev_ops->priority_flow_ctrl_set)
		return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
					(dev, pfc_conf));
	return -ENOTSUP;
}

static int
rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	uint16_t i, num;

	if (!reta_conf)
		return -EINVAL;

	num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
	for (i = 0; i < num; i++) {
		if (reta_conf[i].mask)
			return 0;
	}

	return -EINVAL;
}

static int
rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size,
			 uint16_t max_rxq)
{
	uint16_t i, idx, shift;

	if (!reta_conf)
		return -EINVAL;

	if (max_rxq == 0) {
		RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if ((reta_conf[idx].mask & (1ULL << shift)) &&
			(reta_conf[idx].reta[shift] >= max_rxq)) {
			RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
				"the maximum rxq index: %u\n", idx, shift,
				reta_conf[idx].reta[shift], max_rxq);
			return -EINVAL;
		}
	}

	return 0;
}

int
rte_eth_dev_rss_reta_update(uint16_t port_id,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	/* Check mask bits */
	ret = rte_eth_check_reta_mask(reta_conf, reta_size);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];

	/* Check entry value */
	ret = rte_eth_check_reta_entry(reta_conf, reta_size,
				dev->data->nb_rx_queues);
	if (ret < 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
							     reta_size));
}

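/*
 * Usage sketch (editor's illustration; reta_size and nb_rxq are assumed
 * to come from dev_info and the application's configuration): spread
 * the redirection table round-robin across the RX queues. Each
 * rte_eth_rss_reta_entry64 covers RTE_RETA_GROUP_SIZE (64) entries,
 * and only entries whose mask bit is set are applied.
 *
 *	struct rte_eth_rss_reta_entry64 reta[reta_size / RTE_RETA_GROUP_SIZE];
 *	memset(reta, 0, sizeof(reta));
 *	for (uint16_t i = 0; i < reta_size; i++) {
 *		reta[i / RTE_RETA_GROUP_SIZE].mask |= UINT64_C(1) << (i % 64);
 *		reta[i / RTE_RETA_GROUP_SIZE].reta[i % 64] = i % nb_rxq;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
 */
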
int
rte_eth_dev_rss_reta_query(uint16_t port_id,
			   struct rte_eth_rss_reta_entry64 *reta_conf,
			   uint16_t reta_size)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	/* Check mask bits */
	ret = rte_eth_check_reta_mask(reta_conf, reta_size);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
							    reta_size));
}

int
rte_eth_dev_rss_hash_update(uint16_t port_id,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
								 rss_conf));
}

int
rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
			      struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
								   rss_conf));
}

int
rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
				struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (udp_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
		return -EINVAL;
	}

	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
								udp_tunnel));
}

int
rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
				   struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (udp_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
		return -EINVAL;
	}

	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
								udp_tunnel));
}

int
rte_eth_led_on(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
}

int
rte_eth_led_off(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
}

/*
 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
 * an empty spot.
 */
static int
get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	unsigned i;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	rte_eth_dev_info_get(port_id, &dev_info);

	for (i = 0; i < dev_info.max_mac_addrs; i++)
		if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
			return i;

	return -1;
}

static const struct ether_addr null_mac_addr;

int
rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
			uint32_t pool)
{
	struct rte_eth_dev *dev;
	int index;
	uint64_t pool_mask;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);

	if (is_zero_ether_addr(addr)) {
		RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
			port_id);
		return -EINVAL;
	}
	if (pool >= ETH_64_POOLS) {
		RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
		return -EINVAL;
	}

	index = get_mac_addr_index(port_id, addr);
	if (index < 0) {
		index = get_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
				port_id);
			return -ENOSPC;
		}
	} else {
		pool_mask = dev->data->mac_pool_sel[index];

		/* Check if both MAC address and pool are already there; do nothing */
		if (pool_mask & (1ULL << pool))
			return 0;
	}

	/* Update NIC */
	ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
	if (ret == 0) {
		/* Update address in NIC data structure */
		ether_addr_copy(addr, &dev->data->mac_addrs[index]);

		/* Update pool bitmap in NIC data structure */
		dev->data->mac_pool_sel[index] |= (1ULL << pool);
	}

	return eth_err(port_id, ret);
}

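/*
 * Usage sketch (editor's illustration; the locally administered address
 * bytes are made up): add a secondary unicast address to pool 0.
 *
 *	struct ether_addr mac = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } };
 *	int rc = rte_eth_dev_mac_addr_add(port_id, &mac, 0);
 *	// rc == 0 on success, -ENOSPC once the address table is full
 */
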
int
rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
{
	struct rte_eth_dev *dev;
	int index;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);

	index = get_mac_addr_index(port_id, addr);
	if (index == 0) {
		RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
		return -EADDRINUSE;
	} else if (index < 0)
		return 0;  /* Do nothing if address wasn't found */

	/* Update NIC */
	(*dev->dev_ops->mac_addr_remove)(dev, index);

	/* Update address in NIC data structure */
	ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);

	/* reset pool bitmap */
	dev->data->mac_pool_sel[index] = 0;

	return 0;
}

int
rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (!is_valid_assigned_ether_addr(addr))
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);

	ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
	if (ret < 0)
		return ret;

	/* Update default address in NIC data structure */
	ether_addr_copy(addr, &dev->data->mac_addrs[0]);

	return 0;
}

/*
 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
 * an empty spot.
 */
static int
get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	unsigned i;

	rte_eth_dev_info_get(port_id, &dev_info);
	if (!dev->data->hash_mac_addrs)
		return -1;

	for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
		if (memcmp(addr, &dev->data->hash_mac_addrs[i],
			ETHER_ADDR_LEN) == 0)
			return i;

	return -1;
}

int
rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
				uint8_t on)
{
	int index;
	int ret;
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (is_zero_ether_addr(addr)) {
		RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
			port_id);
		return -EINVAL;
	}

	index = get_hash_mac_addr_index(port_id, addr);
	/* Check if it's already there, and do nothing */
	if ((index >= 0) && on)
		return 0;

	if (index < 0) {
		if (!on) {
			RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
				"set in UTA\n", port_id);
			return -EINVAL;
		}
		index = get_hash_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
					port_id);
			return -ENOSPC;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
	ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
	if (ret == 0) {
		/* Update address in NIC data structure */
		if (on)
			ether_addr_copy(addr,
					&dev->data->hash_mac_addrs[index]);
		else
			ether_addr_copy(&null_mac_addr,
					&dev->data->hash_mac_addrs[index]);
	}

	return eth_err(port_id, ret);
}

int
rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
								       on));
}

int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
					uint16_t tx_rate)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_link link;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	rte_eth_dev_info_get(port_id, &dev_info);
	link = dev->data->dev_link;

	if (queue_idx > dev_info.max_tx_queues) {
		RTE_PMD_DEBUG_TRACE("set queue rate limit:port %d: "
				"invalid queue id=%d\n", port_id, queue_idx);
		return -EINVAL;
	}

	if (tx_rate > link.link_speed) {
		RTE_PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
				"bigger than link speed=%d\n",
				tx_rate, link.link_speed);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
							queue_idx, tx_rate));
}

int
rte_eth_mirror_rule_set(uint16_t port_id,
			struct rte_eth_mirror_conf *mirror_conf,
			uint8_t rule_id, uint8_t on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (mirror_conf->rule_type == 0) {
		RTE_PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
		return -EINVAL;
	}

	if (mirror_conf->dst_pool >= ETH_64_POOLS) {
		RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
				ETH_64_POOLS - 1);
		return -EINVAL;
	}

	if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
	     ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
	    (mirror_conf->pool_mask == 0)) {
		RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
		return -EINVAL;
	}

	if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
	    mirror_conf->vlan.vlan_mask == 0) {
		RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);

	return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
						mirror_conf, rule_id, on));
}

int
rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);

	return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
								   rule_id));
}

RTE_INIT(eth_dev_init_cb_lists)
{
	int i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
		TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
}

int
rte_eth_dev_callback_register(uint16_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *user_cb;
	uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
	uint16_t last_port;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
		ethdev_log(ERR, "Invalid port_id=%d", port_id);
		return -EINVAL;
	}

	if (port_id == RTE_ETH_ALL) {
		next_port = 0;
		last_port = RTE_MAX_ETHPORTS - 1;
	} else {
		next_port = last_port = port_id;
	}

	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	do {
		dev = &rte_eth_devices[next_port];

		TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
			if (user_cb->cb_fn == cb_fn &&
				user_cb->cb_arg == cb_arg &&
				user_cb->event == event) {
				break;
			}
		}

		/* create a new callback. */
		if (user_cb == NULL) {
			user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_eth_dev_callback), 0);
			if (user_cb != NULL) {
				user_cb->cb_fn = cb_fn;
				user_cb->cb_arg = cb_arg;
				user_cb->event = event;
				TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
						  user_cb, next);
			} else {
				rte_spinlock_unlock(&rte_eth_dev_cb_lock);
				rte_eth_dev_callback_unregister(port_id, event,
								cb_fn, cb_arg);
				return -ENOMEM;
			}
		}
	} while (++next_port <= last_port);

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	return 0;
}

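/*
 * Usage sketch (editor's illustration): registering one link-status
 * handler for every port; the handler name and body are invented for
 * the example.
 *
 *	static int
 *	on_link_change(uint16_t port_id, enum rte_eth_event_type event,
 *		       void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: link event %d\n", port_id, event);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_INTR_LSC,
 *				      on_link_change, NULL);
 */
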
int
rte_eth_dev_callback_unregister(uint16_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *cb, *next;
	uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
	uint16_t last_port;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
		ethdev_log(ERR, "Invalid port_id=%d", port_id);
		return -EINVAL;
	}

	if (port_id == RTE_ETH_ALL) {
		next_port = 0;
		last_port = RTE_MAX_ETHPORTS - 1;
	} else {
		next_port = last_port = port_id;
	}

	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	do {
		dev = &rte_eth_devices[next_port];
		ret = 0;
		for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
		     cb = next) {

			next = TAILQ_NEXT(cb, next);

			if (cb->cb_fn != cb_fn || cb->event != event ||
			    (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
				continue;

			/*
			 * if this callback is not executing right now,
			 * then remove it.
			 */
			if (cb->active == 0) {
				TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
				rte_free(cb);
			} else {
				ret = -EAGAIN;
			}
		}
	} while (++next_port <= last_port);

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	return ret;
}

int
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event, void *ret_param)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;
	int rc = 0;

	rte_spinlock_lock(&rte_eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		if (ret_param != NULL)
			dev_cb.ret_param = ret_param;

		rte_spinlock_unlock(&rte_eth_dev_cb_lock);
		rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
				dev_cb.cb_arg, dev_cb.ret_param);
		rte_spinlock_lock(&rte_eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	return rc;
}

int
rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
{
	uint32_t vec;
	struct rte_eth_dev *dev;
	struct rte_intr_handle *intr_handle;
	uint16_t qid;
	int rc;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	if (!dev->intr_handle) {
		RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
		return -ENOTSUP;
	}

	intr_handle = dev->intr_handle;
	if (!intr_handle->intr_vec) {
		RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
		return -EPERM;
	}

	for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
		vec = intr_handle->intr_vec[qid];
		rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
		if (rc && rc != -EEXIST) {
			RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
					" op %d epfd %d vec %u\n",
					port_id, qid, op, epfd, vec);
		}
	}

	return 0;
}

const struct rte_memzone *
rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
			 uint16_t queue_id, size_t size, unsigned align,
			 int socket_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
		 dev->device->driver->name, ring_name,
		 dev->data->port_id, queue_id);

	mz = rte_memzone_lookup(z_name);
	if (mz)
		return mz;

	return rte_memzone_reserve_aligned(z_name, size, socket_id,
			RTE_MEMZONE_IOVA_CONTIG, align);
}

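/*
 * Usage sketch (editor's illustration; "rx_ring" and the surrounding
 * variables stand in for a real driver's queue setup): the zone name
 * encodes driver/ring/port/queue, so a second call with the same
 * parameters returns the zone reserved earlier instead of allocating
 * again.
 *
 *	const struct rte_memzone *mz;
 *	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
 *			ring_size, RTE_CACHE_LINE_SIZE, socket_id);
 *	if (mz == NULL)
 *		return -ENOMEM;
 */
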
int
rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
			  int epfd, int op, void *data)
{
	uint32_t vec;
	struct rte_eth_dev *dev;
	struct rte_intr_handle *intr_handle;
	int rc;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (!dev->intr_handle) {
		RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
		return -ENOTSUP;
	}

	intr_handle = dev->intr_handle;
	if (!intr_handle->intr_vec) {
		RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
		return -EPERM;
	}

	vec = intr_handle->intr_vec[queue_id];
	rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
	if (rc && rc != -EEXIST) {
		RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
				" op %d epfd %d vec %u\n",
				port_id, queue_id, op, epfd, vec);
		return rc;
	}

	return 0;
}

int
rte_eth_dev_rx_intr_enable(uint16_t port_id,
			   uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
								queue_id));
}

int
rte_eth_dev_rx_intr_disable(uint16_t port_id,
			    uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
								queue_id));
}

int
rte_eth_dev_filter_supported(uint16_t port_id,
			     enum rte_filter_type filter_type)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
	return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
					    RTE_ETH_FILTER_NOP, NULL);
}

int
rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
			enum rte_filter_op filter_op, void *arg)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
							     filter_op, arg));
}

const struct rte_eth_rxtx_callback *
rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}
	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.rx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&rte_eth_rx_cb_lock);
	/* Add the callbacks in fifo order. */
	struct rte_eth_rxtx_callback *tail =
		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];

	if (!tail) {
		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
	} else {
		while (tail->next)
			tail = tail->next;
		tail->next = cb;
	}
	rte_spinlock_unlock(&rte_eth_rx_cb_lock);

	return cb;
}

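/*
 * Usage sketch (editor's illustration): a post-RX callback that counts
 * delivered packets; the counter and function names are invented.
 *
 *	static uint64_t rx_seen;
 *
 *	static uint16_t
 *	count_rx(uint16_t port_id, uint16_t queue_id,
 *		 struct rte_mbuf *pkts[], uint16_t nb_pkts,
 *		 uint16_t max_pkts, void *user_param)
 *	{
 *		RTE_SET_USED(port_id);
 *		RTE_SET_USED(queue_id);
 *		RTE_SET_USED(max_pkts);
 *		RTE_SET_USED(user_param);
 *		rx_seen += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	rte_eth_add_rx_callback(port_id, 0, count_rx, NULL);
 */
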
const struct rte_eth_rxtx_callback *
rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.rx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&rte_eth_rx_cb_lock);
	/* Add the callback at the first position */
	cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
	rte_smp_wmb();
	rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
	rte_spinlock_unlock(&rte_eth_rx_cb_lock);

	return cb;
}

const struct rte_eth_rxtx_callback *
rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
		rte_tx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.tx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&rte_eth_tx_cb_lock);
	/* Add the callbacks in fifo order. */
	struct rte_eth_rxtx_callback *tail =
		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];

	if (!tail) {
		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
	} else {
		while (tail->next)
			tail = tail->next;
		tail->next = cb;
	}
	rte_spinlock_unlock(&rte_eth_tx_cb_lock);

	return cb;
}

int
rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
		const struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return -ENOTSUP;
#endif
	/* Check input parameters. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	if (user_cb == NULL ||
			queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
		return -EINVAL;

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_eth_rxtx_callback *cb;
	struct rte_eth_rxtx_callback **prev_cb;
	int ret = -EINVAL;

	rte_spinlock_lock(&rte_eth_rx_cb_lock);
	prev_cb = &dev->post_rx_burst_cbs[queue_id];
	for (; *prev_cb != NULL; prev_cb = &cb->next) {
		cb = *prev_cb;
		if (cb == user_cb) {
			/* Remove the user cb from the callback list. */
			*prev_cb = cb->next;
			ret = 0;
			break;
		}
	}
	rte_spinlock_unlock(&rte_eth_rx_cb_lock);

	return ret;
}

int
rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
		const struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return -ENOTSUP;
#endif
	/* Check input parameters. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	if (user_cb == NULL ||
			queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
		return -EINVAL;

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret = -EINVAL;
	struct rte_eth_rxtx_callback *cb;
	struct rte_eth_rxtx_callback **prev_cb;

	rte_spinlock_lock(&rte_eth_tx_cb_lock);
	prev_cb = &dev->pre_tx_burst_cbs[queue_id];
	for (; *prev_cb != NULL; prev_cb = &cb->next) {
		cb = *prev_cb;
		if (cb == user_cb) {
			/* Remove the user cb from the callback list. */
			*prev_cb = cb->next;
			ret = 0;
			break;
		}
	}
	rte_spinlock_unlock(&rte_eth_tx_cb_lock);

	return ret;
}

int
rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (qinfo == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);

	memset(qinfo, 0, sizeof(*qinfo));
	dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
	return 0;
}

int
rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (qinfo == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);

	memset(qinfo, 0, sizeof(*qinfo));
	dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
	return 0;
}

int
rte_eth_dev_set_mc_addr_list(uint16_t port_id,
			     struct ether_addr *mc_addr_set,
			     uint32_t nb_mc_addr)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
	return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
						mc_addr_set, nb_mc_addr));
}

int
rte_eth_timesync_enable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
}

int
rte_eth_timesync_disable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
}

int
rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
				   uint32_t flags)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
				(dev, timestamp, flags));
}

int
rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
				   struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
				(dev, timestamp));
}

int
rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
								      delta));
}

int
rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
								timestamp));
}

int
rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
								timestamp));
}

int
rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
}

int
rte_eth_dev_get_eeprom_length(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
}

int
rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
}

int
rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
}

int
rte_eth_dev_get_dcb_info(uint16_t port_id,
			 struct rte_eth_dcb_info *dcb_info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
}

int
rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
				    struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (l2_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
		return -EINVAL;
	}

	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
				-ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
								l2_tunnel));
}

int
rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
				  struct rte_eth_l2_tunnel_conf *l2_tunnel,
				  uint32_t mask,
				  uint8_t en)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (l2_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
		return -EINVAL;
	}

	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
		return -EINVAL;
	}

	if (mask == 0) {
		RTE_PMD_DEBUG_TRACE("Mask should have a value.\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
				-ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
							l2_tunnel, mask, en));
}

static void
rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
			   const struct rte_eth_desc_lim *desc_lim)
{
	if (desc_lim->nb_align != 0)
		*nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);

	if (desc_lim->nb_max != 0)
		*nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);

	*nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
}

int
rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
				 uint16_t *nb_rx_desc,
				 uint16_t *nb_tx_desc)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);

	rte_eth_dev_info_get(port_id, &dev_info);

	if (nb_rx_desc != NULL)
		rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);

	if (nb_tx_desc != NULL)
		rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);

	return 0;
}

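/*
 * Usage sketch (editor's illustration): clamp the application's
 * preferred ring sizes to the device limits before queue setup; mp and
 * socket_id are assumed from the surrounding init code.
 *
 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
 *	rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *	rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket_id, NULL, mp);
 *	rte_eth_tx_queue_setup(port_id, 0, nb_txd, socket_id, NULL);
 */
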
int
rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (pool == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->pool_ops_supported == NULL)
		return 1; /* all pools are supported */

	return (*dev->dev_ops->pool_ops_supported)(dev, pool);
}

RTE_INIT(ethdev_init_log);
static void
ethdev_init_log(void)
{
	ethdev_logtype = rte_log_register("lib.ethdev");
	if (ethdev_logtype >= 0)
		rte_log_set_level(ethdev_logtype, RTE_LOG_INFO);
}