/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_ether.h"
#include "rte_ethdev.h"
#include "rte_ethdev_driver.h"
#include "ethdev_profile.h"

static int ethdev_logtype;

#define ethdev_log(level, fmt, ...) \
	rte_log(RTE_LOG_ ## level, ethdev_logtype, fmt "\n", ## __VA_ARGS__)

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static uint8_t eth_dev_last_created_port;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
	uint64_t next_owner_id;
	rte_spinlock_t ownership_lock;
	struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *rte_eth_dev_shared_data;

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) / \
		sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) / \
		sizeof(rte_txq_stats_strings[0]))

#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} rte_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
	RTE_RX_OFFLOAD_BIT2STR(CRC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
};

#undef RTE_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} rte_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * the pointer to the callback parameters, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	void *ret_param;                        /**< Return parameter */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
	       rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
	       rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}

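/*
 * Illustrative sketch (not part of the original file): applications
 * normally do not call rte_eth_find_next() directly; they use the
 * RTE_ETH_FOREACH_DEV() helper from rte_ethdev.h, which wraps exactly
 * this iterator. Shown here only to document the intended usage.
 */
static __rte_unused void
example_list_valid_ports(void)
{
	uint16_t pid;

	RTE_ETH_FOREACH_DEV(pid)
		printf("port %u is attached\n", pid);
}
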
static void
rte_eth_dev_shared_data_prepare(void)
{
	const unsigned flags = 0;
	const struct rte_memzone *mz;

	rte_spinlock_lock(&rte_eth_shared_data_lock);

	if (rte_eth_dev_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate port data and ownership shared memory. */
			mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
					sizeof(*rte_eth_dev_shared_data),
					rte_socket_id(), flags);
		} else
			mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
		if (mz == NULL)
			rte_panic("Cannot allocate ethdev shared data\n");

		rte_eth_dev_shared_data = mz->addr;
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			rte_eth_dev_shared_data->next_owner_id =
					RTE_ETH_DEV_NO_OWNER + 1;
			rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
			memset(rte_eth_dev_shared_data->data, 0,
			       sizeof(rte_eth_dev_shared_data->data));
		}
	}

	rte_spinlock_unlock(&rte_eth_shared_data_lock);
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

static uint16_t
rte_eth_dev_find_free_port(void)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Using shared name field to find a free port. */
		if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
			RTE_ASSERT(rte_eth_devices[i].state ==
				   RTE_ETH_DEV_UNUSED);
			return i;
		}
	}
	return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &rte_eth_dev_shared_data->data[port_id];
	eth_dev->state = RTE_ETH_DEV_ATTACHED;

	eth_dev_last_created_port = port_id;

	return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev = NULL;

	rte_eth_dev_shared_data_prepare();

	/* Synchronize port creation between primary and secondary threads. */
	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	port_id = rte_eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		ethdev_log(ERR, "Reached maximum number of Ethernet ports");
		goto unlock;
	}

	if (rte_eth_dev_allocated(name) != NULL) {
		ethdev_log(ERR,
			"Ethernet device with name %s already allocated!",
			name);
		goto unlock;
	}

	eth_dev = eth_dev_get(port_id);
	snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
	eth_dev->data->port_id = port_id;
	eth_dev->data->mtu = ETHER_MTU;

unlock:
	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	if (eth_dev != NULL)
		_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_NEW, NULL);

	return eth_dev;
}

/*
 * Attach to a port already registered by the primary process, ensuring
 * that the same device gets the same port id in both the primary and
 * the secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev = NULL;

	rte_eth_dev_shared_data_prepare();

	/* Synchronize port attachment to primary port creation and release. */
	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_PMD_DEBUG_TRACE(
			"device %s is not driven by the primary process\n",
			name);
	} else {
		eth_dev = eth_dev_get(i);
		RTE_ASSERT(eth_dev->data->port_id == i);
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	eth_dev->state = RTE_ETH_DEV_UNUSED;

	memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_DESTROY, NULL);

	return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}

static int
rte_eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    rte_eth_dev_shared_data->next_owner_id <= owner_id) {
		RTE_PMD_DEBUG_TRACE("Invalid owner_id=%016lX.\n", owner_id);
		return 0;
	}
	return 1;
}

uint64_t __rte_experimental
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
	       ((rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
	       rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED) ||
	       rte_eth_devices[port_id].data->owner.id != owner_id))
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}

int __rte_experimental
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	*owner_id = rte_eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return 0;
}

static int
_rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		       const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev_owner *port_owner;
	int sret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (!rte_eth_is_valid_owner_id(new_owner->id) &&
	    !rte_eth_is_valid_owner_id(old_owner_id))
		return -EINVAL;

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_PMD_DEBUG_TRACE("Cannot set owner to port %d already owned"
				    " by %s_%016lX.\n", port_id,
				    port_owner->name, port_owner->id);
		return -EPERM;
	}

	sret = snprintf(port_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN, "%s",
			new_owner->name);
	if (sret < 0 || sret >= RTE_ETH_MAX_OWNER_NAME_LEN)
		RTE_PMD_DEBUG_TRACE("Port %d owner name was truncated.\n",
				    port_id);

	port_owner->id = new_owner->id;

	RTE_PMD_DEBUG_TRACE("Port %d owner is %s_%016lX.\n", port_id,
			    new_owner->name, new_owner->id);

	return 0;
}

int __rte_experimental
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return ret;
}

int __rte_experimental
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return ret;
}

void __rte_experimental
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	if (rte_eth_is_valid_owner_id(owner_id)) {
		RTE_ETH_FOREACH_DEV_OWNED_BY(port_id, owner_id)
			memset(&rte_eth_devices[port_id].data->owner, 0,
			       sizeof(struct rte_eth_dev_owner));
		RTE_PMD_DEBUG_TRACE("All ports owned by %016lX have been"
				    " released.\n", owner_id);
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
}

int __rte_experimental
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	int ret = 0;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	if (!rte_eth_dev_is_valid_port(port_id)) {
		RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		ret = -ENODEV;
	} else {
		rte_memcpy(owner, &rte_eth_devices[port_id].data->owner,
			   sizeof(*owner));
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return ret;
}

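/*
 * Illustrative sketch (not part of the original file) of the experimental
 * ownership API defined above: take a fresh owner id, claim a port, and
 * release everything again. "example_app" is a hypothetical owner name;
 * error handling is reduced to early returns.
 */
static __rte_unused int
example_port_ownership(uint16_t port_id)
{
	struct rte_eth_dev_owner owner;
	uint64_t my_owner_id;
	int ret;

	ret = rte_eth_dev_owner_new(&my_owner_id);
	if (ret != 0)
		return ret;

	owner.id = my_owner_id;
	snprintf(owner.name, sizeof(owner.name), "%s", "example_app");

	ret = rte_eth_dev_owner_set(port_id, &owner);
	if (ret != 0)
		return ret;

	/* ... use the port exclusively ... */

	rte_eth_dev_owner_delete(my_owner_id);
	return 0;
}
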
int
rte_eth_dev_socket_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count(void)
{
	return rte_eth_dev_count_avail();
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	return count;
}

uint16_t __rte_experimental
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	for (port = 0; port < RTE_MAX_ETHPORTS; port++)
		if (rte_eth_devices[port].state != RTE_ETH_DEV_UNUSED)
			count++;

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (name == NULL) {
		RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[i].data',
	 * because it might be overwritten by VDEV PMD */
	tmp = rte_eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint32_t pid;

	if (name == NULL) {
		RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
		return -EINVAL;
	}

	for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
		if (rte_eth_devices[pid].state != RTE_ETH_DEV_UNUSED &&
		    !strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;
			return 0;
		}
	}

	return -ENODEV;
}

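/*
 * Illustrative sketch (not part of the original file): resolving a port id
 * from a device name and then querying its NUMA socket, the typical
 * pairing of the two lookups implemented above.
 */
static __rte_unused void
example_lookup_port(const char *dev_name)
{
	uint16_t port_id;

	if (rte_eth_dev_get_port_by_name(dev_name, &port_id) == 0)
		printf("%s is port %u on socket %d\n",
		       dev_name, port_id, rte_eth_dev_socket_id(port_id));
}
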
static int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

/* attach the new device, then store port_id of the device */
int
rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
{
	int current = rte_eth_dev_count_total();
	struct rte_devargs da;
	int ret = -1;

	memset(&da, 0, sizeof(da));

	if ((devargs == NULL) || (port_id == NULL)) {
		ret = -EINVAL;
		goto err;
	}

	/* parse devargs */
	if (rte_devargs_parse(&da, "%s", devargs))
		goto err;

	ret = rte_eal_hotplug_add(da.bus->name, da.name, da.args);
	if (ret < 0)
		goto err;

	/* no point looking at the port count if no port exists */
	if (!rte_eth_dev_count_total()) {
		ethdev_log(ERR, "No port found for device (%s)", da.name);
		ret = -1;
		goto err;
	}

	/* if nothing happened, there is a bug here, since some driver told us
	 * it did attach a device, but did not create a port.
	 * FIXME: race condition in case of plug-out of another device
	 */
	if (current == rte_eth_dev_count_total()) {
		ret = -1;
		goto err;
	}

	*port_id = eth_dev_last_created_port;
	ret = 0;

err:
	free(da.args);
	return ret;
}

/* detach the device, then store the name of the device */
int
rte_eth_dev_detach(uint16_t port_id, char *name __rte_unused)
{
	struct rte_device *dev;
	struct rte_bus *bus;
	uint32_t dev_flags;
	int ret = -1;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev_flags = rte_eth_devices[port_id].data->dev_flags;
	if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
		ethdev_log(ERR,
			"Port %" PRIu16 " is bonded, cannot detach", port_id);
		return -ENOTSUP;
	}

	dev = rte_eth_devices[port_id].device;
	if (dev == NULL)
		return -EINVAL;

	bus = rte_bus_find_by_device(dev);
	if (bus == NULL)
		return -ENOENT;

	ret = rte_eal_hotplug_remove(bus->name, dev->name);
	if (ret < 0)
		return ret;

	rte_eth_dev_release_port(&rte_eth_devices[port_id]);
	return 0;
}

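/*
 * Illustrative sketch (not part of the original file): hotplugging a port
 * by devargs string and detaching it again. "net_null0" is a hypothetical
 * example argument (it assumes the null vdev PMD is available); any
 * bus/device devargs accepted by the EAL works the same way.
 */
static __rte_unused int
example_hotplug(void)
{
	uint16_t port_id;
	char name[RTE_ETH_NAME_MAX_LEN];
	int ret;

	ret = rte_eth_dev_attach("net_null0", &port_id);
	if (ret != 0)
		return ret;

	return rte_eth_dev_detach(port_id, name);
}
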
static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	void **rxq;
	unsigned i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);
		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (rxq == NULL)
			return -(ENOMEM);
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(rxq + old_nb_queues, 0,
				sizeof(rxq[0]) * new_qs);
		}

		dev->data->rx_queues = rxq;

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);

		rte_free(dev->data->rx_queues);
		dev->data->rx_queues = NULL;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (!dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
			"port %d must be started before starting any of its queues\n",
			port_id);
		return -EINVAL;
	}

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
			" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
							     rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
			" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (!dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
			"port %d must be started before starting any of its queues\n",
			port_id);
		return -EINVAL;
	}

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
			" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev,
							     tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
			" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	void **txq;
	unsigned i;

	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
				sizeof(dev->data->tx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);
		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (txq == NULL)
			return -ENOMEM;
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(txq + old_nb_queues, 0,
				sizeof(txq[0]) * new_qs);
		}

		dev->data->tx_queues = txq;

	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);

		rte_free(dev->data->tx_queues);
		dev->data->tx_queues = NULL;
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case ETH_SPEED_NUM_10M:
		return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
	case ETH_SPEED_NUM_100M:
		return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
	case ETH_SPEED_NUM_1G:
		return ETH_LINK_SPEED_1G;
	case ETH_SPEED_NUM_2_5G:
		return ETH_LINK_SPEED_2_5G;
	case ETH_SPEED_NUM_5G:
		return ETH_LINK_SPEED_5G;
	case ETH_SPEED_NUM_10G:
		return ETH_LINK_SPEED_10G;
	case ETH_SPEED_NUM_20G:
		return ETH_LINK_SPEED_20G;
	case ETH_SPEED_NUM_25G:
		return ETH_LINK_SPEED_25G;
	case ETH_SPEED_NUM_40G:
		return ETH_LINK_SPEED_40G;
	case ETH_SPEED_NUM_50G:
		return ETH_LINK_SPEED_50G;
	case ETH_SPEED_NUM_56G:
		return ETH_LINK_SPEED_56G;
	case ETH_SPEED_NUM_100G:
		return ETH_LINK_SPEED_100G;
	default:
		return 0;
	}
}

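/*
 * Illustrative sketch (not part of the original file): building a
 * link_speeds mask for struct rte_eth_conf from fixed speed/duplex pairs
 * with the helper above.
 */
static __rte_unused uint32_t
example_link_speeds(void)
{
	uint32_t speeds = ETH_LINK_SPEED_FIXED;

	speeds |= rte_eth_speed_bitflag(ETH_SPEED_NUM_1G,
					ETH_LINK_FULL_DUPLEX);
	speeds |= rte_eth_speed_bitflag(ETH_SPEED_NUM_10G,
					ETH_LINK_FULL_DUPLEX);
	return speeds; /* suitable for dev_conf.link_speeds */
}
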
/**
 * A conversion function from rxmode bitfield API.
 */
static void
rte_eth_convert_rx_offload_bitfield(const struct rte_eth_rxmode *rxmode,
				    uint64_t *rx_offloads)
{
	uint64_t offloads = 0;

	if (rxmode->header_split == 1)
		offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
	if (rxmode->hw_ip_checksum == 1)
		offloads |= DEV_RX_OFFLOAD_CHECKSUM;
	if (rxmode->hw_vlan_filter == 1)
		offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
	if (rxmode->hw_vlan_strip == 1)
		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
	if (rxmode->hw_vlan_extend == 1)
		offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
	if (rxmode->jumbo_frame == 1)
		offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	if (rxmode->hw_strip_crc == 1)
		offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
	if (rxmode->enable_scatter == 1)
		offloads |= DEV_RX_OFFLOAD_SCATTER;
	if (rxmode->enable_lro == 1)
		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
	if (rxmode->hw_timestamp == 1)
		offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
	if (rxmode->security == 1)
		offloads |= DEV_RX_OFFLOAD_SECURITY;

	*rx_offloads = offloads;
}

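/*
 * Illustrative sketch (not part of the original file): what the conversion
 * above produces for a legacy configuration that enabled IP checksum and
 * CRC stripping through the old rxmode bit-fields.
 */
static __rte_unused uint64_t
example_convert_legacy_rxmode(void)
{
	struct rte_eth_rxmode rxmode = {
		.hw_ip_checksum = 1,
		.hw_strip_crc = 1,
	};
	uint64_t offloads;

	rte_eth_convert_rx_offload_bitfield(&rxmode, &offloads);
	/* offloads == DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_CRC_STRIP */
	return offloads;
}
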
const char * __rte_experimental
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
		if (offload == rte_rx_offload_names[i].offload) {
			name = rte_rx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char * __rte_experimental
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
		if (offload == rte_tx_offload_names[i].offload) {
			name = rte_tx_offload_names[i].name;
			break;
		}
	}

	return name;
}

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf local_conf = *dev_conf;
	int diag;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	/* If number of queues specified by application for both Rx and Tx is
	 * zero, use driver preferred values. This cannot be done individually
	 * as it is valid for either Tx or Rx (but not both) to be zero.
	 * If driver does not provide any preferred values, fall back on
	 * EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_PMD_DEBUG_TRACE(
			"Number of RX queues requested (%u) is greater than max supported (%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		return -EINVAL;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_PMD_DEBUG_TRACE(
			"Number of TX queues requested (%u) is greater than max supported (%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n",
		    port_id);
		return -EBUSY;
	}

	/*
	 * Convert between the offloads API to enable PMDs to support
	 * only one of them.
	 */
	if (dev_conf->rxmode.ignore_offload_bitfield == 0)
		rte_eth_convert_rx_offload_bitfield(
			&dev_conf->rxmode, &local_conf.rxmode.offloads);

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
				port_id, nb_rx_q, dev_info.max_rx_queues);
		return -EINVAL;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
				port_id, nb_tx_q, dev_info.max_tx_queues);
		return -EINVAL;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
				    dev->device->driver->name);
		return -EINVAL;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n",
				    dev->device->driver->name);
		return -EINVAL;
	}

	/*
	 * If jumbo frames are enabled, check that the maximum RX packet
	 * length is supported by the configured device.
	 */
	if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
			RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" > max valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)dev_info.max_rx_pktlen);
			return -EINVAL;
		} else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
			RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" < min valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)ETHER_MIN_LEN);
			return -EINVAL;
		}
	} else {
		if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
		    dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
			/* Use default value */
			dev->data->dev_conf.rxmode.max_rx_pkt_len =
							ETHER_MAX_LEN;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
				port_id, diag);
		return diag;
	}

	diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
				port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		return diag;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
				port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		return eth_err(port_id, diag);
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_profile_rx_init(port_id, dev);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d __rte_eth_profile_rx_init = %d\n",
				port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		return eth_err(port_id, diag);
	}

	return 0;
}

void
_rte_eth_dev_reset(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
			"port %d must be stopped to allow reset\n",
			dev->data->port_id);
		return;
	}

	rte_eth_dev_rx_queue_config(dev, 0);
	rte_eth_dev_tx_queue_config(dev, 0);

	memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

static void
rte_eth_dev_config_restore(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	dev = &rte_eth_devices[port_id];

	rte_eth_dev_info_get(port_id, &dev_info);

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info.max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & 1ULL)
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}

	/* replay promiscuous configuration */
	if (rte_eth_promiscuous_get(port_id) == 1)
		rte_eth_promiscuous_enable(port_id);
	else if (rte_eth_promiscuous_get(port_id) == 0)
		rte_eth_promiscuous_disable(port_id);

	/* replay all multicast configuration */
	if (rte_eth_allmulticast_get(port_id) == 1)
		rte_eth_allmulticast_enable(port_id);
	else if (rte_eth_allmulticast_get(port_id) == 0)
		rte_eth_allmulticast_disable(port_id);
}

int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int diag;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
			" already started\n",
			port_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	rte_eth_dev_config_restore(port_id);

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
		(*dev->dev_ops->link_update)(dev, 0);
	}
	return 0;
}

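/*
 * Illustrative sketch (not part of the original file): the canonical port
 * bring-up sequence built from the functions above, using one RX and one
 * TX queue of 512 descriptors. The mempool "mp" is assumed to be created
 * beforehand, e.g. with rte_pktmbuf_pool_create().
 */
static __rte_unused int
example_port_init(uint16_t port_id, struct rte_mempool *mp)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.max_rx_pkt_len = ETHER_MAX_LEN,
			.ignore_offload_bitfield = 1, /* new offloads API */
		},
	};
	int ret;

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret != 0)
		return ret;

	ret = rte_eth_rx_queue_setup(port_id, 0, 512,
			rte_eth_dev_socket_id(port_id), NULL, mp);
	if (ret < 0)
		return ret;

	ret = rte_eth_tx_queue_setup(port_id, 0, 512,
			rte_eth_dev_socket_id(port_id), NULL);
	if (ret < 0)
		return ret;

	return rte_eth_dev_start(port_id);
}
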
void
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
			" already stopped\n",
			port_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
}

int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}

void
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_close)(dev);

	dev->data->nb_rx_queues = 0;
	rte_free(dev->data->rx_queues);
	dev->data->rx_queues = NULL;
	dev->data->nb_tx_queues = 0;
	rte_free(dev->data->tx_queues);
	dev->data->tx_queues = NULL;
}

int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);

	rte_eth_dev_stop(port_id);
	ret = dev->dev_ops->dev_reset(dev);

	return eth_err(port_id, ret);
}

int __rte_experimental
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);

	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	return ret;
}

int
rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint32_t mbp_buf_size;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf local_conf;
	void **rxq;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n",
		    port_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

	/*
	 * Check the size of the mbuf data buffer.
	 * This value must be provided in the private data of the memory pool.
	 * First check that the memory pool has valid private data.
	 */
	rte_eth_dev_info_get(port_id, &dev_info);
	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
				mp->name, (int) mp->private_data_size,
				(int) sizeof(struct rte_pktmbuf_pool_private));
		return -ENOSPC;
	}
	mbp_buf_size = rte_pktmbuf_data_room_size(mp);

	if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
		RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
				"(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
				"=%d)\n",
				mp->name,
				(int)mbp_buf_size,
				(int)(RTE_PKTMBUF_HEADROOM +
				      dev_info.min_rx_bufsize),
				(int)RTE_PKTMBUF_HEADROOM,
				(int)dev_info.min_rx_bufsize);
		return -EINVAL;
	}

	/* Use default specified by driver, if nb_rx_desc is zero */
	if (nb_rx_desc == 0) {
		nb_rx_desc = dev_info.default_rxportconf.ring_size;
		/* If driver default is also zero, fall back on EAL default */
		if (nb_rx_desc == 0)
			nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
	}

	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
	    nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
	    nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {

		RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
			"should be: <= %hu, >= %hu, and a multiple of %hu\n",
			nb_rx_desc,
			dev_info.rx_desc_lim.nb_max,
			dev_info.rx_desc_lim.nb_min,
			dev_info.rx_desc_lim.nb_align);
		return -EINVAL;
	}

	rxq = dev->data->rx_queues;
	if (rxq[rx_queue_id]) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
					-ENOTSUP);
		(*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
		rxq[rx_queue_id] = NULL;
	}

	if (rx_conf == NULL)
		rx_conf = &dev_info.default_rxconf;

	local_conf = *rx_conf;
	if (dev->data->dev_conf.rxmode.ignore_offload_bitfield == 0) {
		/**
		 * Reflect port offloads to queue offloads in order for
		 * offloads to not be discarded.
		 */
		rte_eth_convert_rx_offload_bitfield(&dev->data->dev_conf.rxmode,
						    &local_conf.offloads);
	}

	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
					      socket_id, &local_conf, mp);
	if (!ret) {
		if (!dev->data->min_rx_buf_size ||
		    dev->data->min_rx_buf_size > mbp_buf_size)
			dev->data->min_rx_buf_size = mbp_buf_size;
	}

	return eth_err(port_id, ret);
}

/**
 * A conversion function from txq_flags API.
 */
static void
rte_eth_convert_txq_flags(const uint32_t txq_flags, uint64_t *tx_offloads)
{
	uint64_t offloads = 0;

	if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
		offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
	if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
		offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
		offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
		offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
		offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
	if ((txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT) &&
	    (txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP))
		offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	*tx_offloads = offloads;
}

int
rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
		       uint16_t nb_tx_desc, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf local_conf;
	void **txq;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n",
		    port_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Use default specified by driver, if nb_tx_desc is zero */
	if (nb_tx_desc == 0) {
		nb_tx_desc = dev_info.default_txportconf.ring_size;
		/* If driver default is zero, fall back on EAL default */
		if (nb_tx_desc == 0)
			nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
	}
	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
		RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
				"should be: <= %hu, >= %hu, and a multiple of %hu\n",
				nb_tx_desc,
				dev_info.tx_desc_lim.nb_max,
				dev_info.tx_desc_lim.nb_min,
				dev_info.tx_desc_lim.nb_align);
		return -EINVAL;
	}

	txq = dev->data->tx_queues;
	if (txq[tx_queue_id]) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
					-ENOTSUP);
		(*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
		txq[tx_queue_id] = NULL;
	}

	if (tx_conf == NULL)
		tx_conf = &dev_info.default_txconf;

	/*
	 * Convert between the offloads API to enable PMDs to support
	 * only one of them.
	 */
	local_conf = *tx_conf;
	if (!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE)) {
		rte_eth_convert_txq_flags(tx_conf->txq_flags,
					  &local_conf.offloads);
	}

	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
}

void
rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata __rte_unused)
{
	unsigned i;

	for (i = 0; i < unsent; i++)
		rte_pktmbuf_free(pkts[i]);
}

void
rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata)
{
	uint64_t *count = userdata;
	unsigned i;

	for (i = 0; i < unsent; i++)
		rte_pktmbuf_free(pkts[i]);

	*count += unsent;
}

int
rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
		buffer_tx_error_fn cbfn, void *userdata)
{
	buffer->error_callback = cbfn;
	buffer->error_userdata = userdata;
	return 0;
}

int
rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
{
	int ret = 0;

	if (buffer == NULL)
		return -EINVAL;

	buffer->size = size;
	if (buffer->error_callback == NULL) {
		ret = rte_eth_tx_buffer_set_err_callback(
			buffer, rte_eth_tx_buffer_drop_callback, NULL);
	}

	return ret;
}

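/*
 * Illustrative sketch (not part of the original file): typical use of the
 * TX buffering helpers above. The buffer is sized for 32 packets; with the
 * default callback, whatever cannot be sent on flush is simply dropped.
 */
static __rte_unused struct rte_eth_dev_tx_buffer *
example_tx_buffer_create(uint16_t port_id)
{
	struct rte_eth_dev_tx_buffer *buffer;

	buffer = rte_zmalloc_socket("tx_buffer",
			RTE_ETH_TX_BUFFER_SIZE(32), 0,
			rte_eth_dev_socket_id(port_id));
	if (buffer == NULL)
		return NULL;

	rte_eth_tx_buffer_init(buffer, 32);
	/* later, per packet: rte_eth_tx_buffer(port_id, 0, buffer, mbuf);
	 * and periodically: rte_eth_tx_buffer_flush(port_id, 0, buffer);
	 */
	return buffer;
}
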
int
rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

	/* Validate Input Data. Bail if not valid or not supported. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);

	/* Call driver to free pending mbufs. */
	ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
					       free_cnt);
	return eth_err(port_id, ret);
}

void
rte_eth_promiscuous_enable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
	(*dev->dev_ops->promiscuous_enable)(dev);
	dev->data->promiscuous = 1;
}

void
rte_eth_promiscuous_disable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
	dev->data->promiscuous = 0;
	(*dev->dev_ops->promiscuous_disable)(dev);
}

int
rte_eth_promiscuous_get(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	return dev->data->promiscuous;
}

void
rte_eth_allmulticast_enable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
	(*dev->dev_ops->allmulticast_enable)(dev);
	dev->data->all_multicast = 1;
}

void
rte_eth_allmulticast_disable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
	dev->data->all_multicast = 0;
	(*dev->dev_ops->allmulticast_disable)(dev);
}

int
rte_eth_allmulticast_get(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	return dev->data->all_multicast;
}

void
rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc &&
	    dev->data->dev_started)
		rte_eth_linkstatus_get(dev, eth_link);
	else {
		RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 1);
		*eth_link = dev->data->dev_link;
	}
}

void
rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc &&
	    dev->data->dev_started)
		rte_eth_linkstatus_get(dev, eth_link);
	else {
		RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 0);
		*eth_link = dev->data->dev_link;
	}
}

int
rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
	return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
}

int
rte_eth_stats_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
	(*dev->dev_ops->stats_reset)(dev);
	dev->data->rx_mbuf_alloc_failed = 0;

	return 0;
}

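/*
 * Illustrative sketch (not part of the original file): reading and printing
 * the basic counters collected by rte_eth_stats_get() above.
 */
static __rte_unused void
example_print_stats(uint16_t port_id)
{
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(port_id, &stats) != 0)
		return;

	printf("port %u: rx %" PRIu64 " pkts (%" PRIu64 " bytes), "
	       "tx %" PRIu64 " pkts, %" PRIu64 " rx errors\n",
	       port_id, stats.ipackets, stats.ibytes,
	       stats.opackets, stats.ierrors);
}
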
static inline int
get_xstats_basic_count(struct rte_eth_dev *dev)
{
	uint16_t nb_rxqs, nb_txqs;
	int count;

	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	count = RTE_NB_STATS;
	count += nb_rxqs * RTE_NB_RXQ_STATS;
	count += nb_txqs * RTE_NB_TXQ_STATS;

	return count;
}

static int
get_xstats_count(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int count;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	dev = &rte_eth_devices[port_id];
	if (dev->dev_ops->xstats_get_names_by_id != NULL) {
		count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
				NULL, 0);
		if (count < 0)
			return eth_err(port_id, count);
	}
	if (dev->dev_ops->xstats_get_names != NULL) {
		count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
		if (count < 0)
			return eth_err(port_id, count);
	} else
		count = 0;

	count += get_xstats_basic_count(dev);

	return count;
}

int
rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
		uint64_t *id)
{
	int cnt_xstats, idx_xstat;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (!id) {
		RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n");
		return -ENOMEM;
	}

	if (!xstat_name) {
		RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n");
		return -ENOMEM;
	}

	/* Get count */
	cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
	if (cnt_xstats < 0) {
		RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n");
		return -ENODEV;
	}

	/* Get id-name lookup table */
	struct rte_eth_xstat_name xstats_names[cnt_xstats];

	if (cnt_xstats != rte_eth_xstats_get_names_by_id(
			port_id, xstats_names, cnt_xstats, NULL)) {
		RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n");
		return -1;
	}

	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
		if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
			*id = idx_xstat;
			return 0;
		}
	}

	return -EINVAL;
}

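/*
 * Illustrative sketch (not part of the original file): resolving one xstat
 * by name and fetching only its value with the by-id API, which avoids
 * retrieving the whole xstats array. "rx_missed_errors" is one of the
 * basic stat names defined in rte_stats_strings above.
 */
static __rte_unused int
example_get_one_xstat(uint16_t port_id, uint64_t *missed)
{
	uint64_t id;
	int ret;

	ret = rte_eth_xstats_get_id_by_name(port_id, "rx_missed_errors", &id);
	if (ret != 0)
		return ret;

	ret = rte_eth_xstats_get_by_id(port_id, &id, missed, 1);
	return (ret == 1) ? 0 : -1;
}
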
/* retrieve basic stats names */
static int
rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names)
{
	int cnt_used_entries = 0;
	uint32_t idx, id_queue;
	uint16_t num_q;

	for (idx = 0; idx < RTE_NB_STATS; idx++) {
		snprintf(xstats_names[cnt_used_entries].name,
			sizeof(xstats_names[0].name),
			"%s", rte_stats_strings[idx].name);
		cnt_used_entries++;
	}
	num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (id_queue = 0; id_queue < num_q; id_queue++) {
		for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
			snprintf(xstats_names[cnt_used_entries].name,
				sizeof(xstats_names[0].name),
				"rx_q%u%s",
				id_queue, rte_rxq_stats_strings[idx].name);
			cnt_used_entries++;
		}
	}
	num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (id_queue = 0; id_queue < num_q; id_queue++) {
		for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
			snprintf(xstats_names[cnt_used_entries].name,
				sizeof(xstats_names[0].name),
				"tx_q%u%s",
				id_queue, rte_txq_stats_strings[idx].name);
			cnt_used_entries++;
		}
	}
	return cnt_used_entries;
}

/* retrieve ethdev extended statistics names */
int
rte_eth_xstats_get_names_by_id(uint16_t port_id,
	struct rte_eth_xstat_name *xstats_names, unsigned int size,
	uint64_t *ids)
{
	struct rte_eth_xstat_name *xstats_names_copy;
	unsigned int no_basic_stat_requested = 1;
	unsigned int no_ext_stat_requested = 1;
	unsigned int expected_entries;
	unsigned int basic_count;
	struct rte_eth_dev *dev;
	unsigned int i;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	basic_count = get_xstats_basic_count(dev);
	ret = get_xstats_count(port_id);
	if (ret < 0)
		return ret;
	expected_entries = (unsigned int)ret;

	/* Return max number of stats if no ids given */
	if (!ids) {
		if (!xstats_names)
			return expected_entries;
		else if (xstats_names && size < expected_entries)
			return expected_entries;
	}

	if (ids && !xstats_names)
		return -EINVAL;

	if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
		uint64_t ids_copy[size];

		for (i = 0; i < size; i++) {
			if (ids[i] < basic_count) {
				no_basic_stat_requested = 0;
				break;
			}

			/*
			 * Convert ids to xstats ids that PMD knows.
			 * ids known by user are basic + extended stats.
			 */
			ids_copy[i] = ids[i] - basic_count;
		}

		if (no_basic_stat_requested)
			return (*dev->dev_ops->xstats_get_names_by_id)(dev,
					xstats_names, ids_copy, size);
	}

	/* Retrieve all stats */
	if (!ids) {
		int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
				expected_entries);
		if (num_stats < 0 || num_stats > (int)expected_entries)
			return num_stats;
		else
			return expected_entries;
	}

	xstats_names_copy = calloc(expected_entries,
		sizeof(struct rte_eth_xstat_name));

	if (!xstats_names_copy) {
		RTE_PMD_DEBUG_TRACE("ERROR: can't allocate memory");
		return -ENOMEM;
	}

	if (ids) {
		for (i = 0; i < size; i++) {
			if (ids[i] >= basic_count) {
				no_ext_stat_requested = 0;
				break;
			}
		}
	}

	/* Fill xstats_names_copy structure */
	if (ids && no_ext_stat_requested) {
		rte_eth_basic_stats_get_names(dev, xstats_names_copy);
	} else {
		ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
			expected_entries);
		if (ret < 0) {
			free(xstats_names_copy);
			return ret;
		}
	}

	/* Filter stats */
	for (i = 0; i < size; i++) {
		if (ids[i] >= expected_entries) {
			RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
			free(xstats_names_copy);
			return -1;
		}
		xstats_names[i] = xstats_names_copy[ids[i]];
	}

	free(xstats_names_copy);
	return size;
}

int
rte_eth_xstats_get_names(uint16_t port_id,
	struct rte_eth_xstat_name *xstats_names,
	unsigned int size)
{
	struct rte_eth_dev *dev;
	int cnt_used_entries;
	int cnt_expected_entries;
	int cnt_driver_entries;

	cnt_expected_entries = get_xstats_count(port_id);
	if (xstats_names == NULL || cnt_expected_entries < 0 ||
	    (int)size < cnt_expected_entries)
		return cnt_expected_entries;

	/* port_id checked in get_xstats_count() */
	dev = &rte_eth_devices[port_id];

	cnt_used_entries = rte_eth_basic_stats_get_names(
		dev, xstats_names);

	if (dev->dev_ops->xstats_get_names != NULL) {
		/* If there are any driver-specific xstats, append them
		 * to the end of the list.
		 */
		cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
			dev,
			xstats_names + cnt_used_entries,
			size - cnt_used_entries);
		if (cnt_driver_entries < 0)
			return eth_err(port_id, cnt_driver_entries);
		cnt_used_entries += cnt_driver_entries;
	}

	return cnt_used_entries;
}

static int
rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
{
	struct rte_eth_dev *dev;
	struct rte_eth_stats eth_stats;
	unsigned int count = 0, i, q;
	uint64_t val, *stats_ptr;
	uint16_t nb_rxqs, nb_txqs;
	int ret;

	ret = rte_eth_stats_get(port_id, &eth_stats);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];

	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	/* global stats */
	for (i = 0; i < RTE_NB_STATS; i++) {
		stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_stats_strings[i].offset);
		val = *stats_ptr;
		xstats[count++].value = val;
	}

	/* per-rxq stats */
	for (q = 0; q < nb_rxqs; q++) {
		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_rxq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}

	/* per-txq stats */
	for (q = 0; q < nb_txqs; q++) {
		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_txq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}
	return count;
}

2132 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2133 uint64_t *values, unsigned int size)
2135 unsigned int no_basic_stat_requested = 1;
2136 unsigned int no_ext_stat_requested = 1;
2137 unsigned int num_xstats_filled;
2138 unsigned int basic_count;
2139 uint16_t expected_entries;
2140 struct rte_eth_dev *dev;
2144 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2145 ret = get_xstats_count(port_id);
2148 expected_entries = (uint16_t)ret;
2149 struct rte_eth_xstat xstats[expected_entries];
2150 dev = &rte_eth_devices[port_id];
2151 basic_count = get_xstats_basic_count(dev);
2153 /* Return max number of stats if no ids given */
2156 return expected_entries;
2157 else if (values && size < expected_entries)
2158 return expected_entries;
2164 if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2165 unsigned int basic_count = get_xstats_basic_count(dev);
2166 uint64_t ids_copy[size];
2168 for (i = 0; i < size; i++) {
2169 if (ids[i] < basic_count) {
2170 no_basic_stat_requested = 0;
2175 * Convert ids to xstats ids that PMD knows.
2176 * ids known by user are basic + extended stats.
2178 ids_copy[i] = ids[i] - basic_count;
2181 if (no_basic_stat_requested)
2182 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2187 for (i = 0; i < size; i++) {
2188 if (ids[i] >= basic_count) {
2189 no_ext_stat_requested = 0;
2195 /* Fill the xstats structure */
2196 if (ids && no_ext_stat_requested)
2197 ret = rte_eth_basic_stats_get(port_id, xstats);
2199 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2203 num_xstats_filled = (unsigned int)ret;
2205 /* Return all stats */
2207 for (i = 0; i < num_xstats_filled; i++)
2208 values[i] = xstats[i].value;
2209 return expected_entries;
2213 for (i = 0; i < size; i++) {
2214 if (ids[i] >= expected_entries) {
2215 RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
2218 values[i] = xstats[ids[i]].value;
int
rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
	unsigned int n)
{
	struct rte_eth_dev *dev;
	unsigned int count = 0, i;
	signed int xcount = 0;
	uint16_t nb_rxqs, nb_txqs;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	/* Return generic statistics */
	count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
		(nb_txqs * RTE_NB_TXQ_STATS);

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL) {
		/* Retrieve the xstats from the driver at the end of the
		 * xstats struct.
		 */
		xcount = (*dev->dev_ops->xstats_get)(dev,
				     xstats ? xstats + count : NULL,
				     (n > count) ? n - count : 0);

		if (xcount < 0)
			return eth_err(port_id, xcount);
	}

	if (n < count + xcount || xstats == NULL)
		return count + xcount;

	/* now fill the xstats structure */
	ret = rte_eth_basic_stats_get(port_id, xstats);
	if (ret < 0)
		return ret;
	count = ret;

	for (i = 0; i < count; i++)
		xstats[i].id = i;
	/* add an offset to driver-specific stats */
	for ( ; i < count + xcount; i++)
		xstats[i].id += count;

	return count + xcount;
}

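/*
 * Illustrative sketch (not part of the original file): the usual two-call
 * pattern for the functions above - ask for the count first, then allocate
 * and fetch names and values together.
 */
static __rte_unused void
example_dump_xstats(uint16_t port_id)
{
	struct rte_eth_xstat *xstats = NULL;
	struct rte_eth_xstat_name *names = NULL;
	int i, len;

	len = rte_eth_xstats_get(port_id, NULL, 0);
	if (len <= 0)
		return;

	xstats = calloc(len, sizeof(*xstats));
	names = calloc(len, sizeof(*names));
	if (xstats == NULL || names == NULL)
		goto out;

	if (rte_eth_xstats_get(port_id, xstats, len) != len ||
	    rte_eth_xstats_get_names(port_id, names, len) != len)
		goto out;

	for (i = 0; i < len; i++)
		printf("%s: %" PRIu64 "\n", names[i].name, xstats[i].value);
out:
	free(xstats);
	free(names);
}
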
/* reset ethdev extended statistics */
void
rte_eth_xstats_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_reset != NULL) {
		(*dev->dev_ops->xstats_reset)(dev);
		return;
	}

	/* fallback to default */
	rte_eth_stats_reset(port_id);
}

static int
set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
		uint8_t is_rx)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
	return (*dev->dev_ops->queue_stats_mapping_set)
			(dev, queue_id, stat_idx, is_rx);
}

int
rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
		uint8_t stat_idx)
{
	return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
						stat_idx, STAT_QMAP_TX));
}

int
rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
		uint8_t stat_idx)
{
	return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
						stat_idx, STAT_QMAP_RX));
}

int
rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
							fw_version, fw_size));
}

void
rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
{
	struct rte_eth_dev *dev;
	const struct rte_eth_desc_lim lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
	};

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
	dev_info->rx_desc_lim = lim;
	dev_info->tx_desc_lim = lim;
	dev_info->device = dev->device;

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
	dev_info->driver_name = dev->device->driver->name;
	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
}

int
rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
				 uint32_t *ptypes, int num)
{
	int i, j;
	struct rte_eth_dev *dev;
	const uint32_t *all_ptypes;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);

	if (!all_ptypes)
		return 0;

	for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
		if (all_ptypes[i] & ptype_mask) {
			if (j < num)
				ptypes[j] = all_ptypes[i];
			j++;
		}

	return j;
}

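/*
 * Illustrative sketch (not part of the original file): querying how many
 * L3 packet types a port can recognize. A first call with num == 0 only
 * counts; a second call with an array of that size would fill it in.
 */
static __rte_unused int
example_count_l3_ptypes(uint16_t port_id)
{
	return rte_eth_dev_get_supported_ptypes(port_id,
			RTE_PTYPE_L3_MASK, NULL, 0);
}
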
void
rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];
	ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
}

2403 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
2405 struct rte_eth_dev *dev;
2407 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2409 dev = &rte_eth_devices[port_id];
2410 *mtu = dev->data->mtu;
int
rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
{
	int ret;
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);

	ret = (*dev->dev_ops->mtu_set)(dev, mtu);
	if (!ret)
		dev->data->mtu = mtu;

	return eth_err(port_id, ret);
}
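/*
 * Usage sketch (illustrative only): the software copy in dev->data->mtu is
 * only refreshed after the driver accepts the new value, so a get after a
 * failed set still reports the old MTU.
 *
 *	uint16_t mtu;
 *
 *	if (rte_eth_dev_set_mtu(port_id, 9000) != 0)
 *		printf("port %u: jumbo MTU rejected\n", port_id);
 *	rte_eth_dev_get_mtu(port_id, &mtu);
 *	printf("port %u: effective MTU %u\n", port_id, mtu);
 */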
int
rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	if (!(dev->data->dev_conf.rxmode.offloads &
	      DEV_RX_OFFLOAD_VLAN_FILTER)) {
		RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n",
				port_id);
		return -ENOSYS;
	}

	if (vlan_id > 4095) {
		RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
				port_id, (unsigned) vlan_id);
		return -EINVAL;
	}
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);

	ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
	if (ret == 0) {
		struct rte_vlan_filter_conf *vfc;
		int vidx;
		int vbit;

		vfc = &dev->data->vlan_filter_conf;
		vidx = vlan_id / 64;
		vbit = vlan_id % 64;

		if (on)
			vfc->ids[vidx] |= UINT64_C(1) << vbit;
		else
			vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
	}

	return eth_err(port_id, ret);
}
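/*
 * Usage sketch (illustrative only): DEV_RX_OFFLOAD_VLAN_FILTER must have
 * been requested in rxmode.offloads at configure time, otherwise the
 * function above fails before reaching the driver.
 *
 *	if (rte_eth_dev_vlan_filter(port_id, 100, 1) == 0)
 *		printf("port %u now accepts VLAN 100\n", port_id);
 */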
int
rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
				    int on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
	(*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);

	return 0;
}
int
rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
				enum rte_vlan_type vlan_type,
				uint16_t tpid)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);

	return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
							       tpid));
}
int
rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
{
	struct rte_eth_dev *dev;
	int ret = 0;
	int mask = 0;
	int cur, org = 0;
	uint64_t orig_offloads;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	/* save original values in case of failure */
	orig_offloads = dev->data->dev_conf.rxmode.offloads;

	/* check which options were changed by the application */
	cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.offloads &
		 DEV_RX_OFFLOAD_VLAN_STRIP);
	if (cur != org) {
		if (cur)
			dev->data->dev_conf.rxmode.offloads |=
				DEV_RX_OFFLOAD_VLAN_STRIP;
		else
			dev->data->dev_conf.rxmode.offloads &=
				~DEV_RX_OFFLOAD_VLAN_STRIP;
		mask |= ETH_VLAN_STRIP_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.offloads &
		 DEV_RX_OFFLOAD_VLAN_FILTER);
	if (cur != org) {
		if (cur)
			dev->data->dev_conf.rxmode.offloads |=
				DEV_RX_OFFLOAD_VLAN_FILTER;
		else
			dev->data->dev_conf.rxmode.offloads &=
				~DEV_RX_OFFLOAD_VLAN_FILTER;
		mask |= ETH_VLAN_FILTER_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.offloads &
		 DEV_RX_OFFLOAD_VLAN_EXTEND);
	if (cur != org) {
		if (cur)
			dev->data->dev_conf.rxmode.offloads |=
				DEV_RX_OFFLOAD_VLAN_EXTEND;
		else
			dev->data->dev_conf.rxmode.offloads &=
				~DEV_RX_OFFLOAD_VLAN_EXTEND;
		mask |= ETH_VLAN_EXTEND_MASK;
	}

	/* no change */
	if (mask == 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
	ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
	if (ret) {
		/* hit an error, restore the original values */
		dev->data->dev_conf.rxmode.offloads = orig_offloads;
	}

	return eth_err(port_id, ret);
}
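/*
 * Usage sketch (illustrative only): read-modify-write of the VLAN offload
 * flags; only bits that differ from the current configuration are forwarded
 * to the driver by the function above.
 *
 *	int flags = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	flags |= ETH_VLAN_STRIP_OFFLOAD;
 *	if (rte_eth_dev_set_vlan_offload(port_id, flags) != 0)
 *		printf("port %u: cannot enable VLAN stripping\n", port_id);
 */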
int
rte_eth_dev_get_vlan_offload(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret = 0;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_VLAN_STRIP)
		ret |= ETH_VLAN_STRIP_OFFLOAD;

	if (dev->data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_VLAN_FILTER)
		ret |= ETH_VLAN_FILTER_OFFLOAD;

	if (dev->data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_VLAN_EXTEND)
		ret |= ETH_VLAN_EXTEND_OFFLOAD;

	return ret;
}
int
rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);

	return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
}

int
rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
	memset(fc_conf, 0, sizeof(*fc_conf));
	return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
}

int
rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
		RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
}
int
rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
				   struct rte_eth_pfc_conf *pfc_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
		RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	/* High water, low water validation is device-specific */
	if (*dev->dev_ops->priority_flow_ctrl_set)
		return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
					(dev, pfc_conf));
	return -ENOTSUP;
}
static int
rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	uint16_t i, num;

	if (!reta_conf)
		return -EINVAL;

	num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
	for (i = 0; i < num; i++) {
		if (reta_conf[i].mask)
			return 0;
	}

	return -EINVAL;
}

static int
rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size,
			 uint16_t max_rxq)
{
	uint16_t i, idx, shift;

	if (!reta_conf)
		return -EINVAL;

	if (max_rxq == 0) {
		RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if ((reta_conf[idx].mask & (1ULL << shift)) &&
			(reta_conf[idx].reta[shift] >= max_rxq)) {
			RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
				"the maximum rxq index: %u\n", idx, shift,
				reta_conf[idx].reta[shift], max_rxq);
			return -EINVAL;
		}
	}

	return 0;
}
int
rte_eth_dev_rss_reta_update(uint16_t port_id,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	/* Check mask bits */
	ret = rte_eth_check_reta_mask(reta_conf, reta_size);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];

	/* Check entry value */
	ret = rte_eth_check_reta_entry(reta_conf, reta_size,
				dev->data->nb_rx_queues);
	if (ret < 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
							     reta_size));
}
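/*
 * Usage sketch (illustrative only): spread all redirection-table entries
 * round-robin across the receive queues; RTE_RETA_GROUP_SIZE entries share
 * one mask word, as the checks above assume. Assumes a table of at most
 * 512 entries; nb_rx_queues is a hypothetical application variable.
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_RETA_GROUP_SIZE];
 *	struct rte_eth_dev_info dev_info;
 *	uint16_t i, idx, shift;
 *
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	for (i = 0; i < dev_info.reta_size; i++) {
 *		idx = i / RTE_RETA_GROUP_SIZE;
 *		shift = i % RTE_RETA_GROUP_SIZE;
 *		reta_conf[idx].mask |= 1ULL << shift;
 *		reta_conf[idx].reta[shift] = i % nb_rx_queues;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta_conf, dev_info.reta_size);
 */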
int
rte_eth_dev_rss_reta_query(uint16_t port_id,
			   struct rte_eth_rss_reta_entry64 *reta_conf,
			   uint16_t reta_size)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	/* Check mask bits */
	ret = rte_eth_check_reta_mask(reta_conf, reta_size);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
							    reta_size));
}

int
rte_eth_dev_rss_hash_update(uint16_t port_id,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
								 rss_conf));
}

int
rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
			      struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
								   rss_conf));
}
int
rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
				struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (udp_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
		return -EINVAL;
	}

	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
								udp_tunnel));
}

int
rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
				   struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (udp_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
		return -EINVAL;
	}

	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
								udp_tunnel));
}
int
rte_eth_led_on(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
}

int
rte_eth_led_off(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
}
/*
 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
 * an unused entry.
 */
static int
get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	unsigned i;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	rte_eth_dev_info_get(port_id, &dev_info);

	for (i = 0; i < dev_info.max_mac_addrs; i++)
		if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
			return i;

	return -1;
}

static const struct ether_addr null_mac_addr;
int
rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
			uint32_t pool)
{
	struct rte_eth_dev *dev;
	int index;
	uint64_t pool_mask;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);

	if (is_zero_ether_addr(addr)) {
		RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
			port_id);
		return -EINVAL;
	}
	if (pool >= ETH_64_POOLS) {
		RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
		return -EINVAL;
	}

	index = get_mac_addr_index(port_id, addr);
	if (index < 0) {
		index = get_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
				port_id);
			return -ENOSPC;
		}
	} else {
		pool_mask = dev->data->mac_pool_sel[index];

		/* Check if both MAC address and pool are already there, and do nothing */
		if (pool_mask & (1ULL << pool))
			return 0;
	}

	/* Update NIC */
	ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);

	if (ret == 0) {
		/* Update address in NIC data structure */
		ether_addr_copy(addr, &dev->data->mac_addrs[index]);

		/* Update pool bitmap in NIC data structure */
		dev->data->mac_pool_sel[index] |= (1ULL << pool);
	}

	return eth_err(port_id, ret);
}
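/*
 * Usage sketch (illustrative only): add a locally administered secondary
 * unicast address to pool 0; re-adding the same address/pool pair is a
 * no-op by design in the function above.
 *
 *	struct ether_addr addr = { .addr_bytes =
 *		{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } };
 *
 *	if (rte_eth_dev_mac_addr_add(port_id, &addr, 0) != 0)
 *		printf("port %u: could not add MAC address\n", port_id);
 */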
int
rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
{
	struct rte_eth_dev *dev;
	int index;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);

	index = get_mac_addr_index(port_id, addr);
	if (index == 0) {
		RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n",
			port_id);
		return -EADDRINUSE;
	} else if (index < 0)
		return 0;  /* Do nothing if address wasn't found */

	/* Update NIC */
	(*dev->dev_ops->mac_addr_remove)(dev, index);

	/* Update address in NIC data structure */
	ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);

	/* reset pool bitmap */
	dev->data->mac_pool_sel[index] = 0;

	return 0;
}
int
rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (!is_valid_assigned_ether_addr(addr))
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);

	ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
	if (ret < 0)
		return ret;

	/* Update default address in NIC data structure */
	ether_addr_copy(addr, &dev->data->mac_addrs[0]);

	return 0;
}
/*
 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
 * an unused entry.
 */
static int
get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	unsigned i;

	rte_eth_dev_info_get(port_id, &dev_info);
	if (!dev->data->hash_mac_addrs)
		return -1;

	for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
		if (memcmp(addr, &dev->data->hash_mac_addrs[i],
			ETHER_ADDR_LEN) == 0)
			return i;

	return -1;
}
int
rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
				uint8_t on)
{
	int index;
	int ret;
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (is_zero_ether_addr(addr)) {
		RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
			port_id);
		return -EINVAL;
	}

	index = get_hash_mac_addr_index(port_id, addr);
	/* Check if it's already there, and do nothing */
	if ((index >= 0) && on)
		return 0;

	if (index < 0) {
		if (!on) {
			RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
				"set in UTA\n", port_id);
			return -EINVAL;
		}

		index = get_hash_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
					port_id);
			return -ENOSPC;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
	ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
	if (ret == 0) {
		/* Update address in NIC data structure */
		if (on)
			ether_addr_copy(addr,
					&dev->data->hash_mac_addrs[index]);
		else
			ether_addr_copy(&null_mac_addr,
					&dev->data->hash_mac_addrs[index]);
	}

	return eth_err(port_id, ret);
}
int
rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
								       on));
}
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
					uint16_t tx_rate)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_link link;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	rte_eth_dev_info_get(port_id, &dev_info);
	link = dev->data->dev_link;

	if (queue_idx > dev_info.max_tx_queues) {
		RTE_PMD_DEBUG_TRACE("set queue rate limit:port %d: "
				"invalid queue id=%d\n", port_id, queue_idx);
		return -EINVAL;
	}

	if (tx_rate > link.link_speed) {
		RTE_PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
				"bigger than link speed= %d\n",
			tx_rate, link.link_speed);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
							queue_idx, tx_rate));
}
int
rte_eth_mirror_rule_set(uint16_t port_id,
			struct rte_eth_mirror_conf *mirror_conf,
			uint8_t rule_id, uint8_t on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (mirror_conf->rule_type == 0) {
		RTE_PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
		return -EINVAL;
	}

	if (mirror_conf->dst_pool >= ETH_64_POOLS) {
		RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
				ETH_64_POOLS - 1);
		return -EINVAL;
	}

	if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
	     ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
	    (mirror_conf->pool_mask == 0)) {
		RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
		return -EINVAL;
	}

	if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
	    mirror_conf->vlan.vlan_mask == 0) {
		RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);

	return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
						mirror_conf, rule_id, on));
}

int
rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);

	return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
								   rule_id));
}
RTE_INIT(eth_dev_init_cb_lists)
{
	int i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
		TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
}
int
rte_eth_dev_callback_register(uint16_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *user_cb;
	uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
	uint16_t last_port;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
		ethdev_log(ERR, "Invalid port_id=%d", port_id);
		return -EINVAL;
	}

	if (port_id == RTE_ETH_ALL) {
		next_port = 0;
		last_port = RTE_MAX_ETHPORTS - 1;
	} else {
		next_port = last_port = port_id;
	}

	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	do {
		dev = &rte_eth_devices[next_port];

		TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
			if (user_cb->cb_fn == cb_fn &&
				user_cb->cb_arg == cb_arg &&
				user_cb->event == event) {
				break;
			}
		}

		/* create a new callback. */
		if (user_cb == NULL) {
			user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_eth_dev_callback), 0);
			if (user_cb != NULL) {
				user_cb->cb_fn = cb_fn;
				user_cb->cb_arg = cb_arg;
				user_cb->event = event;
				TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
						  user_cb, next);
			} else {
				rte_spinlock_unlock(&rte_eth_dev_cb_lock);
				rte_eth_dev_callback_unregister(port_id, event,
								cb_fn, cb_arg);
				return -ENOMEM;
			}
		}
	} while (++next_port <= last_port);

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	return 0;
}
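/*
 * Usage sketch (illustrative only): a link-status callback matching the
 * rte_eth_dev_cb_fn signature invoked by the dispatch loop below; the
 * app_on_link_change name is hypothetical.
 *
 *	static int
 *	app_on_link_change(uint16_t port_id, enum rte_eth_event_type event,
 *			   void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: event %d\n", port_id, (int)event);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_INTR_LSC,
 *				      app_on_link_change, NULL);
 */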
int
rte_eth_dev_callback_unregister(uint16_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *cb, *next;
	uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
	uint16_t last_port;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
		ethdev_log(ERR, "Invalid port_id=%d", port_id);
		return -EINVAL;
	}

	if (port_id == RTE_ETH_ALL) {
		next_port = 0;
		last_port = RTE_MAX_ETHPORTS - 1;
	} else {
		next_port = last_port = port_id;
	}

	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	do {
		dev = &rte_eth_devices[next_port];
		ret = 0;
		for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
		     cb = next) {

			next = TAILQ_NEXT(cb, next);

			if (cb->cb_fn != cb_fn || cb->event != event ||
			    (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
				continue;

			/*
			 * if this callback is not executing right now,
			 * then remove it.
			 */
			if (cb->active == 0) {
				TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
				rte_free(cb);
			} else {
				ret = -EAGAIN;
			}
		}
	} while (++next_port <= last_port);

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	return ret;
}
int
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event, void *ret_param)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;
	int rc = 0;

	rte_spinlock_lock(&rte_eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		if (ret_param != NULL)
			dev_cb.ret_param = ret_param;

		rte_spinlock_unlock(&rte_eth_dev_cb_lock);
		rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
				dev_cb.cb_arg, dev_cb.ret_param);
		rte_spinlock_lock(&rte_eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	return rc;
}
int
rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
{
	uint32_t vec;
	struct rte_eth_dev *dev;
	struct rte_intr_handle *intr_handle;
	uint16_t qid;
	int rc;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	if (!dev->intr_handle) {
		RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
		return -ENOTSUP;
	}

	intr_handle = dev->intr_handle;
	if (!intr_handle->intr_vec) {
		RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
		return -EPERM;
	}

	for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
		vec = intr_handle->intr_vec[qid];
		rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
		if (rc && rc != -EEXIST) {
			RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
					" op %d epfd %d vec %u\n",
					port_id, qid, op, epfd, vec);
		}
	}

	return 0;
}
const struct rte_memzone *
rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
			 uint16_t queue_id, size_t size, unsigned align,
			 int socket_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
		 dev->device->driver->name, ring_name,
		 dev->data->port_id, queue_id);

	mz = rte_memzone_lookup(z_name);
	if (mz)
		return mz;

	return rte_memzone_reserve_aligned(z_name, size, socket_id,
			RTE_MEMZONE_IOVA_CONTIG, align);
}
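/*
 * Usage sketch (illustrative only, PMD-facing): a driver reserving a ring
 * for queue 0. On restart, the lookup above returns the zone reserved by a
 * previous run instead of allocating a new one. ring_size and socket_id are
 * hypothetical driver variables.
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", 0, ring_size,
 *				      RTE_CACHE_LINE_SIZE, socket_id);
 *	if (mz == NULL)
 *		return -ENOMEM;
 */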
int
rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
			  int epfd, int op, void *data)
{
	uint32_t vec;
	struct rte_eth_dev *dev;
	struct rte_intr_handle *intr_handle;
	int rc;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (!dev->intr_handle) {
		RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
		return -ENOTSUP;
	}

	intr_handle = dev->intr_handle;
	if (!intr_handle->intr_vec) {
		RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
		return -EPERM;
	}

	vec = intr_handle->intr_vec[queue_id];
	rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
	if (rc && rc != -EEXIST) {
		RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
				" op %d epfd %d vec %u\n",
				port_id, queue_id, op, epfd, vec);
		return rc;
	}

	return 0;
}
int
rte_eth_dev_rx_intr_enable(uint16_t port_id,
			   uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
								queue_id));
}

int
rte_eth_dev_rx_intr_disable(uint16_t port_id,
			    uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
								queue_id));
}
int
rte_eth_dev_filter_supported(uint16_t port_id,
			     enum rte_filter_type filter_type)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
	return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
				RTE_ETH_FILTER_NOP, NULL);
}

int
rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
			enum rte_filter_op filter_op, void *arg)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
							     filter_op, arg));
}
const struct rte_eth_rxtx_callback *
rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}
	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.rx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&rte_eth_rx_cb_lock);
	/* Add the callbacks in fifo order. */
	struct rte_eth_rxtx_callback *tail =
		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];

	if (!tail) {
		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;

	} else {
		while (tail->next)
			tail = tail->next;
		tail->next = cb;
	}
	rte_spinlock_unlock(&rte_eth_rx_cb_lock);

	return cb;
}
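/*
 * Usage sketch (illustrative only): an RX callback matching the
 * rte_rx_callback_fn signature (assumed here to take port, queue, the
 * received mbuf array, nb_pkts, max_pkts and the user parameter); the
 * app_count_cb name is hypothetical.
 *
 *	static uint16_t
 *	app_count_cb(uint16_t port_id, uint16_t queue_id,
 *		     struct rte_mbuf *pkts[], uint16_t nb_pkts,
 *		     uint16_t max_pkts, void *user_param)
 *	{
 *		uint64_t *counter = user_param;
 *
 *		RTE_SET_USED(port_id);
 *		RTE_SET_USED(queue_id);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(max_pkts);
 *		*counter += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	static uint64_t rx_count;
 *	rte_eth_add_rx_callback(port_id, 0, app_count_cb, &rx_count);
 */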
const struct rte_eth_rxtx_callback *
rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.rx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&rte_eth_rx_cb_lock);
	/* Add the callbacks at first position */
	cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
	rte_smp_wmb();
	rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
	rte_spinlock_unlock(&rte_eth_rx_cb_lock);

	return cb;
}
const struct rte_eth_rxtx_callback *
rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
		rte_tx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.tx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&rte_eth_tx_cb_lock);
	/* Add the callbacks in fifo order. */
	struct rte_eth_rxtx_callback *tail =
		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];

	if (!tail) {
		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;

	} else {
		while (tail->next)
			tail = tail->next;
		tail->next = cb;
	}
	rte_spinlock_unlock(&rte_eth_tx_cb_lock);

	return cb;
}
int
rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
		const struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return -ENOTSUP;
#endif
	/* Check input parameters. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	if (user_cb == NULL ||
			queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
		return -EINVAL;

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_eth_rxtx_callback *cb;
	struct rte_eth_rxtx_callback **prev_cb;
	int ret = -EINVAL;

	rte_spinlock_lock(&rte_eth_rx_cb_lock);
	prev_cb = &dev->post_rx_burst_cbs[queue_id];
	for (; *prev_cb != NULL; prev_cb = &cb->next) {
		cb = *prev_cb;
		if (cb == user_cb) {
			/* Remove the user cb from the callback list. */
			*prev_cb = cb->next;
			ret = 0;
			break;
		}
	}
	rte_spinlock_unlock(&rte_eth_rx_cb_lock);

	return ret;
}
int
rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
		const struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return -ENOTSUP;
#endif
	/* Check input parameters. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	if (user_cb == NULL ||
			queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
		return -EINVAL;

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret = -EINVAL;
	struct rte_eth_rxtx_callback *cb;
	struct rte_eth_rxtx_callback **prev_cb;

	rte_spinlock_lock(&rte_eth_tx_cb_lock);
	prev_cb = &dev->pre_tx_burst_cbs[queue_id];
	for (; *prev_cb != NULL; prev_cb = &cb->next) {
		cb = *prev_cb;
		if (cb == user_cb) {
			/* Remove the user cb from the callback list. */
			*prev_cb = cb->next;
			ret = 0;
			break;
		}
	}
	rte_spinlock_unlock(&rte_eth_tx_cb_lock);

	return ret;
}
int
rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (qinfo == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);

	memset(qinfo, 0, sizeof(*qinfo));
	dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
	return 0;
}

int
rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (qinfo == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);

	memset(qinfo, 0, sizeof(*qinfo));
	dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
	return 0;
}
int
rte_eth_dev_set_mc_addr_list(uint16_t port_id,
			     struct ether_addr *mc_addr_set,
			     uint32_t nb_mc_addr)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
	return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
						mc_addr_set, nb_mc_addr));
}
int
rte_eth_timesync_enable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
}

int
rte_eth_timesync_disable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
}
int
rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
				   uint32_t flags)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
				(dev, timestamp, flags));
}

int
rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
				   struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
				(dev, timestamp));
}
int
rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
								      delta));
}

int
rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
								timestamp));
}

int
rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
								timestamp));
}
int
rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
}

int
rte_eth_dev_get_eeprom_length(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
}

int
rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
}

int
rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
}
int
rte_eth_dev_get_dcb_info(uint16_t port_id,
			 struct rte_eth_dcb_info *dcb_info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
}
int
rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
				    struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (l2_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
		return -EINVAL;
	}

	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
				-ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
								l2_tunnel));
}

int
rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
				  struct rte_eth_l2_tunnel_conf *l2_tunnel,
				  uint32_t mask,
				  uint8_t en)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (l2_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
		return -EINVAL;
	}

	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
		return -EINVAL;
	}

	if (mask == 0) {
		RTE_PMD_DEBUG_TRACE("Mask should have a value.\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
				-ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
							l2_tunnel, mask, en));
}
static void
rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
			   const struct rte_eth_desc_lim *desc_lim)
{
	if (desc_lim->nb_align != 0)
		*nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);

	if (desc_lim->nb_max != 0)
		*nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);

	*nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
}
int
rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
				 uint16_t *nb_rx_desc,
				 uint16_t *nb_tx_desc)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);

	rte_eth_dev_info_get(port_id, &dev_info);

	if (nb_rx_desc != NULL)
		rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);

	if (nb_tx_desc != NULL)
		rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);

	return 0;
}
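/*
 * Usage sketch (illustrative only): clamp the requested ring sizes to the
 * driver limits before queue setup; both values may be rewritten in place
 * by the function above.
 *
 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *	if (rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd) != 0)
 *		printf("port %u: cannot query descriptor limits\n", port_id);
 */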
int
rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (pool == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->pool_ops_supported == NULL)
		return 1; /* all pools are supported */

	return (*dev->dev_ops->pool_ops_supported)(dev, pool);
}
RTE_INIT(ethdev_init_log);
static void
ethdev_init_log(void)
{
	ethdev_logtype = rte_log_register("lib.ethdev");
	if (ethdev_logtype >= 0)
		rte_log_set_level(ethdev_logtype, RTE_LOG_INFO);
}