/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>

#include "rte_ether.h"
#include "rte_ethdev.h"
#include "rte_ethdev_driver.h"
#include "ethdev_profile.h"

static int ethdev_logtype;

#define ethdev_log(level, fmt, ...) \
	rte_log(RTE_LOG_ ## level, ethdev_logtype, fmt "\n", ## __VA_ARGS__)
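/*
 * Example (illustrative): ethdev_log(ERR, "bad port %u", port_id) expands to
 * rte_log(RTE_LOG_ERR, ethdev_logtype, "bad port %u\n", port_id), so callers
 * pass the level without the RTE_LOG_ prefix and without a trailing newline.
 */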
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static uint16_t eth_dev_last_created_port;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
	uint64_t next_owner_id;
	rte_spinlock_t ownership_lock;
	struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *rte_eth_dev_shared_data;

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) / \
		sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) / \
		sizeof(rte_txq_stats_strings[0]))

#define RTE_RX_OFFLOAD_BIT2STR(_name) \
	{ DEV_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} rte_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
	RTE_RX_OFFLOAD_BIT2STR(CRC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
};

#undef RTE_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name) \
	{ DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} rte_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user application,
 * the pointer to the parameters for the callback, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	void *ret_param;                        /**< Return parameter */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
	       rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
	       rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}
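/*
 * Usage sketch (illustrative, not part of this file): scanning all usable
 * ports with rte_eth_find_next(); RTE_MAX_ETHPORTS acts as the end marker.
 *
 *	uint16_t pid;
 *
 *	for (pid = rte_eth_find_next(0); pid < RTE_MAX_ETHPORTS;
 *	     pid = rte_eth_find_next(pid + 1))
 *		printf("port %u is usable\n", pid);
 */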
static void
rte_eth_dev_shared_data_prepare(void)
{
	const unsigned flags = 0;
	const struct rte_memzone *mz;

	rte_spinlock_lock(&rte_eth_shared_data_lock);

	if (rte_eth_dev_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate port data and ownership shared memory. */
			mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
					sizeof(*rte_eth_dev_shared_data),
					rte_socket_id(), flags);
		} else
			mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
		if (mz == NULL)
			rte_panic("Cannot allocate ethdev shared data\n");

		rte_eth_dev_shared_data = mz->addr;
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			rte_eth_dev_shared_data->next_owner_id =
					RTE_ETH_DEV_NO_OWNER + 1;
			rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
			memset(rte_eth_dev_shared_data->data, 0,
			       sizeof(rte_eth_dev_shared_data->data));
		}
	}

	rte_spinlock_unlock(&rte_eth_shared_data_lock);
}

static int
is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
_rte_eth_dev_allocated(const char *name)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	struct rte_eth_dev *ethdev;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	ethdev = _rte_eth_dev_allocated(name);

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	return ethdev;
}

static uint16_t
rte_eth_dev_find_free_port(void)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Using shared name field to find a free port. */
		if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
			RTE_ASSERT(rte_eth_devices[i].state ==
				   RTE_ETH_DEV_UNUSED);
			return i;
		}
	}
	return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &rte_eth_dev_shared_data->data[port_id];
	eth_dev->state = RTE_ETH_DEV_ATTACHED;

	eth_dev_last_created_port = port_id;

	return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev = NULL;

	rte_eth_dev_shared_data_prepare();

	/* Synchronize port creation between primary and secondary threads. */
	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	port_id = rte_eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		ethdev_log(ERR, "Reached maximum number of Ethernet ports");
		goto unlock;
	}

	if (_rte_eth_dev_allocated(name) != NULL) {
		ethdev_log(ERR,
			"Ethernet device with name %s already allocated!",
			name);
		goto unlock;
	}

	eth_dev = eth_dev_get(port_id);
	snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
	eth_dev->data->port_id = port_id;
	eth_dev->data->mtu = ETHER_MTU;

unlock:
	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	if (eth_dev != NULL)
		_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_NEW, NULL);

	return eth_dev;
}
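/*
 * Usage sketch (illustrative, not part of this file): a PMD typically calls
 * rte_eth_dev_allocate() from its probe routine; "net_example0" is a made-up
 * device name and error handling is minimal.
 *
 *	struct rte_eth_dev *eth_dev = rte_eth_dev_allocate("net_example0");
 *
 *	if (eth_dev == NULL)
 *		return -ENOMEM;
 */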
/*
 * Attach to a port already registered by the primary process, which
 * makes sure that the same device has the same port id in both the
 * primary and the secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev = NULL;

	rte_eth_dev_shared_data_prepare();

	/* Synchronize port attachment to primary port creation and release. */
	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_PMD_DEBUG_TRACE(
			"device %s is not driven by the primary process\n",
			name);
	} else {
		eth_dev = eth_dev_get(i);
		RTE_ASSERT(eth_dev->data->port_id == i);
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	eth_dev->state = RTE_ETH_DEV_UNUSED;

	memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_DESTROY, NULL);

	return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}

static int
rte_eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    rte_eth_dev_shared_data->next_owner_id <= owner_id) {
		RTE_PMD_DEBUG_TRACE("Invalid owner_id=%016"PRIX64".\n", owner_id);
		return 0;
	}
	return 1;
}

uint64_t __rte_experimental
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
	       ((rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
	       rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED) ||
	       rte_eth_devices[port_id].data->owner.id != owner_id))
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}

int __rte_experimental
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	*owner_id = rte_eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return 0;
}

static int
_rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		       const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;
	int sret;

	if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
		RTE_PMD_DEBUG_TRACE("Port id %"PRIu16" is not allocated.\n", port_id);
		return -ENODEV;
	}

	if (!rte_eth_is_valid_owner_id(new_owner->id) &&
	    !rte_eth_is_valid_owner_id(old_owner_id))
		return -EINVAL;

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_PMD_DEBUG_TRACE("Cannot set owner to port %d already owned"
				    " by %s_%016"PRIX64".\n", port_id,
				    port_owner->name, port_owner->id);
		return -EPERM;
	}

	sret = snprintf(port_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN, "%s",
			new_owner->name);
	if (sret < 0 || sret >= RTE_ETH_MAX_OWNER_NAME_LEN)
		RTE_PMD_DEBUG_TRACE("Port %d owner name was truncated.\n",
				    port_id);

	port_owner->id = new_owner->id;

	RTE_PMD_DEBUG_TRACE("Port %d owner is %s_%016"PRIX64".\n", port_id,
			    new_owner->name, new_owner->id);

	return 0;
}

int __rte_experimental
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return ret;
}

int __rte_experimental
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return ret;
}

void __rte_experimental
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	if (rte_eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
			if (rte_eth_devices[port_id].data->owner.id == owner_id)
				memset(&rte_eth_devices[port_id].data->owner, 0,
				       sizeof(struct rte_eth_dev_owner));
		RTE_PMD_DEBUG_TRACE("All port owners owned by %016"PRIX64
				" identifier have been removed.\n", owner_id);
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
}

int __rte_experimental
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	int ret = 0;
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
		RTE_PMD_DEBUG_TRACE("Port id %"PRIu16" is not allocated.\n", port_id);
		ret = -ENODEV;
	} else {
		rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return ret;
}
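/*
 * Usage sketch (illustrative, not part of this file): an application claims
 * exclusive ownership of a port so other entities leave it alone, then
 * releases it. Error handling is omitted for brevity.
 *
 *	struct rte_eth_dev_owner owner = { .name = "my_app" };
 *	uint64_t my_owner_id;
 *
 *	rte_eth_dev_owner_new(&my_owner_id);
 *	owner.id = my_owner_id;
 *	rte_eth_dev_owner_set(port_id, &owner);
 *	...
 *	rte_eth_dev_owner_unset(port_id, my_owner_id);
 */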
int
rte_eth_dev_socket_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count(void)
{
	return rte_eth_dev_count_avail();
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	return count;
}

uint16_t __rte_experimental
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	for (port = 0; port < RTE_MAX_ETHPORTS; port++)
		if (rte_eth_devices[port].state != RTE_ETH_DEV_UNUSED)
			count++;

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (name == NULL) {
		RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[i].data',
	 * because it might be overwritten by VDEV PMD */
	tmp = rte_eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint32_t pid;

	if (name == NULL) {
		RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
		return -EINVAL;
	}

	for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
		if (rte_eth_devices[pid].state != RTE_ETH_DEV_UNUSED &&
		    !strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;
			return 0;
		}
	}

	return -ENODEV;
}

static int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

/* attach the new device, then store port_id of the device */
int
rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
{
	int current = rte_eth_dev_count_total();
	struct rte_devargs da;
	int ret;

	memset(&da, 0, sizeof(da));

	if ((devargs == NULL) || (port_id == NULL)) {
		ret = -EINVAL;
		goto err;
	}

	/* parse devargs */
	if (rte_devargs_parse(&da, "%s", devargs)) {
		ret = -EINVAL;
		goto err;
	}

	ret = rte_eal_hotplug_add(da.bus->name, da.name, da.args);
	if (ret < 0)
		goto err;

	/* no point looking at the port count if no port exists */
	if (!rte_eth_dev_count_total()) {
		ethdev_log(ERR, "No port found for device (%s)", da.name);
		ret = -1;
		goto err;
	}

	/* if nothing happened, there is a bug here, since some driver told us
	 * it did attach a device, but did not create a port.
	 * FIXME: race condition in case of plug-out of another device
	 */
	if (current == rte_eth_dev_count_total()) {
		ret = -1;
		goto err;
	}

	*port_id = eth_dev_last_created_port;
	ret = 0;

err:
	free(da.args);
	return ret;
}
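/*
 * Usage sketch (illustrative, not part of this file): hot-plugging a port by
 * devargs string and retrieving the port id it was assigned. "net_null0" is
 * an example vdev devargs value; any bus:device,args string works the same.
 *
 *	uint16_t port_id;
 *
 *	if (rte_eth_dev_attach("net_null0", &port_id) == 0)
 *		printf("attached as port %u\n", port_id);
 */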
/* detach the device, then store the name of the device */
int
rte_eth_dev_detach(uint16_t port_id, char *name __rte_unused)
{
	struct rte_device *dev;
	struct rte_bus *bus;
	uint32_t dev_flags;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev_flags = rte_eth_devices[port_id].data->dev_flags;
	if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
		ethdev_log(ERR,
			"Port %" PRIu16 " is bonded, cannot detach", port_id);
		return -ENOTSUP;
	}

	dev = rte_eth_devices[port_id].device;
	if (dev == NULL)
		return -EINVAL;

	bus = rte_bus_find_by_device(dev);
	if (bus == NULL)
		return -ENOENT;

	ret = rte_eal_hotplug_remove(bus->name, dev->name);
	if (ret < 0)
		return ret;

	rte_eth_dev_release_port(&rte_eth_devices[port_id]);
	return 0;
}

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	void **rxq;
	unsigned i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);
		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (rxq == NULL)
			return -(ENOMEM);
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(rxq + old_nb_queues, 0,
				sizeof(rxq[0]) * new_qs);
		}

		dev->data->rx_queues = rxq;

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);

		rte_free(dev->data->rx_queues);
		dev->data->rx_queues = NULL;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (!dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
			"port %d must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu16
			" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
							     rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu16
			" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (!dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
			"port %d must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu16
			" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev,
							     tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu16
			" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	void **txq;
	unsigned i;

	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
				sizeof(dev->data->tx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);
		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (txq == NULL)
			return -(ENOMEM);
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(txq + old_nb_queues, 0,
				sizeof(txq[0]) * new_qs);
		}

		dev->data->tx_queues = txq;

	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);

		rte_free(dev->data->tx_queues);
		dev->data->tx_queues = NULL;
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case ETH_SPEED_NUM_10M:
		return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
	case ETH_SPEED_NUM_100M:
		return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
	case ETH_SPEED_NUM_1G:
		return ETH_LINK_SPEED_1G;
	case ETH_SPEED_NUM_2_5G:
		return ETH_LINK_SPEED_2_5G;
	case ETH_SPEED_NUM_5G:
		return ETH_LINK_SPEED_5G;
	case ETH_SPEED_NUM_10G:
		return ETH_LINK_SPEED_10G;
	case ETH_SPEED_NUM_20G:
		return ETH_LINK_SPEED_20G;
	case ETH_SPEED_NUM_25G:
		return ETH_LINK_SPEED_25G;
	case ETH_SPEED_NUM_40G:
		return ETH_LINK_SPEED_40G;
	case ETH_SPEED_NUM_50G:
		return ETH_LINK_SPEED_50G;
	case ETH_SPEED_NUM_56G:
		return ETH_LINK_SPEED_56G;
	case ETH_SPEED_NUM_100G:
		return ETH_LINK_SPEED_100G;
	default:
		return 0;
	}
}
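/*
 * Example (illustrative): rte_eth_speed_bitflag(ETH_SPEED_NUM_10G,
 * ETH_LINK_FULL_DUPLEX) returns ETH_LINK_SPEED_10G; an unknown speed value
 * falls through to the default case and returns 0.
 */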
/**
 * A conversion function from the legacy rxmode bitfield API.
 */
static void
rte_eth_convert_rx_offload_bitfield(const struct rte_eth_rxmode *rxmode,
				    uint64_t *rx_offloads)
{
	uint64_t offloads = 0;

	if (rxmode->header_split == 1)
		offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
	if (rxmode->hw_ip_checksum == 1)
		offloads |= DEV_RX_OFFLOAD_CHECKSUM;
	if (rxmode->hw_vlan_filter == 1)
		offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
	if (rxmode->hw_vlan_strip == 1)
		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
	if (rxmode->hw_vlan_extend == 1)
		offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
	if (rxmode->jumbo_frame == 1)
		offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	if (rxmode->hw_strip_crc == 1)
		offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
	if (rxmode->enable_scatter == 1)
		offloads |= DEV_RX_OFFLOAD_SCATTER;
	if (rxmode->enable_lro == 1)
		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
	if (rxmode->hw_timestamp == 1)
		offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
	if (rxmode->security == 1)
		offloads |= DEV_RX_OFFLOAD_SECURITY;

	*rx_offloads = offloads;
}
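/*
 * Example (illustrative): a legacy configuration with
 * rxmode.hw_ip_checksum = 1 and rxmode.jumbo_frame = 1 converts to
 * offloads == (DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_JUMBO_FRAME).
 */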
const char * __rte_experimental
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
		if (offload == rte_rx_offload_names[i].offload) {
			name = rte_rx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char * __rte_experimental
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
		if (offload == rte_tx_offload_names[i].offload) {
			name = rte_tx_offload_names[i].name;
			break;
		}
	}

	return name;
}

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf local_conf = *dev_conf;
	int diag;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	rte_eth_dev_info_get(port_id, &dev_info);

	/* If number of queues specified by application for both Rx and Tx is
	 * zero, use driver preferred values. This cannot be done individually
	 * as it is valid for either Tx or Rx (but not both) to be zero.
	 * If driver does not provide any preferred values, fall back on
	 * EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_PMD_DEBUG_TRACE(
			"Number of RX queues requested (%u) is greater than max supported (%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		return -EINVAL;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_PMD_DEBUG_TRACE(
			"Number of TX queues requested (%u) is greater than max supported (%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
			"port %d must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/*
	 * Convert between the offloads API to enable PMDs to support
	 * only one of them.
	 */
	if (dev_conf->rxmode.ignore_offload_bitfield == 0)
		rte_eth_convert_rx_offload_bitfield(
				&dev_conf->rxmode, &local_conf.rxmode.offloads);

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
				port_id, nb_rx_q, dev_info.max_rx_queues);
		return -EINVAL;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
				port_id, nb_tx_q, dev_info.max_tx_queues);
		return -EINVAL;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
				    dev->device->driver->name);
		return -EINVAL;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n",
				    dev->device->driver->name);
		return -EINVAL;
	}

	/*
	 * If jumbo frames are enabled, check that the maximum RX packet
	 * length is supported by the configured device.
	 */
	if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (dev_conf->rxmode.max_rx_pkt_len >
		    dev_info.max_rx_pktlen) {
			RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" > max valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)dev_info.max_rx_pktlen);
			return -EINVAL;
		} else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
			RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" < min valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)ETHER_MIN_LEN);
			return -EINVAL;
		}
	} else {
		if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
		    dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
			/* Use default value */
			dev->data->dev_conf.rxmode.max_rx_pkt_len =
							ETHER_MAX_LEN;
	}

	/* Any requested offloading must be within its device capabilities */
	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
	     local_conf.rxmode.offloads) {
		ethdev_log(ERR, "ethdev port_id=%d requested Rx offloads "
				"0x%" PRIx64 " don't match Rx offload "
				"capabilities 0x%" PRIx64 " in %s()\n",
				port_id,
				local_conf.rxmode.offloads,
				dev_info.rx_offload_capa,
				__func__);
		/* Will return -EINVAL in the next release */
	}
	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
	     local_conf.txmode.offloads) {
		ethdev_log(ERR, "ethdev port_id=%d requested Tx offloads "
				"0x%" PRIx64 " don't match Tx offload "
				"capabilities 0x%" PRIx64 " in %s()\n",
				port_id,
				local_conf.txmode.offloads,
				dev_info.tx_offload_capa,
				__func__);
		/* Will return -EINVAL in the next release */
	}

	/* Check that device supports requested rss hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d invalid rss_hf: "
				    "0x%"PRIx64", valid value: 0x%"PRIx64"\n",
				    port_id,
				    dev_conf->rx_adv_conf.rss_conf.rss_hf,
				    dev_info.flow_type_rss_offloads);
		return -EINVAL;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
				port_id, diag);
		return diag;
	}

	diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
				port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		return diag;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
				port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		return eth_err(port_id, diag);
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_profile_rx_init(port_id, dev);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d __rte_eth_profile_rx_init = %d\n",
				port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		return eth_err(port_id, diag);
	}

	return 0;
}
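/*
 * Usage sketch (illustrative, not part of this file): minimal port
 * configuration with one Rx and one Tx queue, using the offloads API
 * (ignore_offload_bitfield = 1 skips the legacy bitfield conversion above).
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = { .ignore_offload_bitfield = 1 },
 *	};
 *
 *	if (rte_eth_dev_configure(port_id, 1, 1, &conf) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot configure port\n");
 */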
void
_rte_eth_dev_reset(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
			"port %d must be stopped to allow reset\n",
			dev->data->port_id);
		return;
	}

	rte_eth_dev_rx_queue_config(dev, 0);
	rte_eth_dev_tx_queue_config(dev, 0);

	memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

static void
rte_eth_dev_config_restore(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	dev = &rte_eth_devices[port_id];

	rte_eth_dev_info_get(port_id, &dev_info);

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info.max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & 1ULL)
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}

	/* replay promiscuous configuration */
	if (rte_eth_promiscuous_get(port_id) == 1)
		rte_eth_promiscuous_enable(port_id);
	else if (rte_eth_promiscuous_get(port_id) == 0)
		rte_eth_promiscuous_disable(port_id);

	/* replay all multicast configuration */
	if (rte_eth_allmulticast_get(port_id) == 1)
		rte_eth_allmulticast_enable(port_id);
	else if (rte_eth_allmulticast_get(port_id) == 0)
		rte_eth_allmulticast_disable(port_id);
}

int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int diag;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
			" already started\n",
			port_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	rte_eth_dev_config_restore(port_id);

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
		(*dev->dev_ops->link_update)(dev, 0);
	}
	return 0;
}

void
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
			" already stopped\n",
			port_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
}

int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}

void
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_close)(dev);

	dev->data->nb_rx_queues = 0;
	rte_free(dev->data->rx_queues);
	dev->data->rx_queues = NULL;
	dev->data->nb_tx_queues = 0;
	rte_free(dev->data->tx_queues);
	dev->data->tx_queues = NULL;
}

int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);

	rte_eth_dev_stop(port_id);
	ret = dev->dev_ops->dev_reset(dev);

	return eth_err(port_id, ret);
}

int __rte_experimental
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);

	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	return ret;
}

int
rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint32_t mbp_buf_size;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf local_conf;
	void **rxq;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

	/*
	 * Check the size of the mbuf data buffer.
	 * This value must be provided in the private data of the memory pool.
	 * First check that the memory pool has valid private data.
	 */
	rte_eth_dev_info_get(port_id, &dev_info);
	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
				mp->name, (int) mp->private_data_size,
				(int) sizeof(struct rte_pktmbuf_pool_private));
		return -ENOSPC;
	}
	mbp_buf_size = rte_pktmbuf_data_room_size(mp);

	if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
		RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
				"(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
				"=%d)\n",
				mp->name,
				(int)mbp_buf_size,
				(int)(RTE_PKTMBUF_HEADROOM +
				      dev_info.min_rx_bufsize),
				(int)RTE_PKTMBUF_HEADROOM,
				(int)dev_info.min_rx_bufsize);
		return -EINVAL;
	}

	/* Use default specified by driver, if nb_rx_desc is zero */
	if (nb_rx_desc == 0) {
		nb_rx_desc = dev_info.default_rxportconf.ring_size;
		/* If driver default is also zero, fall back on EAL default */
		if (nb_rx_desc == 0)
			nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
	}

	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
	    nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
	    nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {

		RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
			"should be: <= %hu, >= %hu, and a multiple of %hu\n",
			nb_rx_desc,
			dev_info.rx_desc_lim.nb_max,
			dev_info.rx_desc_lim.nb_min,
			dev_info.rx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (dev->data->dev_started &&
		!(dev_info.dev_capa &
			RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
		return -EBUSY;

	if (dev->data->rx_queue_state[rx_queue_id] !=
		RTE_ETH_QUEUE_STATE_STOPPED)
		return -EBUSY;

	rxq = dev->data->rx_queues;
	if (rxq[rx_queue_id]) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
					-ENOTSUP);
		(*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
		rxq[rx_queue_id] = NULL;
	}

	if (rx_conf == NULL)
		rx_conf = &dev_info.default_rxconf;

	local_conf = *rx_conf;
	if (dev->data->dev_conf.rxmode.ignore_offload_bitfield == 0) {
		/**
		 * Reflect port offloads to queue offloads in order for
		 * offloads to not be discarded.
		 */
		rte_eth_convert_rx_offload_bitfield(&dev->data->dev_conf.rxmode,
						    &local_conf.offloads);
	}

	/*
	 * If an offloading has already been enabled in
	 * rte_eth_dev_configure(), it has been enabled on all queues,
	 * so there is no need to enable it in this queue again.
	 * The local_conf.offloads input to underlying PMD only carries
	 * those offloadings which are only enabled on this queue and
	 * not enabled on all queues.
	 */
	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;

	/*
	 * New added offloadings for this queue are those not enabled in
	 * rte_eth_dev_configure() and they must be per-queue type.
	 * A pure per-port offloading can't be enabled on a queue while
	 * disabled on another queue. A pure per-port offloading can't
	 * be enabled for any queue as new added one if it hasn't been
	 * enabled in rte_eth_dev_configure().
	 */
	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
	     local_conf.offloads) {
		ethdev_log(ERR, "Ethdev port_id=%d rx_queue_id=%d, new "
				"added offloads 0x%" PRIx64 " must be "
				"within per-queue offload capabilities 0x%"
				PRIx64 " in %s()\n",
				port_id,
				rx_queue_id,
				local_conf.offloads,
				dev_info.rx_queue_offload_capa,
				__func__);
		/* Will return -EINVAL in the next release */
	}

	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
					      socket_id, &local_conf, mp);
	if (!ret) {
		if (!dev->data->min_rx_buf_size ||
		    dev->data->min_rx_buf_size > mbp_buf_size)
			dev->data->min_rx_buf_size = mbp_buf_size;
	}

	return eth_err(port_id, ret);
}
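/*
 * Usage sketch (illustrative, not part of this file): one Rx queue backed by
 * a freshly created mbuf pool, with nb_rx_desc = 0 to take the driver default
 * ring size and rx_conf = NULL to take the driver default queue config.
 * "mbuf_pool" and the pool sizes are example values.
 *
 *	struct rte_mempool *mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool",
 *			8192, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
 *			rte_eth_dev_socket_id(port_id));
 *
 *	if (rte_eth_rx_queue_setup(port_id, 0, 0, rte_socket_id(),
 *			NULL, mbuf_pool) < 0)
 *		rte_exit(EXIT_FAILURE, "rx queue setup failed\n");
 */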
/**
 * Convert from tx offloads to txq_flags.
 */
static void
rte_eth_convert_tx_offload(const uint64_t tx_offloads, uint32_t *txq_flags)
{
	uint32_t flags = 0;

	if (!(tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
		flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
	if (!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
		flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
	if (!(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
		flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
	if (!(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
		flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
	if (!(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
		flags |= ETH_TXQ_FLAGS_NOXSUMTCP;
	if (tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		flags |= ETH_TXQ_FLAGS_NOREFCOUNT | ETH_TXQ_FLAGS_NOMULTMEMP;

	*txq_flags = flags;
}

/**
 * A conversion function from the legacy txq_flags API.
 */
static void
rte_eth_convert_txq_flags(const uint32_t txq_flags, uint64_t *tx_offloads)
{
	uint64_t offloads = 0;

	if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
		offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
	if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
		offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
		offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
		offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
		offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
	if ((txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT) &&
	    (txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP))
		offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	*tx_offloads = offloads;
}
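/*
 * Example (illustrative): the two converters above are near-inverses. With
 * txq_flags == 0, the conversion enables MULTI_SEGS, VLAN_INSERT and the
 * SCTP/UDP/TCP checksum offloads; MBUF_FAST_FREE is enabled only when both
 * NOREFCOUNT and NOMULTMEMP are set, since it assumes both properties.
 */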
int
rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
		       uint16_t nb_tx_desc, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf local_conf;
	void **txq;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Use default specified by driver, if nb_tx_desc is zero */
	if (nb_tx_desc == 0) {
		nb_tx_desc = dev_info.default_txportconf.ring_size;
		/* If driver default is zero, fall back on EAL default */
		if (nb_tx_desc == 0)
			nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
	}
	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
		RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
				"should be: <= %hu, >= %hu, and a multiple of %hu\n",
				nb_tx_desc,
				dev_info.tx_desc_lim.nb_max,
				dev_info.tx_desc_lim.nb_min,
				dev_info.tx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (dev->data->dev_started &&
		!(dev_info.dev_capa &
			RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
		return -EBUSY;

	if (dev->data->tx_queue_state[tx_queue_id] !=
		RTE_ETH_QUEUE_STATE_STOPPED)
		return -EBUSY;

	txq = dev->data->tx_queues;
	if (txq[tx_queue_id]) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
					-ENOTSUP);
		(*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
		txq[tx_queue_id] = NULL;
	}

	if (tx_conf == NULL)
		tx_conf = &dev_info.default_txconf;

	/*
	 * Convert between the offloads API to enable PMDs to support
	 * only one of them.
	 */
	local_conf = *tx_conf;
	if (!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE)) {
		rte_eth_convert_txq_flags(tx_conf->txq_flags,
					  &local_conf.offloads);
	}

	/*
	 * If an offloading has already been enabled in
	 * rte_eth_dev_configure(), it has been enabled on all queues,
	 * so there is no need to enable it in this queue again.
	 * The local_conf.offloads input to underlying PMD only carries
	 * those offloadings which are only enabled on this queue and
	 * not enabled on all queues.
	 */
	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;

	/*
	 * New added offloadings for this queue are those not enabled in
	 * rte_eth_dev_configure() and they must be per-queue type.
	 * A pure per-port offloading can't be enabled on a queue while
	 * disabled on another queue. A pure per-port offloading can't
	 * be enabled for any queue as new added one if it hasn't been
	 * enabled in rte_eth_dev_configure().
	 */
	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
	     local_conf.offloads) {
		ethdev_log(ERR, "Ethdev port_id=%d tx_queue_id=%d, new "
				"added offloads 0x%" PRIx64 " must be "
				"within per-queue offload capabilities 0x%"
				PRIx64 " in %s()\n",
				port_id,
				tx_queue_id,
				local_conf.offloads,
				dev_info.tx_queue_offload_capa,
				__func__);
		/* Will return -EINVAL in the next release */
	}

	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
}

void
rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata __rte_unused)
{
	unsigned i;

	for (i = 0; i < unsent; i++)
		rte_pktmbuf_free(pkts[i]);
}

void
rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata)
{
	uint64_t *count = userdata;
	unsigned i;

	for (i = 0; i < unsent; i++)
		rte_pktmbuf_free(pkts[i]);

	*count += unsent;
}

int
rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
		buffer_tx_error_fn cbfn, void *userdata)
{
	buffer->error_callback = cbfn;
	buffer->error_userdata = userdata;
	return 0;
}

int
rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
{
	int ret = 0;

	if (buffer == NULL)
		return -EINVAL;

	buffer->size = size;
	if (buffer->error_callback == NULL) {
		ret = rte_eth_tx_buffer_set_err_callback(
			buffer, rte_eth_tx_buffer_drop_callback, NULL);
	}

	return ret;
}
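/*
 * Usage sketch (illustrative, not part of this file): buffering packets for
 * batched transmission on queue 0; the buffer capacity of 32 is an example
 * value, and error handling is omitted.
 *
 *	struct rte_eth_dev_tx_buffer *buf = rte_zmalloc_socket("tx_buffer",
 *			RTE_ETH_TX_BUFFER_SIZE(32), 0,
 *			rte_eth_dev_socket_id(port_id));
 *
 *	rte_eth_tx_buffer_init(buf, 32);
 *	nb_tx = rte_eth_tx_buffer(port_id, 0, buf, mbuf);
 *	...
 *	nb_tx += rte_eth_tx_buffer_flush(port_id, 0, buf);
 *
 * Unsent packets are handed to the drop callback installed above unless the
 * application registered its own via rte_eth_tx_buffer_set_err_callback().
 */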
int
rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

	/* Validate Input Data. Bail if not valid or not supported. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);

	/* Call driver to free pending mbufs. */
	ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
					       free_cnt);
	return eth_err(port_id, ret);
}

void
rte_eth_promiscuous_enable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
	(*dev->dev_ops->promiscuous_enable)(dev);
	dev->data->promiscuous = 1;
}

void
rte_eth_promiscuous_disable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
	dev->data->promiscuous = 0;
	(*dev->dev_ops->promiscuous_disable)(dev);
}

int
rte_eth_promiscuous_get(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	return dev->data->promiscuous;
}

void
rte_eth_allmulticast_enable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
	(*dev->dev_ops->allmulticast_enable)(dev);
	dev->data->all_multicast = 1;
}

void
rte_eth_allmulticast_disable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
	dev->data->all_multicast = 0;
	(*dev->dev_ops->allmulticast_disable)(dev);
}

int
rte_eth_allmulticast_get(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	return dev->data->all_multicast;
}

void
rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc &&
	    dev->data->dev_started)
		rte_eth_linkstatus_get(dev, eth_link);
	else {
		RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 1);
		*eth_link = dev->data->dev_link;
	}
}

void
rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc &&
	    dev->data->dev_started)
		rte_eth_linkstatus_get(dev, eth_link);
	else {
		RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 0);
		*eth_link = dev->data->dev_link;
	}
}

int
rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
	return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
}

int
rte_eth_stats_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
	(*dev->dev_ops->stats_reset)(dev);
	dev->data->rx_mbuf_alloc_failed = 0;

	return 0;
}

static inline int
get_xstats_basic_count(struct rte_eth_dev *dev)
{
	uint16_t nb_rxqs, nb_txqs;
	int count;

	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	count = RTE_NB_STATS;
	count += nb_rxqs * RTE_NB_RXQ_STATS;
	count += nb_txqs * RTE_NB_TXQ_STATS;

	return count;
}

static int
get_xstats_count(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int count;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	dev = &rte_eth_devices[port_id];
	if (dev->dev_ops->xstats_get_names_by_id != NULL) {
		count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
				NULL, 0);
		if (count < 0)
			return eth_err(port_id, count);
	}
	if (dev->dev_ops->xstats_get_names != NULL) {
		count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
		if (count < 0)
			return eth_err(port_id, count);
	} else
		count = 0;

	count += get_xstats_basic_count(dev);

	return count;
}

int
rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
		uint64_t *id)
{
	int cnt_xstats, idx_xstat;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (!id) {
		RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n");
		return -ENOMEM;
	}

	if (!xstat_name) {
		RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n");
		return -ENOMEM;
	}

	/* Get count */
	cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
	if (cnt_xstats < 0) {
		RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n");
		return -ENODEV;
	}

	/* Get id-name lookup table */
	struct rte_eth_xstat_name xstats_names[cnt_xstats];

	if (cnt_xstats != rte_eth_xstats_get_names_by_id(
			port_id, xstats_names, cnt_xstats, NULL)) {
		RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n");
		return -1;
	}

	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
		if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
			*id = idx_xstat;
			return 0;
		}
	}

	return -EINVAL;
}
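/*
 * Usage sketch (illustrative, not part of this file): reading one extended
 * statistic by name; "rx_good_packets" is one of the basic names defined in
 * rte_stats_strings above.
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets",
 *			&id) == 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_good_packets: %" PRIu64 "\n", value);
 */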
2050 /* retrieve basic stats names */
2052 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
2053 struct rte_eth_xstat_name *xstats_names)
2055 int cnt_used_entries = 0;
2056 uint32_t idx, id_queue;
2059 for (idx = 0; idx < RTE_NB_STATS; idx++) {
2060 snprintf(xstats_names[cnt_used_entries].name,
2061 sizeof(xstats_names[0].name),
2062 "%s", rte_stats_strings[idx].name);
2065 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2066 for (id_queue = 0; id_queue < num_q; id_queue++) {
2067 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2068 snprintf(xstats_names[cnt_used_entries].name,
2069 sizeof(xstats_names[0].name),
2071 id_queue, rte_rxq_stats_strings[idx].name);
2076 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2077 for (id_queue = 0; id_queue < num_q; id_queue++) {
2078 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2079 snprintf(xstats_names[cnt_used_entries].name,
2080 sizeof(xstats_names[0].name),
2082 id_queue, rte_txq_stats_strings[idx].name);
2086 return cnt_used_entries;
2089 /* retrieve ethdev extended statistics names */
2091 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2092 struct rte_eth_xstat_name *xstats_names, unsigned int size,
2095 struct rte_eth_xstat_name *xstats_names_copy;
2096 unsigned int no_basic_stat_requested = 1;
2097 unsigned int no_ext_stat_requested = 1;
2098 unsigned int expected_entries;
2099 unsigned int basic_count;
2100 struct rte_eth_dev *dev;
2104 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2105 dev = &rte_eth_devices[port_id];
2107 basic_count = get_xstats_basic_count(dev);
2108 ret = get_xstats_count(port_id);
2111 expected_entries = (unsigned int)ret;
2113 /* Return max number of stats if no ids given */
2116 return expected_entries;
2117 else if (xstats_names && size < expected_entries)
2118 return expected_entries;
2121 if (ids && !xstats_names)
2124 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2125 uint64_t ids_copy[size];
2127 for (i = 0; i < size; i++) {
2128 if (ids[i] < basic_count) {
2129 no_basic_stat_requested = 0;
2134 * Convert ids to xstats ids that PMD knows.
2135 * ids known by user are basic + extended stats.
2137 ids_copy[i] = ids[i] - basic_count;
2140 if (no_basic_stat_requested)
2141 return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2142 xstats_names, ids_copy, size);
2145 /* Retrieve all stats */
2147 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2149 if (num_stats < 0 || num_stats > (int)expected_entries)
2152 return expected_entries;
2155 xstats_names_copy = calloc(expected_entries,
2156 sizeof(struct rte_eth_xstat_name));
2158 if (!xstats_names_copy) {
2159 RTE_PMD_DEBUG_TRACE("ERROR: can't allocate memory");
2164 for (i = 0; i < size; i++) {
2165 if (ids[i] >= basic_count) {
2166 no_ext_stat_requested = 0;
2172 /* Fill xstats_names_copy structure */
2173 if (ids && no_ext_stat_requested) {
2174 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
2176 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2179 free(xstats_names_copy);
2185 for (i = 0; i < size; i++) {
2186 if (ids[i] >= expected_entries) {
2187 RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
2188 free(xstats_names_copy);
2191 xstats_names[i] = xstats_names_copy[ids[i]];
2194 free(xstats_names_copy);
2199 rte_eth_xstats_get_names(uint16_t port_id,
2200 struct rte_eth_xstat_name *xstats_names,
2203 struct rte_eth_dev *dev;
2204 int cnt_used_entries;
2205 int cnt_expected_entries;
2206 int cnt_driver_entries;
2208 cnt_expected_entries = get_xstats_count(port_id);
2209 if (xstats_names == NULL || cnt_expected_entries < 0 ||
2210 (int)size < cnt_expected_entries)
2211 return cnt_expected_entries;
2213 /* port_id checked in get_xstats_count() */
2214 dev = &rte_eth_devices[port_id];
2216 cnt_used_entries = rte_eth_basic_stats_get_names(
2219 if (dev->dev_ops->xstats_get_names != NULL) {
2220 /* If there are any driver-specific xstats, append them
2223 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2225 xstats_names + cnt_used_entries,
2226 size - cnt_used_entries);
2227 if (cnt_driver_entries < 0)
2228 return eth_err(port_id, cnt_driver_entries);
2229 cnt_used_entries += cnt_driver_entries;
2232 return cnt_used_entries;
2237 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2239 struct rte_eth_dev *dev;
2240 struct rte_eth_stats eth_stats;
2241 unsigned int count = 0, i, q;
2242 uint64_t val, *stats_ptr;
2243 uint16_t nb_rxqs, nb_txqs;
2246 ret = rte_eth_stats_get(port_id, ð_stats);
2250 dev = &rte_eth_devices[port_id];
2252 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2253 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2256 for (i = 0; i < RTE_NB_STATS; i++) {
2257 stats_ptr = RTE_PTR_ADD(ð_stats,
2258 rte_stats_strings[i].offset);
2260 xstats[count++].value = val;
2264 for (q = 0; q < nb_rxqs; q++) {
2265 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2266 stats_ptr = RTE_PTR_ADD(ð_stats,
2267 rte_rxq_stats_strings[i].offset +
2268 q * sizeof(uint64_t));
2270 xstats[count++].value = val;
2275 for (q = 0; q < nb_txqs; q++) {
2276 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2277 stats_ptr = RTE_PTR_ADD(ð_stats,
2278 rte_txq_stats_strings[i].offset +
2279 q * sizeof(uint64_t));
2281 xstats[count++].value = val;
2287 /* retrieve ethdev extended statistics */
2289 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2290 uint64_t *values, unsigned int size)
2292 unsigned int no_basic_stat_requested = 1;
2293 unsigned int no_ext_stat_requested = 1;
2294 unsigned int num_xstats_filled;
2295 unsigned int basic_count;
2296 uint16_t expected_entries;
2297 struct rte_eth_dev *dev;
2301 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2302 ret = get_xstats_count(port_id);
2305 expected_entries = (uint16_t)ret;
2306 struct rte_eth_xstat xstats[expected_entries];
2307 dev = &rte_eth_devices[port_id];
2308 basic_count = get_xstats_basic_count(dev);
2310 /* Return max number of stats if no ids given */
2313 return expected_entries;
2314 else if (values && size < expected_entries)
2315 return expected_entries;
2321 if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2322 unsigned int basic_count = get_xstats_basic_count(dev);
2323 uint64_t ids_copy[size];
2325 for (i = 0; i < size; i++) {
2326 if (ids[i] < basic_count) {
2327 no_basic_stat_requested = 0;
2332 * Convert ids to xstats ids that PMD knows.
2333 * ids known by user are basic + extended stats.
2335 ids_copy[i] = ids[i] - basic_count;
2338 if (no_basic_stat_requested)
2339 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2344 for (i = 0; i < size; i++) {
2345 if (ids[i] >= basic_count) {
2346 no_ext_stat_requested = 0;
2352 /* Fill the xstats structure */
2353 if (ids && no_ext_stat_requested)
2354 ret = rte_eth_basic_stats_get(port_id, xstats);
2356 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2360 num_xstats_filled = (unsigned int)ret;
2362 /* Return all stats */
2364 for (i = 0; i < num_xstats_filled; i++)
2365 values[i] = xstats[i].value;
2366 return expected_entries;
2370 for (i = 0; i < size; i++) {
2371 if (ids[i] >= expected_entries) {
2372 RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
2375 values[i] = xstats[ids[i]].value;
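/*
 * Illustrative sketch, not part of the library: reading one counter by
 * id through the filtered path above. Ids live in the combined
 * namespace (basic stats first, then driver xstats), matching the
 * order returned by rte_eth_xstats_get_names().
 */
static int __rte_unused
example_read_one_xstat(uint16_t port_id, uint64_t id, uint64_t *value)
{
	return rte_eth_xstats_get_by_id(port_id, &id, value, 1);
}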
2381 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2384 struct rte_eth_dev *dev;
2385 unsigned int count = 0, i;
2386 signed int xcount = 0;
2387 uint16_t nb_rxqs, nb_txqs;
2390 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2392 dev = &rte_eth_devices[port_id];
2394 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2395 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2397 /* Return generic statistics */
2398 count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2399 (nb_txqs * RTE_NB_TXQ_STATS);
2401 /* implemented by the driver */
2402 if (dev->dev_ops->xstats_get != NULL) {
2403 /* Retrieve the xstats from the driver at the end of the
2406 xcount = (*dev->dev_ops->xstats_get)(dev,
2407 xstats ? xstats + count : NULL,
2408 (n > count) ? n - count : 0);
2411 return eth_err(port_id, xcount);
2414 if (n < count + xcount || xstats == NULL)
2415 return count + xcount;
2417 /* now fill the xstats structure */
2418 ret = rte_eth_basic_stats_get(port_id, xstats);
2423 for (i = 0; i < count; i++)
2425 /* add an offset to driver-specific stats */
2426 for ( ; i < count + xcount; i++)
2427 xstats[i].id += count;
2429 return count + xcount;
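/*
 * Illustrative usage sketch, not part of the library: the same
 * call-twice idiom for values. A first call with a NULL array (or an
 * undersized one) returns the total count; the second call fills the
 * id/value pairs. Hypothetical helper; error paths trimmed.
 */
static void __rte_unused
example_dump_xstats(uint16_t port_id)
{
	struct rte_eth_xstat *xstats;
	int i, cnt;

	cnt = rte_eth_xstats_get(port_id, NULL, 0);
	if (cnt <= 0)
		return;
	xstats = malloc(sizeof(*xstats) * cnt);
	if (xstats == NULL)
		return;
	if (rte_eth_xstats_get(port_id, xstats, cnt) == cnt)
		for (i = 0; i < cnt; i++)
			ethdev_log(INFO, "xstat %" PRIu64 " = %" PRIu64,
				xstats[i].id, xstats[i].value);
	free(xstats);
}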
2432 /* reset ethdev extended statistics */
2434 rte_eth_xstats_reset(uint16_t port_id)
2436 struct rte_eth_dev *dev;
2438 RTE_ETH_VALID_PORTID_OR_RET(port_id);
2439 dev = &rte_eth_devices[port_id];
2441 /* implemented by the driver */
2442 if (dev->dev_ops->xstats_reset != NULL) {
2443 (*dev->dev_ops->xstats_reset)(dev);
2447 /* fallback to default */
2448 rte_eth_stats_reset(port_id);
2452 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2455 struct rte_eth_dev *dev;
2457 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2459 dev = &rte_eth_devices[port_id];
2461 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2462 return (*dev->dev_ops->queue_stats_mapping_set)
2463 (dev, queue_id, stat_idx, is_rx);
2468 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2471 return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
2472 stat_idx, STAT_QMAP_TX));
2477 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2480 return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
2481 stat_idx, STAT_QMAP_RX));
2485 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2487 struct rte_eth_dev *dev;
2489 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2490 dev = &rte_eth_devices[port_id];
2492 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2493 return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
2494 fw_version, fw_size));
2498 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2500 struct rte_eth_dev *dev;
2501 struct rte_eth_txconf *txconf;
2502 const struct rte_eth_desc_lim lim = {
2503 .nb_max = UINT16_MAX,
2508 RTE_ETH_VALID_PORTID_OR_RET(port_id);
2509 dev = &rte_eth_devices[port_id];
2511 memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2512 dev_info->rx_desc_lim = lim;
2513 dev_info->tx_desc_lim = lim;
2514 dev_info->device = dev->device;
2516 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
2517 (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2518 dev_info->driver_name = dev->device->driver->name;
2519 dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2520 dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2522 dev_info->dev_flags = &dev->data->dev_flags;
2523 txconf = &dev_info->default_txconf;
2524 /* convert offload to txq_flags to support legacy app */
2525 rte_eth_convert_tx_offload(txconf->offloads, &txconf->txq_flags);
2529 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2530 uint32_t *ptypes, int num)
2533 struct rte_eth_dev *dev;
2534 const uint32_t *all_ptypes;
2536 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2537 dev = &rte_eth_devices[port_id];
2538 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
2539 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
2544 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
2545 if (all_ptypes[i] & ptype_mask) {
2547 ptypes[j] = all_ptypes[i];
2555 rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
2557 struct rte_eth_dev *dev;
2559 RTE_ETH_VALID_PORTID_OR_RET(port_id);
2560 dev = &rte_eth_devices[port_id];
2561 ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
2566 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
2568 struct rte_eth_dev *dev;
2570 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2572 dev = &rte_eth_devices[port_id];
2573 *mtu = dev->data->mtu;
2578 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
2581 struct rte_eth_dev *dev;
2583 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2584 dev = &rte_eth_devices[port_id];
2585 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
2587 ret = (*dev->dev_ops->mtu_set)(dev, mtu);
2589 dev->data->mtu = mtu;
2591 return eth_err(port_id, ret);
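/*
 * Illustrative usage sketch, not part of the library: reading the
 * cached MTU and changing it. Note the cached value above is updated
 * only after the driver accepts the new size.
 */
static int __rte_unused
example_bump_mtu(uint16_t port_id, uint16_t new_mtu)
{
	uint16_t cur;
	int ret;

	ret = rte_eth_dev_get_mtu(port_id, &cur);
	if (ret != 0)
		return ret;
	return cur == new_mtu ? 0 : rte_eth_dev_set_mtu(port_id, new_mtu);
}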
2595 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
2597 struct rte_eth_dev *dev;
2600 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2601 dev = &rte_eth_devices[port_id];
2602 if (!(dev->data->dev_conf.rxmode.offloads &
2603 DEV_RX_OFFLOAD_VLAN_FILTER)) {
2604 RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
2608 if (vlan_id > 4095) {
2609 RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
2610 port_id, (unsigned) vlan_id);
2613 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
2615 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
2617 struct rte_vlan_filter_conf *vfc;
2621 vfc = &dev->data->vlan_filter_conf;
2622 vidx = vlan_id / 64;
2623 vbit = vlan_id % 64;
2626 vfc->ids[vidx] |= UINT64_C(1) << vbit;
2628 vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
2631 return eth_err(port_id, ret);
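/*
 * Illustrative sketch, not part of the library: querying the software
 * shadow bitmap maintained above. Each of the 64 uint64_t words in
 * struct rte_vlan_filter_conf covers 64 consecutive VLAN ids.
 */
static int __rte_unused
example_vlan_filter_is_set(const struct rte_vlan_filter_conf *vfc,
	uint16_t vlan_id)
{
	return (vfc->ids[vlan_id / 64] >> (vlan_id % 64)) & 1;
}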
2635 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2638 struct rte_eth_dev *dev;
2640 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2641 dev = &rte_eth_devices[port_id];
2642 if (rx_queue_id >= dev->data->nb_rx_queues) {
2643 RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
2647 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
2648 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
2654 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2655 enum rte_vlan_type vlan_type,
2658 struct rte_eth_dev *dev;
2660 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2661 dev = &rte_eth_devices[port_id];
2662 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
2664 return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
2669 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
2671 struct rte_eth_dev *dev;
2675 uint64_t orig_offloads;
2677 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2678 dev = &rte_eth_devices[port_id];
2680 /* save original values in case of failure */
2681 orig_offloads = dev->data->dev_conf.rxmode.offloads;
2683 /* check which options the application changed */
2684 cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
2685 org = !!(dev->data->dev_conf.rxmode.offloads &
2686 DEV_RX_OFFLOAD_VLAN_STRIP);
2689 dev->data->dev_conf.rxmode.offloads |=
2690 DEV_RX_OFFLOAD_VLAN_STRIP;
2692 dev->data->dev_conf.rxmode.offloads &=
2693 ~DEV_RX_OFFLOAD_VLAN_STRIP;
2694 mask |= ETH_VLAN_STRIP_MASK;
2697 cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
2698 org = !!(dev->data->dev_conf.rxmode.offloads &
2699 DEV_RX_OFFLOAD_VLAN_FILTER);
2702 dev->data->dev_conf.rxmode.offloads |=
2703 DEV_RX_OFFLOAD_VLAN_FILTER;
2705 dev->data->dev_conf.rxmode.offloads &=
2706 ~DEV_RX_OFFLOAD_VLAN_FILTER;
2707 mask |= ETH_VLAN_FILTER_MASK;
2710 cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
2711 org = !!(dev->data->dev_conf.rxmode.offloads &
2712 DEV_RX_OFFLOAD_VLAN_EXTEND);
2715 dev->data->dev_conf.rxmode.offloads |=
2716 DEV_RX_OFFLOAD_VLAN_EXTEND;
2718 dev->data->dev_conf.rxmode.offloads &=
2719 ~DEV_RX_OFFLOAD_VLAN_EXTEND;
2720 mask |= ETH_VLAN_EXTEND_MASK;
2727 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
2728 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
2730 /* hit an error, restore the original values */
2731 dev->data->dev_conf.rxmode.offloads = orig_offloads;
2734 return eth_err(port_id, ret);
2738 rte_eth_dev_get_vlan_offload(uint16_t port_id)
2740 struct rte_eth_dev *dev;
2743 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2744 dev = &rte_eth_devices[port_id];
2746 if (dev->data->dev_conf.rxmode.offloads &
2747 DEV_RX_OFFLOAD_VLAN_STRIP)
2748 ret |= ETH_VLAN_STRIP_OFFLOAD;
2750 if (dev->data->dev_conf.rxmode.offloads &
2751 DEV_RX_OFFLOAD_VLAN_FILTER)
2752 ret |= ETH_VLAN_FILTER_OFFLOAD;
2754 if (dev->data->dev_conf.rxmode.offloads &
2755 DEV_RX_OFFLOAD_VLAN_EXTEND)
2756 ret |= ETH_VLAN_EXTEND_OFFLOAD;
2762 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
2764 struct rte_eth_dev *dev;
2766 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2767 dev = &rte_eth_devices[port_id];
2768 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
2770 return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
2774 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2776 struct rte_eth_dev *dev;
2778 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2779 dev = &rte_eth_devices[port_id];
2780 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2781 memset(fc_conf, 0, sizeof(*fc_conf));
2782 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
2786 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2788 struct rte_eth_dev *dev;
2790 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2791 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2792 RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
2796 dev = &rte_eth_devices[port_id];
2797 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2798 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
2802 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
2803 struct rte_eth_pfc_conf *pfc_conf)
2805 struct rte_eth_dev *dev;
2807 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2808 if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2809 RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
2813 dev = &rte_eth_devices[port_id];
2814 /* High water, low water validation is device-specific */
2815 if (*dev->dev_ops->priority_flow_ctrl_set)
2816 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
2822 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2830 num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
2831 for (i = 0; i < num; i++) {
2832 if (reta_conf[i].mask)
2840 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2844 uint16_t i, idx, shift;
2850 RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
2854 for (i = 0; i < reta_size; i++) {
2855 idx = i / RTE_RETA_GROUP_SIZE;
2856 shift = i % RTE_RETA_GROUP_SIZE;
2857 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2858 (reta_conf[idx].reta[shift] >= max_rxq)) {
2859 RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
2860 "the maximum rxq index: %u\n", idx, shift,
2861 reta_conf[idx].reta[shift], max_rxq);
2870 rte_eth_dev_rss_reta_update(uint16_t port_id,
2871 struct rte_eth_rss_reta_entry64 *reta_conf,
2874 struct rte_eth_dev *dev;
2877 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2878 /* Check mask bits */
2879 ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2883 dev = &rte_eth_devices[port_id];
2885 /* Check entry value */
2886 ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2887 dev->data->nb_rx_queues);
2891 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2892 return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
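/*
 * Illustrative usage sketch, not part of the library: spreading
 * reta_size indirection entries evenly over nb_queues RX queues. Each
 * rte_eth_rss_reta_entry64 covers RTE_RETA_GROUP_SIZE entries and its
 * mask selects which of them the update touches. Assumes reta_size
 * (from rte_eth_dev_info_get()) is a multiple of RTE_RETA_GROUP_SIZE.
 */
static int __rte_unused
example_fill_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[reta_size /
						  RTE_RETA_GROUP_SIZE];
	uint16_t i, idx, shift;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		reta_conf[idx].mask |= UINT64_C(1) << shift;
		reta_conf[idx].reta[shift] = i % nb_queues;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}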
2897 rte_eth_dev_rss_reta_query(uint16_t port_id,
2898 struct rte_eth_rss_reta_entry64 *reta_conf,
2901 struct rte_eth_dev *dev;
2904 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2906 /* Check mask bits */
2907 ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2911 dev = &rte_eth_devices[port_id];
2912 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2913 return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
2918 rte_eth_dev_rss_hash_update(uint16_t port_id,
2919 struct rte_eth_rss_conf *rss_conf)
2921 struct rte_eth_dev *dev;
2922 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
2924 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2925 dev = &rte_eth_devices[port_id];
2926 rte_eth_dev_info_get(port_id, &dev_info);
2927 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
2928 dev_info.flow_type_rss_offloads) {
2929 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d invalid rss_hf: "
2930 "0x%"PRIx64", valid value: 0x%"PRIx64"\n",
2933 dev_info.flow_type_rss_offloads);
2935 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2936 return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
2941 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
2942 struct rte_eth_rss_conf *rss_conf)
2944 struct rte_eth_dev *dev;
2946 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2947 dev = &rte_eth_devices[port_id];
2948 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2949 return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
2954 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
2955 struct rte_eth_udp_tunnel *udp_tunnel)
2957 struct rte_eth_dev *dev;
2959 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2960 if (udp_tunnel == NULL) {
2961 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2965 if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2966 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2970 dev = &rte_eth_devices[port_id];
2971 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
2972 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
2977 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
2978 struct rte_eth_udp_tunnel *udp_tunnel)
2980 struct rte_eth_dev *dev;
2982 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2983 dev = &rte_eth_devices[port_id];
2985 if (udp_tunnel == NULL) {
2986 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2990 if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2991 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2995 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
2996 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
3001 rte_eth_led_on(uint16_t port_id)
3003 struct rte_eth_dev *dev;
3005 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3006 dev = &rte_eth_devices[port_id];
3007 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
3008 return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
3012 rte_eth_led_off(uint16_t port_id)
3014 struct rte_eth_dev *dev;
3016 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3017 dev = &rte_eth_devices[port_id];
3018 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
3019 return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
3023 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3027 get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
3029 struct rte_eth_dev_info dev_info;
3030 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3033 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3034 rte_eth_dev_info_get(port_id, &dev_info);
3036 for (i = 0; i < dev_info.max_mac_addrs; i++)
3037 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
3043 static const struct ether_addr null_mac_addr;
3046 rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
3049 struct rte_eth_dev *dev;
3054 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3055 dev = &rte_eth_devices[port_id];
3056 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
3058 if (is_zero_ether_addr(addr)) {
3059 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
3063 if (pool >= ETH_64_POOLS) {
3064 RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
3068 index = get_mac_addr_index(port_id, addr);
3070 index = get_mac_addr_index(port_id, &null_mac_addr);
3072 RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
3077 pool_mask = dev->data->mac_pool_sel[index];
3079 /* Check if both MAC address and pool are already there; if so, do nothing */
3080 if (pool_mask & (1ULL << pool))
3085 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
3088 /* Update address in NIC data structure */
3089 ether_addr_copy(addr, &dev->data->mac_addrs[index]);
3091 /* Update pool bitmap in NIC data structure */
3092 dev->data->mac_pool_sel[index] |= (1ULL << pool);
3095 return eth_err(port_id, ret);
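/*
 * Illustrative usage sketch, not part of the library: adding a
 * secondary, locally administered unicast address to pool 0. The
 * address literal is hypothetical.
 */
static int __rte_unused
example_add_secondary_mac(uint16_t port_id)
{
	struct ether_addr addr = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};

	return rte_eth_dev_mac_addr_add(port_id, &addr, 0);
}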
3099 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
3101 struct rte_eth_dev *dev;
3104 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3105 dev = &rte_eth_devices[port_id];
3106 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
3108 index = get_mac_addr_index(port_id, addr);
3110 RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
3112 } else if (index < 0)
3113 return 0; /* Do nothing if address wasn't found */
3116 (*dev->dev_ops->mac_addr_remove)(dev, index);
3118 /* Update address in NIC data structure */
3119 ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
3121 /* reset pool bitmap */
3122 dev->data->mac_pool_sel[index] = 0;
3128 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
3130 struct rte_eth_dev *dev;
3133 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3135 if (!is_valid_assigned_ether_addr(addr))
3138 dev = &rte_eth_devices[port_id];
3139 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
3141 ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
3145 /* Update default address in NIC data structure */
3146 ether_addr_copy(addr, &dev->data->mac_addrs[0]);
3153 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3157 get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
3159 struct rte_eth_dev_info dev_info;
3160 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3163 rte_eth_dev_info_get(port_id, &dev_info);
3164 if (!dev->data->hash_mac_addrs)
3167 for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
3168 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
3169 ETHER_ADDR_LEN) == 0)
3176 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
3181 struct rte_eth_dev *dev;
3183 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3185 dev = &rte_eth_devices[port_id];
3186 if (is_zero_ether_addr(addr)) {
3187 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
3192 index = get_hash_mac_addr_index(port_id, addr);
3193 /* Check if it's already there, and do nothing */
3194 if ((index >= 0) && on)
3199 RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
3200 "set in UTA\n", port_id);
3204 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
3206 RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
3212 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
3213 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
3215 /* Update address in NIC data structure */
3217 ether_addr_copy(addr,
3218 &dev->data->hash_mac_addrs[index]);
3220 ether_addr_copy(&null_mac_addr,
3221 &dev->data->hash_mac_addrs[index]);
3224 return eth_err(port_id, ret);
3228 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
3230 struct rte_eth_dev *dev;
3232 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3234 dev = &rte_eth_devices[port_id];
3236 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
3237 return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
3241 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3244 struct rte_eth_dev *dev;
3245 struct rte_eth_dev_info dev_info;
3246 struct rte_eth_link link;
3248 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3250 dev = &rte_eth_devices[port_id];
3251 rte_eth_dev_info_get(port_id, &dev_info);
3252 link = dev->data->dev_link;
3254 if (queue_idx >= dev_info.max_tx_queues) {
3255 RTE_PMD_DEBUG_TRACE("set queue rate limit: port %d: "
3256 "invalid queue id=%d\n", port_id, queue_idx);
3260 if (tx_rate > link.link_speed) {
3261 RTE_PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
3262 "bigger than link speed= %d\n",
3263 tx_rate, link.link_speed);
3267 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
3268 return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
3269 queue_idx, tx_rate));
3273 rte_eth_mirror_rule_set(uint16_t port_id,
3274 struct rte_eth_mirror_conf *mirror_conf,
3275 uint8_t rule_id, uint8_t on)
3277 struct rte_eth_dev *dev;
3279 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3280 if (mirror_conf->rule_type == 0) {
3281 RTE_PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
3285 if (mirror_conf->dst_pool >= ETH_64_POOLS) {
3286 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
3291 if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
3292 ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
3293 (mirror_conf->pool_mask == 0)) {
3294 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
3298 if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
3299 mirror_conf->vlan.vlan_mask == 0) {
3300 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
3304 dev = &rte_eth_devices[port_id];
3305 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
3307 return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
3308 mirror_conf, rule_id, on));
3312 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
3314 struct rte_eth_dev *dev;
3316 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3318 dev = &rte_eth_devices[port_id];
3319 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3321 return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
3325 RTE_INIT(eth_dev_init_cb_lists)
3329 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3330 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
3334 rte_eth_dev_callback_register(uint16_t port_id,
3335 enum rte_eth_event_type event,
3336 rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3338 struct rte_eth_dev *dev;
3339 struct rte_eth_dev_callback *user_cb;
3340 uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3346 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3347 ethdev_log(ERR, "Invalid port_id=%d", port_id);
3351 if (port_id == RTE_ETH_ALL) {
3353 last_port = RTE_MAX_ETHPORTS - 1;
3355 next_port = last_port = port_id;
3358 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3361 dev = &rte_eth_devices[next_port];
3363 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
3364 if (user_cb->cb_fn == cb_fn &&
3365 user_cb->cb_arg == cb_arg &&
3366 user_cb->event == event) {
3371 /* create a new callback. */
3372 if (user_cb == NULL) {
3373 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
3374 sizeof(struct rte_eth_dev_callback), 0);
3375 if (user_cb != NULL) {
3376 user_cb->cb_fn = cb_fn;
3377 user_cb->cb_arg = cb_arg;
3378 user_cb->event = event;
3379 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
3382 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3383 rte_eth_dev_callback_unregister(port_id, event,
3389 } while (++next_port <= last_port);
3391 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
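/*
 * Illustrative usage sketch, not part of the library: a minimal link
 * status change handler registered on every port via RTE_ETH_ALL.
 * Names are hypothetical; cb_arg and ret_param are unused here.
 */
static int __rte_unused
example_lsc_handler(uint16_t port_id, enum rte_eth_event_type event,
	void *cb_arg, void *ret_param)
{
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);
	ethdev_log(INFO, "port %d: event %d", port_id, event);
	return 0;
}

static int __rte_unused
example_register_lsc(void)
{
	return rte_eth_dev_callback_register(RTE_ETH_ALL,
		RTE_ETH_EVENT_INTR_LSC, example_lsc_handler, NULL);
}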
3396 rte_eth_dev_callback_unregister(uint16_t port_id,
3397 enum rte_eth_event_type event,
3398 rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3401 struct rte_eth_dev *dev;
3402 struct rte_eth_dev_callback *cb, *next;
3403 uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3409 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3410 ethdev_log(ERR, "Invalid port_id=%d", port_id);
3414 if (port_id == RTE_ETH_ALL) {
3416 last_port = RTE_MAX_ETHPORTS - 1;
3418 next_port = last_port = port_id;
3421 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3424 dev = &rte_eth_devices[next_port];
3426 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
3429 next = TAILQ_NEXT(cb, next);
3431 if (cb->cb_fn != cb_fn || cb->event != event ||
3432 (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
3436 * if this callback is not executing right now,
3439 if (cb->active == 0) {
3440 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
3446 } while (++next_port <= last_port);
3448 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3453 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
3454 enum rte_eth_event_type event, void *ret_param)
3456 struct rte_eth_dev_callback *cb_lst;
3457 struct rte_eth_dev_callback dev_cb;
3460 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3461 TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
3462 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
3466 if (ret_param != NULL)
3467 dev_cb.ret_param = ret_param;
3469 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3470 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
3471 dev_cb.cb_arg, dev_cb.ret_param);
3472 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3475 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3480 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
3487 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
3490 struct rte_eth_dev *dev;
3491 struct rte_intr_handle *intr_handle;
3495 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3497 dev = &rte_eth_devices[port_id];
3499 if (!dev->intr_handle) {
3500 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3504 intr_handle = dev->intr_handle;
3505 if (!intr_handle->intr_vec) {
3506 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3510 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
3511 vec = intr_handle->intr_vec[qid];
3512 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3513 if (rc && rc != -EEXIST) {
3514 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3515 " op %d epfd %d vec %u\n",
3516 port_id, qid, op, epfd, vec);
3523 const struct rte_memzone *
3524 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
3525 uint16_t queue_id, size_t size, unsigned align,
3528 char z_name[RTE_MEMZONE_NAMESIZE];
3529 const struct rte_memzone *mz;
3531 snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
3532 dev->device->driver->name, ring_name,
3533 dev->data->port_id, queue_id);
3535 mz = rte_memzone_lookup(z_name);
3539 return rte_memzone_reserve_aligned(z_name, size, socket_id,
3540 RTE_MEMZONE_IOVA_CONTIG, align);
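/*
 * Illustrative sketch, not part of the library: how a PMD's queue
 * setup typically obtains (or re-finds after restart) the
 * IOVA-contiguous memory for one descriptor ring. The ring name is
 * hypothetical.
 */
static __rte_unused const struct rte_memzone *
example_alloc_ring(const struct rte_eth_dev *dev, uint16_t queue_id,
	size_t ring_size, int socket_id)
{
	return rte_eth_dma_zone_reserve(dev, "example_ring", queue_id,
		ring_size, RTE_CACHE_LINE_SIZE, socket_id);
}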
3543 int __rte_experimental
3544 rte_eth_dev_create(struct rte_device *device, const char *name,
3545 size_t priv_data_size,
3546 ethdev_bus_specific_init ethdev_bus_specific_init,
3547 void *bus_init_params,
3548 ethdev_init_t ethdev_init, void *init_params)
3550 struct rte_eth_dev *ethdev;
3553 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
3555 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3556 ethdev = rte_eth_dev_allocate(name);
3562 if (priv_data_size) {
3563 ethdev->data->dev_private = rte_zmalloc_socket(
3564 name, priv_data_size, RTE_CACHE_LINE_SIZE,
3567 if (!ethdev->data->dev_private) {
3568 RTE_LOG(ERR, EAL, "failed to allocate private data");
3574 ethdev = rte_eth_dev_attach_secondary(name);
3576 RTE_LOG(ERR, EAL, "secondary process attach failed, "
3577 "ethdev doesn't exist");
3583 ethdev->device = device;
3585 if (ethdev_bus_specific_init) {
3586 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
3589 "ethdev bus specific initialisation failed");
3594 retval = ethdev_init(ethdev, init_params);
3596 RTE_LOG(ERR, EAL, "ethdev initialisation failed");
3600 rte_eth_dev_probing_finish(ethdev);
3604 /* free ports private data if primary process */
3605 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3606 rte_free(ethdev->data->dev_private);
3608 rte_eth_dev_release_port(ethdev);
3613 int __rte_experimental
3614 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
3615 ethdev_uninit_t ethdev_uninit)
3619 ethdev = rte_eth_dev_allocated(ethdev->data->name);
3623 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
3624 if (ethdev_uninit) {
3625 ret = ethdev_uninit(ethdev);
3630 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3631 rte_free(ethdev->data->dev_private);
3633 ethdev->data->dev_private = NULL;
3635 return rte_eth_dev_release_port(ethdev);
3639 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
3640 int epfd, int op, void *data)
3643 struct rte_eth_dev *dev;
3644 struct rte_intr_handle *intr_handle;
3647 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3649 dev = &rte_eth_devices[port_id];
3650 if (queue_id >= dev->data->nb_rx_queues) {
3651 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
3655 if (!dev->intr_handle) {
3656 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3660 intr_handle = dev->intr_handle;
3661 if (!intr_handle->intr_vec) {
3662 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3666 vec = intr_handle->intr_vec[queue_id];
3667 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3668 if (rc && rc != -EEXIST) {
3669 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3670 " op %d epfd %d vec %u\n",
3671 port_id, queue_id, op, epfd, vec);
3679 rte_eth_dev_rx_intr_enable(uint16_t port_id,
3682 struct rte_eth_dev *dev;
3684 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3686 dev = &rte_eth_devices[port_id];
3688 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
3689 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
3694 rte_eth_dev_rx_intr_disable(uint16_t port_id,
3697 struct rte_eth_dev *dev;
3699 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3701 dev = &rte_eth_devices[port_id];
3703 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
3704 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
3710 rte_eth_dev_filter_supported(uint16_t port_id,
3711 enum rte_filter_type filter_type)
3713 struct rte_eth_dev *dev;
3715 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3717 dev = &rte_eth_devices[port_id];
3718 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3719 return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3720 RTE_ETH_FILTER_NOP, NULL);
3724 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
3725 enum rte_filter_op filter_op, void *arg)
3727 struct rte_eth_dev *dev;
3729 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3731 dev = &rte_eth_devices[port_id];
3732 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3733 return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3737 const struct rte_eth_rxtx_callback *
3738 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3739 rte_rx_callback_fn fn, void *user_param)
3741 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3742 rte_errno = ENOTSUP;
3745 /* check input parameters */
3746 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3747 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3751 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3759 cb->param = user_param;
3761 rte_spinlock_lock(&rte_eth_rx_cb_lock);
3762 /* Add the callback in FIFO order. */
3763 struct rte_eth_rxtx_callback *tail =
3764 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3767 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3774 rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3779 const struct rte_eth_rxtx_callback *
3780 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3781 rte_rx_callback_fn fn, void *user_param)
3783 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3784 rte_errno = ENOTSUP;
3787 /* check input parameters */
3788 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3789 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3794 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3802 cb->param = user_param;
3804 rte_spinlock_lock(&rte_eth_rx_cb_lock);
3805 /* Add the callback at the first position */
3806 cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3808 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3809 rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3814 const struct rte_eth_rxtx_callback *
3815 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3816 rte_tx_callback_fn fn, void *user_param)
3818 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3819 rte_errno = ENOTSUP;
3822 /* check input parameters */
3823 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3824 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3829 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3837 cb->param = user_param;
3839 rte_spinlock_lock(&rte_eth_tx_cb_lock);
3840 /* Add the callback in FIFO order. */
3841 struct rte_eth_rxtx_callback *tail =
3842 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3845 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3852 rte_spinlock_unlock(&rte_eth_tx_cb_lock);
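/*
 * Illustrative usage sketch, not part of the library: a pre-TX burst
 * callback that counts transmitted packets through a uint64_t passed
 * as user_param. A real caller keeps the handle returned by
 * rte_eth_add_tx_callback() for later removal.
 */
static uint16_t __rte_unused
example_count_tx_cb(uint16_t port_id, uint16_t queue_id,
	struct rte_mbuf **pkts, uint16_t nb_pkts, void *user_param)
{
	uint64_t *counter = user_param;

	RTE_SET_USED(port_id);
	RTE_SET_USED(queue_id);
	RTE_SET_USED(pkts);
	*counter += nb_pkts;
	return nb_pkts;
}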
3858 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3859 const struct rte_eth_rxtx_callback *user_cb)
3861 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3864 /* Check input parameters. */
3865 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3866 if (user_cb == NULL ||
3867 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
3870 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3871 struct rte_eth_rxtx_callback *cb;
3872 struct rte_eth_rxtx_callback **prev_cb;
3875 rte_spinlock_lock(&rte_eth_rx_cb_lock);
3876 prev_cb = &dev->post_rx_burst_cbs[queue_id];
3877 for (; *prev_cb != NULL; prev_cb = &cb->next) {
3879 if (cb == user_cb) {
3880 /* Remove the user cb from the callback list. */
3881 *prev_cb = cb->next;
3886 rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3892 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3893 const struct rte_eth_rxtx_callback *user_cb)
3895 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3898 /* Check input parameters. */
3899 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3900 if (user_cb == NULL ||
3901 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
3904 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3906 struct rte_eth_rxtx_callback *cb;
3907 struct rte_eth_rxtx_callback **prev_cb;
3909 rte_spinlock_lock(&rte_eth_tx_cb_lock);
3910 prev_cb = &dev->pre_tx_burst_cbs[queue_id];
3911 for (; *prev_cb != NULL; prev_cb = &cb->next) {
3913 if (cb == user_cb) {
3914 /* Remove the user cb from the callback list. */
3915 *prev_cb = cb->next;
3920 rte_spinlock_unlock(&rte_eth_tx_cb_lock);
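/*
 * Note on both removal helpers above: they only unlink the callback
 * from the queue's list. The callback memory is not freed here, since
 * a concurrent rx/tx burst may still be executing it; the caller must
 * ensure no data-plane thread can reference the callback before
 * releasing its memory.
 */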
3926 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3927 struct rte_eth_rxq_info *qinfo)
3929 struct rte_eth_dev *dev;
3931 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3936 dev = &rte_eth_devices[port_id];
3937 if (queue_id >= dev->data->nb_rx_queues) {
3938 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3942 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3944 memset(qinfo, 0, sizeof(*qinfo));
3945 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3950 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3951 struct rte_eth_txq_info *qinfo)
3953 struct rte_eth_dev *dev;
3954 struct rte_eth_txconf *txconf = &qinfo->conf;
3956 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3961 dev = &rte_eth_devices[port_id];
3962 if (queue_id >= dev->data->nb_tx_queues) {
3963 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3967 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3969 memset(qinfo, 0, sizeof(*qinfo));
3970 dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3971 /* convert offload to txq_flags to support legacy app */
3972 rte_eth_convert_tx_offload(txconf->offloads, &txconf->txq_flags);
3978 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
3979 struct ether_addr *mc_addr_set,
3980 uint32_t nb_mc_addr)
3982 struct rte_eth_dev *dev;
3984 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3986 dev = &rte_eth_devices[port_id];
3987 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3988 return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
3989 mc_addr_set, nb_mc_addr));
3993 rte_eth_timesync_enable(uint16_t port_id)
3995 struct rte_eth_dev *dev;
3997 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3998 dev = &rte_eth_devices[port_id];
4000 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
4001 return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
4005 rte_eth_timesync_disable(uint16_t port_id)
4007 struct rte_eth_dev *dev;
4009 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4010 dev = &rte_eth_devices[port_id];
4012 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
4013 return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
4017 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
4020 struct rte_eth_dev *dev;
4022 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4023 dev = &rte_eth_devices[port_id];
4025 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
4026 return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
4027 (dev, timestamp, flags));
4031 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
4032 struct timespec *timestamp)
4034 struct rte_eth_dev *dev;
4036 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4037 dev = &rte_eth_devices[port_id];
4039 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
4040 return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
4045 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
4047 struct rte_eth_dev *dev;
4049 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4050 dev = &rte_eth_devices[port_id];
4052 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
4053 return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
4058 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
4060 struct rte_eth_dev *dev;
4062 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4063 dev = &rte_eth_devices[port_id];
4065 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
4066 return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
4071 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
4073 struct rte_eth_dev *dev;
4075 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4076 dev = &rte_eth_devices[port_id];
4078 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
4079 return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
4084 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
4086 struct rte_eth_dev *dev;
4088 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4090 dev = &rte_eth_devices[port_id];
4091 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
4092 return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
4096 rte_eth_dev_get_eeprom_length(uint16_t port_id)
4098 struct rte_eth_dev *dev;
4100 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4102 dev = &rte_eth_devices[port_id];
4103 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
4104 return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
4108 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4110 struct rte_eth_dev *dev;
4112 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4114 dev = &rte_eth_devices[port_id];
4115 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
4116 return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
4120 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4122 struct rte_eth_dev *dev;
4124 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4126 dev = &rte_eth_devices[port_id];
4127 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
4128 return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
4131 int __rte_experimental
4132 rte_eth_dev_get_module_info(uint16_t port_id,
4133 struct rte_eth_dev_module_info *modinfo)
4135 struct rte_eth_dev *dev;
4137 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4139 dev = &rte_eth_devices[port_id];
4140 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
4141 return (*dev->dev_ops->get_module_info)(dev, modinfo);
4144 int __rte_experimental
4145 rte_eth_dev_get_module_eeprom(uint16_t port_id,
4146 struct rte_dev_eeprom_info *info)
4148 struct rte_eth_dev *dev;
4150 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4152 dev = &rte_eth_devices[port_id];
4153 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
4154 return (*dev->dev_ops->get_module_eeprom)(dev, info);
4158 rte_eth_dev_get_dcb_info(uint16_t port_id,
4159 struct rte_eth_dcb_info *dcb_info)
4161 struct rte_eth_dev *dev;
4163 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4165 dev = &rte_eth_devices[port_id];
4166 memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
4168 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
4169 return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
4173 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
4174 struct rte_eth_l2_tunnel_conf *l2_tunnel)
4176 struct rte_eth_dev *dev;
4178 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4179 if (l2_tunnel == NULL) {
4180 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
4184 if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4185 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
4189 dev = &rte_eth_devices[port_id];
4190 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
4192 return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
4197 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
4198 struct rte_eth_l2_tunnel_conf *l2_tunnel,
4202 struct rte_eth_dev *dev;
4204 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4206 if (l2_tunnel == NULL) {
4207 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
4211 if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4212 RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
4217 RTE_PMD_DEBUG_TRACE("Mask should have a value.\n");
4221 dev = &rte_eth_devices[port_id];
4222 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
4224 return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
4225 l2_tunnel, mask, en));
4229 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
4230 const struct rte_eth_desc_lim *desc_lim)
4232 if (desc_lim->nb_align != 0)
4233 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
4235 if (desc_lim->nb_max != 0)
4236 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
4238 *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
4242 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
4243 uint16_t *nb_rx_desc,
4244 uint16_t *nb_tx_desc)
4246 struct rte_eth_dev *dev;
4247 struct rte_eth_dev_info dev_info;
4249 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4251 dev = &rte_eth_devices[port_id];
4252 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
4254 rte_eth_dev_info_get(port_id, &dev_info);
4256 if (nb_rx_desc != NULL)
4257 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
4259 if (nb_tx_desc != NULL)
4260 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
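/*
 * Illustrative usage sketch, not part of the library: clamping
 * requested ring sizes before queue setup. With nb_align = 32 and
 * nb_max = 4096, for example, a request of 1000 descriptors is
 * rounded up to 1024.
 */
static int __rte_unused
example_adjust_rings(uint16_t port_id)
{
	uint16_t nb_rxd = 1000;
	uint16_t nb_txd = 1000;

	return rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
}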
4266 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
4268 struct rte_eth_dev *dev;
4270 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4275 dev = &rte_eth_devices[port_id];
4277 if (*dev->dev_ops->pool_ops_supported == NULL)
4278 return 1; /* all pools are supported */
4280 return (*dev->dev_ops->pool_ops_supported)(dev, pool);
4284 * A set of values to describe the possible states of a switch domain.
4286 enum rte_eth_switch_domain_state {
4287 RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
4288 RTE_ETH_SWITCH_DOMAIN_ALLOCATED
4292 * Array of switch domains available for allocation. Array is sized to
4293 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
4294 * ethdev ports in a single process.
4296 struct rte_eth_dev_switch {
4297 enum rte_eth_switch_domain_state state;
4298 } rte_eth_switch_domains[RTE_MAX_ETHPORTS];
4300 int __rte_experimental
4301 rte_eth_switch_domain_alloc(uint16_t *domain_id)
4305 *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
4307 for (i = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID + 1;
4308 i < RTE_MAX_ETHPORTS; i++) {
4309 if (rte_eth_switch_domains[i].state ==
4310 RTE_ETH_SWITCH_DOMAIN_UNUSED) {
4311 rte_eth_switch_domains[i].state =
4312 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
4321 int __rte_experimental
4322 rte_eth_switch_domain_free(uint16_t domain_id)
4324 if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
4325 domain_id >= RTE_MAX_ETHPORTS)
4328 if (rte_eth_switch_domains[domain_id].state !=
4329 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
4332 rte_eth_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
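/*
 * Illustrative sketch, not part of the library: a driver claiming a
 * switch domain for the lifetime of its representor ports and
 * releasing it again on teardown.
 */
static void __rte_unused
example_switch_domain_cycle(void)
{
	uint16_t domain_id;

	if (rte_eth_switch_domain_alloc(&domain_id) == 0)
		rte_eth_switch_domain_free(domain_id);
}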
4337 typedef int (*rte_eth_devargs_callback_t)(char *str, void *data);
4340 rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
4343 struct rte_kvargs_pair *pair;
4346 arglist->str = strdup(str_in);
4347 if (arglist->str == NULL)
4350 letter = arglist->str;
4353 pair = &arglist->pairs[0];
4356 case 0: /* Initial */
4359 else if (*letter == '\0')
4366 case 1: /* Parsing key */
4367 if (*letter == '=') {
4369 pair->value = letter + 1;
4371 } else if (*letter == ',' || *letter == '\0')
4376 case 2: /* Parsing value */
4379 else if (*letter == ',') {
4382 pair = &arglist->pairs[arglist->count];
4384 } else if (*letter == '\0') {
4387 pair = &arglist->pairs[arglist->count];
4392 case 3: /* Parsing list */
4395 else if (*letter == '\0')
4404 rte_eth_devargs_parse_list(char *str, rte_eth_devargs_callback_t callback,
4412 /* Single element, not a list */
4413 return callback(str, data);
4415 /* Sanity check, then strip the brackets */
4416 str_start = &str[strlen(str) - 1];
4417 if (*str_start != ']') {
4418 RTE_LOG(ERR, EAL, "(%s): List does not end with ']'\n", str);
4424 /* Process list elements */
4434 } else if (state == 1) {
4435 if (*str == ',' || *str == '\0') {
4436 if (str > str_start) {
4437 /* Non-empty string fragment */
4439 result = callback(str_start, data);
4452 rte_eth_devargs_process_range(char *str, uint16_t *list, uint16_t *len_list,
4453 const uint16_t max_list)
4455 uint16_t lo, hi, val;
4458 result = sscanf(str, "%hu-%hu", &lo, &hi);
4460 if (*len_list >= max_list)
4462 list[(*len_list)++] = lo;
4463 } else if (result == 2) {
4464 if (lo >= hi || lo > RTE_MAX_ETHPORTS || hi > RTE_MAX_ETHPORTS)
4466 for (val = lo; val <= hi; val++) {
4467 if (*len_list >= max_list)
4469 list[(*len_list)++] = val;
4478 rte_eth_devargs_parse_representor_ports(char *str, void *data)
4480 struct rte_eth_devargs *eth_da = data;
4482 return rte_eth_devargs_process_range(str, eth_da->representor_ports,
4483 &eth_da->nb_representor_ports, RTE_MAX_ETHPORTS);
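/*
 * Illustrative sketch, not part of the library: for the devargs string
 * "representor=[0,2-4]" the tokeniser and list parser above yield
 * representor_ports = {0, 2, 3, 4} and nb_representor_ports = 4.
 */
static int __rte_unused
example_parse_representors(struct rte_eth_devargs *eth_da)
{
	return rte_eth_devargs_parse("representor=[0,2-4]", eth_da);
}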
4486 int __rte_experimental
4487 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
4489 struct rte_kvargs args;
4490 struct rte_kvargs_pair *pair;
4494 memset(eth_da, 0, sizeof(*eth_da));
4496 result = rte_eth_devargs_tokenise(&args, dargs);
4500 for (i = 0; i < args.count; i++) {
4501 pair = &args.pairs[i];
4502 if (strcmp("representor", pair->key) == 0) {
4503 result = rte_eth_devargs_parse_list(pair->value,
4504 rte_eth_devargs_parse_representor_ports,
4518 RTE_INIT(ethdev_init_log);
4520 ethdev_init_log(void)
4522 ethdev_logtype = rte_log_register("lib.ethdev");
4523 if (ethdev_logtype >= 0)
4524 rte_log_set_level(ethdev_logtype, RTE_LOG_INFO);