/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>

#include "rte_ether.h"
#include "rte_ethdev.h"
#include "rte_ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"
int rte_eth_dev_logtype;

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static uint16_t eth_dev_last_created_port;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
	uint64_t next_owner_id;
	rte_spinlock_t ownership_lock;
	struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *rte_eth_dev_shared_data;
static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) / \
		sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};

#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) / \
		sizeof(rte_txq_stats_strings[0]))
#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} rte_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} rte_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(MATCH_METADATA),
};

#undef RTE_TX_OFFLOAD_BIT2STR
/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * the pointer to the parameters for the callback, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	void *ret_param;                        /**< Return parameter */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};
int __rte_experimental
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs = {.args = NULL};
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	memset(iter, 0, sizeof(*iter));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 * A new syntax is in development (not yet supported):
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of old syntax can match only at ethdev level.
	 * Extra parameters will be ignored, thanks to "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;
	free(devargs.args); /* allocated by rte_devargs_parse() */
	devargs.args = NULL; /* can't be freed twice */

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to new syntax for use with new API dev_iterate. */
	if (strcmp(iter->bus->name, "vdev") == 0) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

	iter->cls = rte_class_find_by_name("eth");
	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_LOG(ERR, EAL, "Bus %s does not support iterating.\n",
				iter->bus->name);
	free(devargs.args);
	free(bus_str);
	free(cls_str);
	return ret;
}
uint16_t __rte_experimental
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not in middle of rte_eth_dev iteration, */
		if (iter->class_device == NULL) {
			/* get next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidate */
		}
		/* A device is matching bus part, need to check ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL)
			return eth_dev_to_id(iter->class_device); /* match */
	} while (1); /* need to try next rte_device */

	/* No more ethdev port to iterate. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}
void __rte_experimental
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}
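
/*
 * Illustrative usage sketch (not part of this file): walking all ethdev
 * ports that match a devargs string with the iterator API above. The
 * devargs string "0000:08:00.0,representor=[1-3]" is only an example.
 * The RTE_ETH_FOREACH_MATCHING_DEV() macro in rte_ethdev.h wraps the
 * same init/next sequence.
 */
#if 0 /* usage sketch only */
static void
example_iterate_matching_ports(void)
{
	struct rte_dev_iterator iter;
	uint16_t port_id;

	if (rte_eth_iterator_init(&iter, "0000:08:00.0,representor=[1-3]") != 0)
		return;
	for (port_id = rte_eth_iterator_next(&iter);
	     port_id != RTE_MAX_ETHPORTS;
	     port_id = rte_eth_iterator_next(&iter))
		printf("matched ethdev port %u\n", port_id);
	/* the iterator frees its strings itself once iteration is exhausted */
}
#endif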
uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
	       rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
	       rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}
static void
rte_eth_dev_shared_data_prepare(void)
{
	const unsigned flags = 0;
	const struct rte_memzone *mz;

	rte_spinlock_lock(&rte_eth_shared_data_lock);

	if (rte_eth_dev_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate port data and ownership shared memory. */
			mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
					sizeof(*rte_eth_dev_shared_data),
					rte_socket_id(), flags);
		} else
			mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
		if (mz == NULL)
			rte_panic("Cannot allocate ethdev shared data\n");

		rte_eth_dev_shared_data = mz->addr;
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			rte_eth_dev_shared_data->next_owner_id =
					RTE_ETH_DEV_NO_OWNER + 1;
			rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
			memset(rte_eth_dev_shared_data->data, 0,
			       sizeof(rte_eth_dev_shared_data->data));
		}
	}

	rte_spinlock_unlock(&rte_eth_shared_data_lock);
}
static bool
is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
_rte_eth_dev_allocated(const char *name)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].data != NULL &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	struct rte_eth_dev *ethdev;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	ethdev = _rte_eth_dev_allocated(name);

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	return ethdev;
}
static uint16_t
rte_eth_dev_find_free_port(void)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Using shared name field to find a free port. */
		if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
			RTE_ASSERT(rte_eth_devices[i].state ==
				   RTE_ETH_DEV_UNUSED);
			return i;
		}
	}
	return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &rte_eth_dev_shared_data->data[port_id];
	eth_dev->state = RTE_ETH_DEV_ATTACHED;

	eth_dev_last_created_port = port_id;

	return eth_dev;
}
struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev = NULL;

	rte_eth_dev_shared_data_prepare();

	/* Synchronize port creation between primary and secondary threads. */
	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	if (_rte_eth_dev_allocated(name) != NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Ethernet device with name %s already allocated\n",
			name);
		goto unlock;
	}

	port_id = rte_eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Reached maximum number of Ethernet ports\n");
		goto unlock;
	}

	eth_dev = eth_dev_get(port_id);
	snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
	eth_dev->data->port_id = port_id;
	eth_dev->data->mtu = ETHER_MTU;

unlock:
	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	return eth_dev;
}
/*
 * Attach to a port already registered by the primary process, which
 * makes sure that the same device would have the same port id both
 * in the primary and secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev = NULL;

	rte_eth_dev_shared_data_prepare();

	/* Synchronize port attachment to primary port creation and release. */
	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Device %s is not driven by the primary process\n",
			name);
	} else {
		eth_dev = eth_dev_get(i);
		RTE_ASSERT(eth_dev->data->port_id == i);
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return eth_dev;
}
int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	rte_eth_dev_shared_data_prepare();

	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_DESTROY, NULL);

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	eth_dev->state = RTE_ETH_DEV_UNUSED;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eth_dev->data->rx_queues);
		rte_free(eth_dev->data->tx_queues);
		rte_free(eth_dev->data->mac_addrs);
		rte_free(eth_dev->data->hash_mac_addrs);
		rte_free(eth_dev->data->dev_private);
		memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	return 0;
}
int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}

static int
rte_eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    rte_eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t __rte_experimental
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
	       ((rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
	       rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED) ||
	       rte_eth_devices[port_id].data->owner.id != owner_id))
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}
int __rte_experimental
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	*owner_id = rte_eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return 0;
}
static int
_rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		       const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;
	int sret;

	if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (!rte_eth_is_valid_owner_id(new_owner->id) &&
	    !rte_eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
			old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	sret = snprintf(port_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN, "%s",
			new_owner->name);
	if (sret < 0 || sret >= RTE_ETH_MAX_OWNER_NAME_LEN)
		RTE_ETHDEV_LOG(ERR, "Port %u owner name was truncated\n",
			port_id);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}
int __rte_experimental
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return ret;
}

int __rte_experimental
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return ret;
}
void __rte_experimental
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	if (rte_eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
			if (rte_eth_devices[port_id].data->owner.id == owner_id)
				memset(&rte_eth_devices[port_id].data->owner, 0,
				       sizeof(struct rte_eth_dev_owner));
		RTE_ETHDEV_LOG(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have been removed\n",
			owner_id);
	} else {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner id=%016"PRIx64"\n",
			owner_id);
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
}
int __rte_experimental
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	int ret = 0;
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
			port_id);
		ret = -ENODEV;
	} else {
		rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return ret;
}
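
/*
 * Illustrative usage sketch (not part of this file): claiming exclusive
 * ownership of a port so other DPDK entities leave it alone. Error
 * handling is abbreviated and the owner name "my_app" is an example.
 */
#if 0 /* usage sketch only */
static int
example_claim_port(uint16_t port_id)
{
	struct rte_eth_dev_owner owner = { .name = "my_app" };
	int ret;

	ret = rte_eth_dev_owner_new(&owner.id);
	if (ret != 0)
		return ret;
	ret = rte_eth_dev_owner_set(port_id, &owner);
	if (ret != 0)
		return ret;
	/* ... use the port exclusively ... */
	return rte_eth_dev_owner_unset(port_id, owner.id);
}
#endif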
int
rte_eth_dev_socket_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count(void)
{
	return rte_eth_dev_count_avail();
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	return count;
}

uint16_t __rte_experimental
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	for (port = 0; port < RTE_MAX_ETHPORTS; port++)
		if (rte_eth_devices[port].state != RTE_ETH_DEV_UNUSED)
			count++;

	return count;
}
int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[i].data',
	 * because it might be overwritten by VDEV PMD */
	tmp = rte_eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);
	return 0;
}
int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint32_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
		return -EINVAL;
	}

	for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
		if (rte_eth_devices[pid].state != RTE_ETH_DEV_UNUSED &&
		    !strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;
			return 0;
		}
	}

	return -ENODEV;
}
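
/*
 * Illustrative usage sketch (not part of this file): translating between a
 * device name and a port id in both directions. "net_tap0" is only an
 * example name; RTE_ETH_NAME_MAX_LEN bounds the reverse-lookup buffer.
 */
#if 0 /* usage sketch only */
static void
example_name_lookups(void)
{
	char name[RTE_ETH_NAME_MAX_LEN];
	uint16_t port_id;

	if (rte_eth_dev_get_port_by_name("net_tap0", &port_id) == 0)
		printf("net_tap0 is port %u\n", port_id);
	if (rte_eth_dev_get_name_by_port(0, name) == 0)
		printf("port 0 is named %s\n", name);
}
#endif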
static int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}
/* attach the new device, then store port_id of the device */
int
rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
{
	int current = rte_eth_dev_count_total();
	struct rte_devargs da;
	int ret = -1;

	memset(&da, 0, sizeof(da));

	if ((devargs == NULL) || (port_id == NULL)) {
		ret = -EINVAL;
		goto err;
	}

	/* parse devargs */
	if (rte_devargs_parse(&da, devargs))
		goto err;

	ret = rte_eal_hotplug_add(da.bus->name, da.name, da.args);
	if (ret < 0)
		goto err;

	/* no point looking at the port count if no port exists */
	if (!rte_eth_dev_count_total()) {
		RTE_ETHDEV_LOG(ERR, "No port found for device (%s)\n", da.name);
		ret = -1;
		goto err;
	}

	/* if nothing happened, there is a bug here, since some driver told us
	 * it did attach a device, but did not create a port.
	 * FIXME: race condition in case of plug-out of another device
	 */
	if (current == rte_eth_dev_count_total()) {
		ret = -1;
		goto err;
	}

	*port_id = eth_dev_last_created_port;
	ret = 0;

err:
	free(da.args);
	return ret;
}
/* detach the device, then store the name of the device */
int
rte_eth_dev_detach(uint16_t port_id, char *name __rte_unused)
{
	struct rte_device *dev;
	struct rte_bus *bus;
	uint32_t dev_flags;
	int ret = -1;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev_flags = rte_eth_devices[port_id].data->dev_flags;
	if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
		RTE_ETHDEV_LOG(ERR,
			"Port %"PRIu16" is bonded, cannot detach\n", port_id);
		return -ENOTSUP;
	}

	dev = rte_eth_devices[port_id].device;
	if (dev == NULL)
		return -EINVAL;

	bus = rte_bus_find_by_device(dev);
	if (bus == NULL)
		return -ENOENT;

	ret = rte_eal_hotplug_remove(bus->name, dev->name);
	if (ret < 0)
		return ret;

	rte_eth_dev_release_port(&rte_eth_devices[port_id]);
	return 0;
}
static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	void **rxq;
	unsigned i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);
		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (rxq == NULL)
			return -(ENOMEM);
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(rxq + old_nb_queues, 0,
			       sizeof(rxq[0]) * new_qs);
		}

		dev->data->rx_queues = rxq;

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);

		rte_free(dev->data->rx_queues);
		dev->data->rx_queues = NULL;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}
int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
							     rx_queue_id));
}
int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}
int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}
int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}
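
/*
 * Illustrative usage sketch (not part of this file): pausing and resuming a
 * single Rx queue at runtime. This only works once the port itself is
 * started and if the PMD implements the queue start/stop ops checked above.
 */
#if 0 /* usage sketch only */
static void
example_toggle_rx_queue(uint16_t port_id, uint16_t queue_id)
{
	if (rte_eth_dev_rx_queue_stop(port_id, queue_id) != 0)
		return;
	/* ... e.g. rearrange flow rules while the queue is quiesced ... */
	(void)rte_eth_dev_rx_queue_start(port_id, queue_id);
}
#endif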
static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	void **txq;
	unsigned i;

	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
				sizeof(dev->data->tx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);
		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
				  RTE_CACHE_LINE_SIZE);
		if (txq == NULL)
			return -ENOMEM;
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(txq + old_nb_queues, 0,
			       sizeof(txq[0]) * new_qs);
		}

		dev->data->tx_queues = txq;

	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);

		rte_free(dev->data->tx_queues);
		dev->data->tx_queues = NULL;
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}
uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case ETH_SPEED_NUM_10M:
		return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
	case ETH_SPEED_NUM_100M:
		return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
	case ETH_SPEED_NUM_1G:
		return ETH_LINK_SPEED_1G;
	case ETH_SPEED_NUM_2_5G:
		return ETH_LINK_SPEED_2_5G;
	case ETH_SPEED_NUM_5G:
		return ETH_LINK_SPEED_5G;
	case ETH_SPEED_NUM_10G:
		return ETH_LINK_SPEED_10G;
	case ETH_SPEED_NUM_20G:
		return ETH_LINK_SPEED_20G;
	case ETH_SPEED_NUM_25G:
		return ETH_LINK_SPEED_25G;
	case ETH_SPEED_NUM_40G:
		return ETH_LINK_SPEED_40G;
	case ETH_SPEED_NUM_50G:
		return ETH_LINK_SPEED_50G;
	case ETH_SPEED_NUM_56G:
		return ETH_LINK_SPEED_56G;
	case ETH_SPEED_NUM_100G:
		return ETH_LINK_SPEED_100G;
	default:
		return 0;
	}
}
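
/*
 * Illustrative usage sketch (not part of this file): building a fixed-speed
 * link_speeds mask for struct rte_eth_conf from a numeric speed, as an
 * application would do before rte_eth_dev_configure().
 */
#if 0 /* usage sketch only */
static uint32_t
example_fixed_10g_mask(void)
{
	/* ETH_LINK_SPEED_FIXED disables autonegotiation at the given speed */
	return ETH_LINK_SPEED_FIXED |
		rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX);
}
#endif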
const char * __rte_experimental
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
		if (offload == rte_rx_offload_names[i].offload) {
			name = rte_rx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char * __rte_experimental
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
		if (offload == rte_tx_offload_names[i].offload) {
			name = rte_tx_offload_names[i].name;
			break;
		}
	}

	return name;
}
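
/*
 * Illustrative usage sketch (not part of this file): decoding an offload
 * bitmask into per-bit names, e.g. when logging a port's configured Rx
 * offloads. Each name lookup takes exactly one offload bit.
 */
#if 0 /* usage sketch only */
static void
example_print_rx_offloads(uint64_t offloads)
{
	uint64_t single;

	while (offloads != 0) {
		single = offloads & -offloads; /* isolate lowest set bit */
		offloads &= ~single;
		printf("%s ", rte_eth_dev_rx_offload_name(single));
	}
	printf("\n");
}
#endif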
int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf local_conf = *dev_conf;
	int diag;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	rte_eth_dev_info_get(port_id, &dev_info);

	/* If number of queues specified by application for both Rx and Tx is
	 * zero, use driver preferred values. This cannot be done individually
	 * as it is valid for either Tx or Rx (but not both) to be zero.
	 * If driver does not provide any preferred values, fall back on
	 * EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of RX queues requested (%u) is greater than max supported (%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		return -EINVAL;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of TX queues requested (%u) is greater than max supported (%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		return -EINVAL;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		return -EINVAL;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
			dev->device->driver->name);
		return -EINVAL;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
			dev->device->driver->name);
		return -EINVAL;
	}

	/*
	 * If jumbo frames are enabled, check that the maximum RX packet
	 * length is supported by the configured device.
	 */
	if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
			RTE_ETHDEV_LOG(ERR,
				"Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
				port_id, dev_conf->rxmode.max_rx_pkt_len,
				dev_info.max_rx_pktlen);
			return -EINVAL;
		} else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
			RTE_ETHDEV_LOG(ERR,
				"Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
				port_id, dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)ETHER_MIN_LEN);
			return -EINVAL;
		}
	} else {
		if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
			dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
			/* Use default value */
			dev->data->dev_conf.rxmode.max_rx_pkt_len =
							ETHER_MAX_LEN;
	}

	/* Any requested offloading must be within its device capabilities */
	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
	     local_conf.rxmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, local_conf.rxmode.offloads,
			dev_info.rx_offload_capa,
			__func__);
		return -EINVAL;
	}
	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
	     local_conf.txmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, local_conf.txmode.offloads,
			dev_info.tx_offload_capa,
			__func__);
		return -EINVAL;
	}

	/* Check that device supports requested rss hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
			dev_info.flow_type_rss_offloads);
		return -EINVAL;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u rte_eth_dev_rx_queue_config = %d\n",
			port_id, diag);
		return diag;
	}

	diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u rte_eth_dev_tx_queue_config = %d\n",
			port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		return diag;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
			port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		return eth_err(port_id, diag);
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
			port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		return eth_err(port_id, diag);
	}

	return 0;
}
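
/*
 * Illustrative usage sketch (not part of this file): minimal single-queue
 * port bring-up built on the functions in this section. The mempool
 * parameter, ring sizes, and the lack of error cleanup are assumptions
 * kept short for clarity.
 */
#if 0 /* usage sketch only */
static int
example_port_init(uint16_t port_id, struct rte_mempool *example_pool)
{
	struct rte_eth_conf port_conf;
	int ret;

	memset(&port_conf, 0, sizeof(port_conf));
	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret != 0)
		return ret;
	ret = rte_eth_rx_queue_setup(port_id, 0, 512,
			rte_eth_dev_socket_id(port_id), NULL, example_pool);
	if (ret < 0)
		return ret;
	ret = rte_eth_tx_queue_setup(port_id, 0, 512,
			rte_eth_dev_socket_id(port_id), NULL);
	if (ret < 0)
		return ret;
	return rte_eth_dev_start(port_id);
}
#endif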
void
_rte_eth_dev_reset(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
			dev->data->port_id);
		return;
	}

	rte_eth_dev_rx_queue_config(dev, 0);
	rte_eth_dev_tx_queue_config(dev, 0);

	memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}
static void
rte_eth_dev_mac_restore(struct rte_eth_dev *dev,
			struct rte_eth_dev_info *dev_info)
{
	struct ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info->max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & 1ULL)
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}
}
static void
rte_eth_dev_config_restore(struct rte_eth_dev *dev,
			   struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
		rte_eth_dev_mac_restore(dev, dev_info);

	/* replay promiscuous configuration */
	if (rte_eth_promiscuous_get(port_id) == 1)
		rte_eth_promiscuous_enable(port_id);
	else if (rte_eth_promiscuous_get(port_id) == 0)
		rte_eth_promiscuous_disable(port_id);

	/* replay all multicast configuration */
	if (rte_eth_allmulticast_get(port_id) == 1)
		rte_eth_allmulticast_enable(port_id);
	else if (rte_eth_allmulticast_get(port_id) == 0)
		rte_eth_allmulticast_disable(port_id);
}
int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already started\n",
			port_id);
		return 0;
	}

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Let's restore the MAC now if the device does not support live change */
	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
		rte_eth_dev_mac_restore(dev, &dev_info);

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	rte_eth_dev_config_restore(dev, &dev_info, port_id);

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
		(*dev->dev_ops->link_update)(dev, 0);
	}
	return 0;
}
void
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already stopped\n",
			port_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
}
int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}
void
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_close)(dev);

	/* check behaviour flag - temporary for PMD migration */
	if ((dev->data->dev_flags & RTE_ETH_DEV_CLOSE_REMOVE) != 0) {
		/* new behaviour: send event + reset state + free all data */
		rte_eth_dev_release_port(dev);
		return;
	}
	RTE_ETHDEV_LOG(DEBUG, "Port closing is using an old behaviour.\n"
			"The driver %s should migrate to the new behaviour.\n",
			dev->device->driver->name);
	/* old behaviour: only free queue arrays */
	dev->data->nb_rx_queues = 0;
	rte_free(dev->data->rx_queues);
	dev->data->rx_queues = NULL;
	dev->data->nb_tx_queues = 0;
	rte_free(dev->data->tx_queues);
	dev->data->tx_queues = NULL;
}
int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);

	rte_eth_dev_stop(port_id);
	ret = dev->dev_ops->dev_reset(dev);

	return eth_err(port_id, ret);
}
int __rte_experimental
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);

	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	return ret;
}
int
rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint32_t mbp_buf_size;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf local_conf;
	void **rxq;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

	/*
	 * Check the size of the mbuf data buffer.
	 * This value must be provided in the private data of the memory pool.
	 * First check that the memory pool has a valid private data.
	 */
	rte_eth_dev_info_get(port_id, &dev_info);
	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		RTE_ETHDEV_LOG(ERR, "%s private_data_size %d < %d\n",
			mp->name, (int)mp->private_data_size,
			(int)sizeof(struct rte_pktmbuf_pool_private));
		return -ENOSPC;
	}
	mbp_buf_size = rte_pktmbuf_data_room_size(mp);

	if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
		RTE_ETHDEV_LOG(ERR,
			"%s mbuf_data_room_size %d < %d (RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)=%d)\n",
			mp->name, (int)mbp_buf_size,
			(int)(RTE_PKTMBUF_HEADROOM + dev_info.min_rx_bufsize),
			(int)RTE_PKTMBUF_HEADROOM,
			(int)dev_info.min_rx_bufsize);
		return -EINVAL;
	}

	/* Use default specified by driver, if nb_rx_desc is zero */
	if (nb_rx_desc == 0) {
		nb_rx_desc = dev_info.default_rxportconf.ring_size;
		/* If driver default is also zero, fall back on EAL default */
		if (nb_rx_desc == 0)
			nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
	}

	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
			nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
			nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
			nb_rx_desc, dev_info.rx_desc_lim.nb_max,
			dev_info.rx_desc_lim.nb_min,
			dev_info.rx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (dev->data->dev_started &&
		!(dev_info.dev_capa &
			RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
		return -EBUSY;

	if (dev->data->dev_started &&
		(dev->data->rx_queue_state[rx_queue_id] !=
			RTE_ETH_QUEUE_STATE_STOPPED))
		return -EBUSY;

	rxq = dev->data->rx_queues;
	if (rxq[rx_queue_id]) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
					-ENOTSUP);
		(*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
		rxq[rx_queue_id] = NULL;
	}

	if (rx_conf == NULL)
		rx_conf = &dev_info.default_rxconf;

	local_conf = *rx_conf;

	/*
	 * If an offloading has already been enabled in
	 * rte_eth_dev_configure(), it has been enabled on all queues,
	 * so there is no need to enable it in this queue again.
	 * The local_conf.offloads input to underlying PMD only carries
	 * those offloadings which are only enabled on this queue and
	 * not enabled on all queues.
	 */
	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;

	/*
	 * New added offloadings for this queue are those not enabled in
	 * rte_eth_dev_configure() and they must be per-queue type.
	 * A pure per-port offloading can't be enabled on a queue while
	 * disabled on another queue. A pure per-port offloading can't
	 * be enabled for any queue as new added one if it hasn't been
	 * enabled in rte_eth_dev_configure().
	 */
	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
	     local_conf.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
			port_id, rx_queue_id, local_conf.offloads,
			dev_info.rx_queue_offload_capa,
			__func__);
		return -EINVAL;
	}

	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
					      socket_id, &local_conf, mp);
	if (!ret) {
		if (!dev->data->min_rx_buf_size ||
		    dev->data->min_rx_buf_size > mbp_buf_size)
			dev->data->min_rx_buf_size = mbp_buf_size;
	}

	return eth_err(port_id, ret);
}
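
/*
 * Illustrative usage sketch (not part of this file): creating an mbuf pool
 * whose data room satisfies the checks above before setting up an Rx queue.
 * Pool name, element count, cache size and buffer size are example values.
 */
#if 0 /* usage sketch only */
static struct rte_mempool *
example_create_rx_pool(unsigned int socket_id)
{
	/* data room = headroom + a full 2 KiB receive buffer for the NIC */
	return rte_pktmbuf_pool_create("example_rx_pool", 8192, 256, 0,
				       RTE_PKTMBUF_HEADROOM + 2048, socket_id);
}
#endif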
int
rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
		       uint16_t nb_tx_desc, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf local_conf;
	void **txq;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Use default specified by driver, if nb_tx_desc is zero */
	if (nb_tx_desc == 0) {
		nb_tx_desc = dev_info.default_txportconf.ring_size;
		/* If driver default is zero, fall back on EAL default */
		if (nb_tx_desc == 0)
			nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
	}
	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
			nb_tx_desc, dev_info.tx_desc_lim.nb_max,
			dev_info.tx_desc_lim.nb_min,
			dev_info.tx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (dev->data->dev_started &&
		!(dev_info.dev_capa &
			RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
		return -EBUSY;

	if (dev->data->dev_started &&
		(dev->data->tx_queue_state[tx_queue_id] !=
			RTE_ETH_QUEUE_STATE_STOPPED))
		return -EBUSY;

	txq = dev->data->tx_queues;
	if (txq[tx_queue_id]) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
					-ENOTSUP);
		(*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
		txq[tx_queue_id] = NULL;
	}

	if (tx_conf == NULL)
		tx_conf = &dev_info.default_txconf;

	local_conf = *tx_conf;

	/*
	 * If an offloading has already been enabled in
	 * rte_eth_dev_configure(), it has been enabled on all queues,
	 * so there is no need to enable it in this queue again.
	 * The local_conf.offloads input to underlying PMD only carries
	 * those offloadings which are only enabled on this queue and
	 * not enabled on all queues.
	 */
	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;

	/*
	 * New added offloadings for this queue are those not enabled in
	 * rte_eth_dev_configure() and they must be per-queue type.
	 * A pure per-port offloading can't be enabled on a queue while
	 * disabled on another queue. A pure per-port offloading can't
	 * be enabled for any queue as new added one if it hasn't been
	 * enabled in rte_eth_dev_configure().
	 */
	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
	     local_conf.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
			port_id, tx_queue_id, local_conf.offloads,
			dev_info.tx_queue_offload_capa,
			__func__);
		return -EINVAL;
	}

	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
}
void
rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata __rte_unused)
{
	unsigned i;

	for (i = 0; i < unsent; i++)
		rte_pktmbuf_free(pkts[i]);
}

void
rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata)
{
	uint64_t *count = userdata;
	unsigned i;

	for (i = 0; i < unsent; i++)
		rte_pktmbuf_free(pkts[i]);

	*count += unsent;
}

int
rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
		buffer_tx_error_fn cbfn, void *userdata)
{
	buffer->error_callback = cbfn;
	buffer->error_userdata = userdata;
	return 0;
}
int
rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
{
	int ret = 0;

	if (buffer == NULL)
		return -EINVAL;

	buffer->size = size;
	if (buffer->error_callback == NULL) {
		ret = rte_eth_tx_buffer_set_err_callback(
			buffer, rte_eth_tx_buffer_drop_callback, NULL);
	}

	return ret;
}
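
/*
 * Illustrative usage sketch (not part of this file): allocating and
 * initializing a Tx buffer to coalesce single-packet sends. BURST_SIZE
 * and the queue id 0 in the trailing comment are example values.
 */
#if 0 /* usage sketch only */
#define BURST_SIZE 32
static struct rte_eth_dev_tx_buffer *
example_make_tx_buffer(uint16_t port_id)
{
	struct rte_eth_dev_tx_buffer *buffer;

	buffer = rte_zmalloc_socket("tx_buffer",
			RTE_ETH_TX_BUFFER_SIZE(BURST_SIZE), 0,
			rte_eth_dev_socket_id(port_id));
	if (buffer != NULL && rte_eth_tx_buffer_init(buffer, BURST_SIZE) != 0) {
		rte_free(buffer);
		buffer = NULL;
	}
	return buffer;
	/* later: rte_eth_tx_buffer(port_id, 0, buffer, mbuf);
	 *        rte_eth_tx_buffer_flush(port_id, 0, buffer); */
}
#endif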
int
rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

	/* Validate Input Data. Bail if not valid or not supported. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);

	/* Call driver to free pending mbufs. */
	ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
					       free_cnt);
	return eth_err(port_id, ret);
}
void
rte_eth_promiscuous_enable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
	(*dev->dev_ops->promiscuous_enable)(dev);
	dev->data->promiscuous = 1;
}

void
rte_eth_promiscuous_disable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
	dev->data->promiscuous = 0;
	(*dev->dev_ops->promiscuous_disable)(dev);
}

int
rte_eth_promiscuous_get(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	return dev->data->promiscuous;
}

void
rte_eth_allmulticast_enable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
	(*dev->dev_ops->allmulticast_enable)(dev);
	dev->data->all_multicast = 1;
}

void
rte_eth_allmulticast_disable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
	dev->data->all_multicast = 0;
	(*dev->dev_ops->allmulticast_disable)(dev);
}

int
rte_eth_allmulticast_get(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	return dev->data->all_multicast;
}
void
rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc &&
	    dev->data->dev_started)
		rte_eth_linkstatus_get(dev, eth_link);
	else {
		RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 1);
		*eth_link = dev->data->dev_link;
	}
}

void
rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc &&
	    dev->data->dev_started)
		rte_eth_linkstatus_get(dev, eth_link);
	else {
		RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 0);
		*eth_link = dev->data->dev_link;
	}
}
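
/*
 * Illustrative usage sketch (not part of this file): polling link state
 * without blocking and printing it, e.g. from a management thread.
 */
#if 0 /* usage sketch only */
static void
example_print_link(uint16_t port_id)
{
	struct rte_eth_link link;

	rte_eth_link_get_nowait(port_id, &link);
	printf("port %u: %s, %u Mbps\n", port_id,
	       link.link_status == ETH_LINK_UP ? "up" : "down",
	       link.link_speed);
}
#endif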
int
rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
	return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
}
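
/*
 * Illustrative usage sketch (not part of this file): reading and resetting
 * the basic per-port counters this section exposes.
 */
#if 0 /* usage sketch only */
static void
example_dump_basic_stats(uint16_t port_id)
{
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(port_id, &stats) == 0)
		printf("rx %"PRIu64" pkts, tx %"PRIu64" pkts, missed %"PRIu64"\n",
		       stats.ipackets, stats.opackets, stats.imissed);
	rte_eth_stats_reset(port_id);
}
#endif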
int
rte_eth_stats_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
	(*dev->dev_ops->stats_reset)(dev);
	dev->data->rx_mbuf_alloc_failed = 0;

	return 0;
}
static inline int
get_xstats_basic_count(struct rte_eth_dev *dev)
{
	uint16_t nb_rxqs, nb_txqs;
	int count;

	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	count = RTE_NB_STATS;
	count += nb_rxqs * RTE_NB_RXQ_STATS;
	count += nb_txqs * RTE_NB_TXQ_STATS;

	return count;
}

static int
get_xstats_count(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int count;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	dev = &rte_eth_devices[port_id];
	if (dev->dev_ops->xstats_get_names_by_id != NULL) {
		count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
				NULL, 0);
		if (count < 0)
			return eth_err(port_id, count);
	}
	if (dev->dev_ops->xstats_get_names != NULL) {
		count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
		if (count < 0)
			return eth_err(port_id, count);
	} else
		count = 0;

	count += get_xstats_basic_count(dev);

	return count;
}
int
rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
		uint64_t *id)
{
	int cnt_xstats, idx_xstat;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (!id) {
		RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n");
		return -ENOMEM;
	}

	if (!xstat_name) {
		RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n");
		return -ENOMEM;
	}

	/* Get count */
	cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
	if (cnt_xstats < 0) {
		RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
		return -ENODEV;
	}

	/* Get id-name lookup table */
	struct rte_eth_xstat_name xstats_names[cnt_xstats];

	if (cnt_xstats != rte_eth_xstats_get_names_by_id(
			port_id, xstats_names, cnt_xstats, NULL)) {
		RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
		return -1;
	}

	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
		if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
			*id = idx_xstat;
			return 0;
		}
	}

	return -EINVAL;
}
/* retrieve basic stats names */
static int
rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names)
{
	int cnt_used_entries = 0;
	uint32_t idx, id_queue;
	uint16_t num_q;

	for (idx = 0; idx < RTE_NB_STATS; idx++) {
		snprintf(xstats_names[cnt_used_entries].name,
			sizeof(xstats_names[0].name),
			"%s", rte_stats_strings[idx].name);
		cnt_used_entries++;
	}
	num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (id_queue = 0; id_queue < num_q; id_queue++) {
		for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
			snprintf(xstats_names[cnt_used_entries].name,
				sizeof(xstats_names[0].name),
				"rx_q%u%s",
				id_queue, rte_rxq_stats_strings[idx].name);
			cnt_used_entries++;
		}
	}
	num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (id_queue = 0; id_queue < num_q; id_queue++) {
		for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
			snprintf(xstats_names[cnt_used_entries].name,
				sizeof(xstats_names[0].name),
				"tx_q%u%s",
				id_queue, rte_txq_stats_strings[idx].name);
			cnt_used_entries++;
		}
	}
	return cnt_used_entries;
}
/* retrieve ethdev extended statistics names */
int
rte_eth_xstats_get_names_by_id(uint16_t port_id,
	struct rte_eth_xstat_name *xstats_names, unsigned int size,
	uint64_t *ids)
{
	struct rte_eth_xstat_name *xstats_names_copy;
	unsigned int no_basic_stat_requested = 1;
	unsigned int no_ext_stat_requested = 1;
	unsigned int expected_entries;
	unsigned int basic_count;
	struct rte_eth_dev *dev;
	unsigned int i;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	basic_count = get_xstats_basic_count(dev);
	ret = get_xstats_count(port_id);
	if (ret < 0)
		return ret;
	expected_entries = (unsigned int)ret;

	/* Return max number of stats if no ids given */
	if (!ids) {
		if (!xstats_names)
			return expected_entries;
		else if (xstats_names && size < expected_entries)
			return expected_entries;
	}

	if (ids && !xstats_names)
		return -EINVAL;

	if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
		uint64_t ids_copy[size];

		for (i = 0; i < size; i++) {
			if (ids[i] < basic_count) {
				no_basic_stat_requested = 0;
				break;
			}

			/*
			 * Convert ids to xstats ids that PMD knows.
			 * ids known by user are basic + extended stats.
			 */
			ids_copy[i] = ids[i] - basic_count;
		}

		if (no_basic_stat_requested)
			return (*dev->dev_ops->xstats_get_names_by_id)(dev,
					xstats_names, ids_copy, size);
	}

	/* Retrieve all stats */
	if (!ids) {
		int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
				expected_entries);
		if (num_stats < 0 || num_stats > (int)expected_entries)
			return num_stats;
		else
			return expected_entries;
	}

	xstats_names_copy = calloc(expected_entries,
		sizeof(struct rte_eth_xstat_name));

	if (!xstats_names_copy) {
		RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
		return -ENOMEM;
	}

	if (ids) {
		for (i = 0; i < size; i++) {
			if (ids[i] >= basic_count) {
				no_ext_stat_requested = 0;
				break;
			}
		}
	}

	/* Fill xstats_names_copy structure */
	if (ids && no_ext_stat_requested) {
		rte_eth_basic_stats_get_names(dev, xstats_names_copy);
	} else {
		ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
			expected_entries);
		if (ret < 0) {
			free(xstats_names_copy);
			return ret;
		}
	}

	/* Filter stats */
	for (i = 0; i < size; i++) {
		if (ids[i] >= expected_entries) {
			RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
			free(xstats_names_copy);
			return -1;
		}
		xstats_names[i] = xstats_names_copy[ids[i]];
	}

	free(xstats_names_copy);
	return size;
}
int
rte_eth_xstats_get_names(uint16_t port_id,
	struct rte_eth_xstat_name *xstats_names,
	unsigned int size)
{
	struct rte_eth_dev *dev;
	int cnt_used_entries;
	int cnt_expected_entries;
	int cnt_driver_entries;

	cnt_expected_entries = get_xstats_count(port_id);
	if (xstats_names == NULL || cnt_expected_entries < 0 ||
			(int)size < cnt_expected_entries)
		return cnt_expected_entries;

	/* port_id checked in get_xstats_count() */
	dev = &rte_eth_devices[port_id];

	cnt_used_entries = rte_eth_basic_stats_get_names(
		dev, xstats_names);

	if (dev->dev_ops->xstats_get_names != NULL) {
		/* If there are any driver-specific xstats, append them
		 * to the end of the list.
		 */
		cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
			dev,
			xstats_names + cnt_used_entries,
			size - cnt_used_entries);
		if (cnt_driver_entries < 0)
			return eth_err(port_id, cnt_driver_entries);
		cnt_used_entries += cnt_driver_entries;
	}

	return cnt_used_entries;
}
static int
rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
{
	struct rte_eth_dev *dev;
	struct rte_eth_stats eth_stats;
	unsigned int count = 0, i, q;
	uint64_t val, *stats_ptr;
	uint16_t nb_rxqs, nb_txqs;
	int ret;

	ret = rte_eth_stats_get(port_id, &eth_stats);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];

	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	/* global stats */
	for (i = 0; i < RTE_NB_STATS; i++) {
		stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_stats_strings[i].offset);
		val = *stats_ptr;
		xstats[count++].value = val;
	}

	/* per-rxq stats */
	for (q = 0; q < nb_rxqs; q++) {
		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_rxq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}

	/* per-txq stats */
	for (q = 0; q < nb_txqs; q++) {
		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_txq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}
	return count;
}
/* retrieve ethdev extended statistics */
int
rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
			 uint64_t *values, unsigned int size)
{
	unsigned int no_basic_stat_requested = 1;
	unsigned int no_ext_stat_requested = 1;
	unsigned int num_xstats_filled;
	unsigned int basic_count;
	uint16_t expected_entries;
	struct rte_eth_dev *dev;
	unsigned int i;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ret = get_xstats_count(port_id);
	if (ret < 0)
		return ret;
	expected_entries = (uint16_t)ret;
	struct rte_eth_xstat xstats[expected_entries];
	dev = &rte_eth_devices[port_id];
	basic_count = get_xstats_basic_count(dev);

	/* Return max number of stats if no ids given */
	if (!ids) {
		if (!values)
			return expected_entries;
		else if (values && size < expected_entries)
			return expected_entries;
	}

	if (ids && !values)
		return -EINVAL;

	if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
		unsigned int basic_count = get_xstats_basic_count(dev);
		uint64_t ids_copy[size];

		for (i = 0; i < size; i++) {
			if (ids[i] < basic_count) {
				no_basic_stat_requested = 0;
				break;
			}

			/*
			 * Convert ids to xstats ids that PMD knows.
			 * ids known by user are basic + extended stats.
			 */
			ids_copy[i] = ids[i] - basic_count;
		}

		if (no_basic_stat_requested)
			return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
					values, size);
	}

	if (ids) {
		for (i = 0; i < size; i++) {
			if (ids[i] >= basic_count) {
				no_ext_stat_requested = 0;
				break;
			}
		}
	}

	/* Fill the xstats structure */
	if (ids && no_ext_stat_requested)
		ret = rte_eth_basic_stats_get(port_id, xstats);
	else
		ret = rte_eth_xstats_get(port_id, xstats, expected_entries);

	if (ret < 0)
		return ret;
	num_xstats_filled = (unsigned int)ret;

	/* Return all stats */
	if (!ids) {
		for (i = 0; i < num_xstats_filled; i++)
			values[i] = xstats[i].value;
		return expected_entries;
	}

	/* Filter stats */
	for (i = 0; i < size; i++) {
		if (ids[i] >= expected_entries) {
			RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
			return -1;
		}
		values[i] = xstats[ids[i]].value;
	}
	return size;
}
2420 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2423 struct rte_eth_dev *dev;
2424 unsigned int count = 0, i;
2425 signed int xcount = 0;
2426 uint16_t nb_rxqs, nb_txqs;
2429 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2431 dev = &rte_eth_devices[port_id];
2433 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2434 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2436 /* Return generic statistics */
2437 count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2438 (nb_txqs * RTE_NB_TXQ_STATS);
2440 /* implemented by the driver */
2441 if (dev->dev_ops->xstats_get != NULL) {
2442 /* Retrieve the xstats from the driver at the end of the
2445 xcount = (*dev->dev_ops->xstats_get)(dev,
2446 xstats ? xstats + count : NULL,
2447 (n > count) ? n - count : 0);
2450 return eth_err(port_id, xcount);
2453 if (n < count + xcount || xstats == NULL)
2454 return count + xcount;
2456 /* now fill the xstats structure */
2457 ret = rte_eth_basic_stats_get(port_id, xstats);
2462 for (i = 0; i < count; i++)
2464 /* add an offset to driver-specific stats */
2465 for ( ; i < count + xcount; i++)
2466 xstats[i].id += count;
2468 return count + xcount;
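/*
 * Illustrative usage sketch (not part of the library; helper name is
 * hypothetical): read all xstats values. Drivers append their own
 * counters after the generic ones, so the size reported by a first call
 * with n == 0 must be honoured.
 */
static void
example_dump_xstats_values(uint16_t port_id)
{
	int i, cnt = rte_eth_xstats_get(port_id, NULL, 0);
	struct rte_eth_xstat *xstats;

	if (cnt <= 0)
		return;
	xstats = malloc(sizeof(*xstats) * cnt);
	if (xstats == NULL)
		return;
	cnt = rte_eth_xstats_get(port_id, xstats, cnt);
	for (i = 0; i < cnt; i++)
		printf("id=%" PRIu64 " value=%" PRIu64 "\n",
		       xstats[i].id, xstats[i].value);
	free(xstats);
}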
2471 /* reset ethdev extended statistics */
2473 rte_eth_xstats_reset(uint16_t port_id)
2475 struct rte_eth_dev *dev;
2477 RTE_ETH_VALID_PORTID_OR_RET(port_id);
2478 dev = &rte_eth_devices[port_id];
2480 /* implemented by the driver */
2481 if (dev->dev_ops->xstats_reset != NULL) {
2482 (*dev->dev_ops->xstats_reset)(dev);
2486 /* fallback to default */
2487 rte_eth_stats_reset(port_id);
2491 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2494 struct rte_eth_dev *dev;
2496 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2498 dev = &rte_eth_devices[port_id];
2500 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2502 if (is_rx && (queue_id >= dev->data->nb_rx_queues))
2505 if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
2508 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
2511 return (*dev->dev_ops->queue_stats_mapping_set)
2512 (dev, queue_id, stat_idx, is_rx);
2517 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2520 return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
2521 stat_idx, STAT_QMAP_TX));
2526 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2529 return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
2530 stat_idx, STAT_QMAP_RX));
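/*
 * Illustrative sketch (hypothetical helper): map the first RX queues 1:1
 * onto the per-queue stat counters so the q_ipackets/q_ibytes slots are
 * populated on PMDs that require an explicit mapping.
 */
static int
example_map_rxq_stats(uint16_t port_id, uint16_t nb_rxq)
{
	uint16_t q;
	int ret;

	for (q = 0; q < nb_rxq && q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++) {
		ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, q,
							     (uint8_t)q);
		if (ret != 0)
			return ret;
	}
	return 0;
}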
2534 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2536 struct rte_eth_dev *dev;
2538 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2539 dev = &rte_eth_devices[port_id];
2541 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2542 return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
2543 fw_version, fw_size));
2547 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2549 struct rte_eth_dev *dev;
2550 const struct rte_eth_desc_lim lim = {
2551 .nb_max = UINT16_MAX,
2556 RTE_ETH_VALID_PORTID_OR_RET(port_id);
2557 dev = &rte_eth_devices[port_id];
2559 memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2560 dev_info->rx_desc_lim = lim;
2561 dev_info->tx_desc_lim = lim;
2562 dev_info->device = dev->device;
2564 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
2565 (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2566 dev_info->driver_name = dev->device->driver->name;
2567 dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2568 dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2570 dev_info->dev_flags = &dev->data->dev_flags;
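/*
 * Illustrative usage sketch (hypothetical helper): query the device
 * capabilities first and clamp the application's requested queue counts
 * to what the PMD reports.
 */
static void
example_clamp_queue_counts(uint16_t port_id, uint16_t *nb_rxq,
			   uint16_t *nb_txq)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	if (*nb_rxq > dev_info.max_rx_queues)
		*nb_rxq = dev_info.max_rx_queues;
	if (*nb_txq > dev_info.max_tx_queues)
		*nb_txq = dev_info.max_tx_queues;
}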
2574 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2575 uint32_t *ptypes, int num)
2578 struct rte_eth_dev *dev;
2579 const uint32_t *all_ptypes;
2581 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2582 dev = &rte_eth_devices[port_id];
2583 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
2584 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
2589 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
2590 if (all_ptypes[i] & ptype_mask) {
2592 ptypes[j] = all_ptypes[i];
2600 rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
2602 struct rte_eth_dev *dev;
2604 RTE_ETH_VALID_PORTID_OR_RET(port_id);
2605 dev = &rte_eth_devices[port_id];
2606 ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
2611 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
2613 struct rte_eth_dev *dev;
2615 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2617 dev = &rte_eth_devices[port_id];
2618 *mtu = dev->data->mtu;
2623 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
2626 struct rte_eth_dev *dev;
2628 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2629 dev = &rte_eth_devices[port_id];
2630 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
2632 ret = (*dev->dev_ops->mtu_set)(dev, mtu);
2634 dev->data->mtu = mtu;
2636 return eth_err(port_id, ret);
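/*
 * Illustrative usage sketch (hypothetical helper): set a new MTU and read
 * back the cached value; dev->data->mtu is only updated once the driver
 * has accepted the change.
 */
static int
example_update_mtu(uint16_t port_id, uint16_t new_mtu)
{
	uint16_t cur;
	int ret = rte_eth_dev_set_mtu(port_id, new_mtu);

	if (ret != 0)
		return ret;
	return rte_eth_dev_get_mtu(port_id, &cur); /* cur now equals new_mtu */
}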
2640 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
2642 struct rte_eth_dev *dev;
2645 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2646 dev = &rte_eth_devices[port_id];
2647 if (!(dev->data->dev_conf.rxmode.offloads &
2648 DEV_RX_OFFLOAD_VLAN_FILTER)) {
2649 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
2654 if (vlan_id > 4095) {
2655 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
2659 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
2661 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
2663 struct rte_vlan_filter_conf *vfc;
2667 vfc = &dev->data->vlan_filter_conf;
2668 vidx = vlan_id / 64;
2669 vbit = vlan_id % 64;
2672 vfc->ids[vidx] |= UINT64_C(1) << vbit;
2674 vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
2677 return eth_err(port_id, ret);
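/*
 * Worked example (hypothetical helper): for vlan_id 100 the bitmap slot
 * above is vidx = 100 / 64 = 1 and vbit = 100 % 64 = 36, so a successful
 * enable sets bit 36 of vlan_filter_conf.ids[1]. The port must have been
 * configured with DEV_RX_OFFLOAD_VLAN_FILTER for the call to succeed.
 */
static int
example_allow_vlan_100(uint16_t port_id)
{
	return rte_eth_dev_vlan_filter(port_id, 100, 1 /* on */);
}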
2681 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2684 struct rte_eth_dev *dev;
2686 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2687 dev = &rte_eth_devices[port_id];
2688 if (rx_queue_id >= dev->data->nb_rx_queues) {
2689 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
2693 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
2694 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
2700 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2701 enum rte_vlan_type vlan_type,
2704 struct rte_eth_dev *dev;
2706 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2707 dev = &rte_eth_devices[port_id];
2708 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
2710 return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
2715 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
2717 struct rte_eth_dev *dev;
2721 uint64_t orig_offloads;
2723 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2724 dev = &rte_eth_devices[port_id];
2726 /* save original values in case of failure */
2727 orig_offloads = dev->data->dev_conf.rxmode.offloads;
2729 /* check which options were changed by the application */
2730 cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
2731 org = !!(dev->data->dev_conf.rxmode.offloads &
2732 DEV_RX_OFFLOAD_VLAN_STRIP);
2735 dev->data->dev_conf.rxmode.offloads |=
2736 DEV_RX_OFFLOAD_VLAN_STRIP;
2738 dev->data->dev_conf.rxmode.offloads &=
2739 ~DEV_RX_OFFLOAD_VLAN_STRIP;
2740 mask |= ETH_VLAN_STRIP_MASK;
2743 cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
2744 org = !!(dev->data->dev_conf.rxmode.offloads &
2745 DEV_RX_OFFLOAD_VLAN_FILTER);
2748 dev->data->dev_conf.rxmode.offloads |=
2749 DEV_RX_OFFLOAD_VLAN_FILTER;
2751 dev->data->dev_conf.rxmode.offloads &=
2752 ~DEV_RX_OFFLOAD_VLAN_FILTER;
2753 mask |= ETH_VLAN_FILTER_MASK;
2756 cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
2757 org = !!(dev->data->dev_conf.rxmode.offloads &
2758 DEV_RX_OFFLOAD_VLAN_EXTEND);
2761 dev->data->dev_conf.rxmode.offloads |=
2762 DEV_RX_OFFLOAD_VLAN_EXTEND;
2764 dev->data->dev_conf.rxmode.offloads &=
2765 ~DEV_RX_OFFLOAD_VLAN_EXTEND;
2766 mask |= ETH_VLAN_EXTEND_MASK;
2773 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
2774 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
2776 /* hit an error; restore the original values */
2777 dev->data->dev_conf.rxmode.offloads = orig_offloads;
2780 return eth_err(port_id, ret);
2784 rte_eth_dev_get_vlan_offload(uint16_t port_id)
2786 struct rte_eth_dev *dev;
2789 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2790 dev = &rte_eth_devices[port_id];
2792 if (dev->data->dev_conf.rxmode.offloads &
2793 DEV_RX_OFFLOAD_VLAN_STRIP)
2794 ret |= ETH_VLAN_STRIP_OFFLOAD;
2796 if (dev->data->dev_conf.rxmode.offloads &
2797 DEV_RX_OFFLOAD_VLAN_FILTER)
2798 ret |= ETH_VLAN_FILTER_OFFLOAD;
2800 if (dev->data->dev_conf.rxmode.offloads &
2801 DEV_RX_OFFLOAD_VLAN_EXTEND)
2802 ret |= ETH_VLAN_EXTEND_OFFLOAD;
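/*
 * Illustrative usage sketch (hypothetical helper): turn VLAN stripping on
 * at run time by editing the offload mask read back from the device, so
 * the other VLAN offload bits are preserved.
 */
static int
example_enable_vlan_strip(uint16_t port_id)
{
	int mask = rte_eth_dev_get_vlan_offload(port_id);

	if (mask < 0)
		return mask;
	return rte_eth_dev_set_vlan_offload(port_id,
					    mask | ETH_VLAN_STRIP_OFFLOAD);
}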
2808 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
2810 struct rte_eth_dev *dev;
2812 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2813 dev = &rte_eth_devices[port_id];
2814 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
2816 return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
2820 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2822 struct rte_eth_dev *dev;
2824 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2825 dev = &rte_eth_devices[port_id];
2826 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2827 memset(fc_conf, 0, sizeof(*fc_conf));
2828 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
2832 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2834 struct rte_eth_dev *dev;
2836 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2837 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2838 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
2842 dev = &rte_eth_devices[port_id];
2843 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2844 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
2848 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
2849 struct rte_eth_pfc_conf *pfc_conf)
2851 struct rte_eth_dev *dev;
2853 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2854 if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2855 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
2859 dev = &rte_eth_devices[port_id];
2860 /* High water / low water validation is device-specific */
2861 if (*dev->dev_ops->priority_flow_ctrl_set)
2862 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
2868 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2876 num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
2877 for (i = 0; i < num; i++) {
2878 if (reta_conf[i].mask)
2886 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2890 uint16_t i, idx, shift;
2896 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
2900 for (i = 0; i < reta_size; i++) {
2901 idx = i / RTE_RETA_GROUP_SIZE;
2902 shift = i % RTE_RETA_GROUP_SIZE;
2903 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2904 (reta_conf[idx].reta[shift] >= max_rxq)) {
2906 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
2908 reta_conf[idx].reta[shift], max_rxq);
2917 rte_eth_dev_rss_reta_update(uint16_t port_id,
2918 struct rte_eth_rss_reta_entry64 *reta_conf,
2921 struct rte_eth_dev *dev;
2924 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2925 /* Check mask bits */
2926 ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2930 dev = &rte_eth_devices[port_id];
2932 /* Check entry value */
2933 ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2934 dev->data->nb_rx_queues);
2938 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2939 return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
2944 rte_eth_dev_rss_reta_query(uint16_t port_id,
2945 struct rte_eth_rss_reta_entry64 *reta_conf,
2948 struct rte_eth_dev *dev;
2951 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2953 /* Check mask bits */
2954 ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2958 dev = &rte_eth_devices[port_id];
2959 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2960 return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
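/*
 * Illustrative usage sketch (hypothetical helper; assumes reta_size is a
 * multiple of RTE_RETA_GROUP_SIZE, as PMDs normally report): spread all
 * indirection-table entries round-robin across nb_queues RX queues. Each
 * 64-entry group carries its own mask selecting the entries to update.
 */
static int
example_reta_round_robin(uint16_t port_id, uint16_t reta_size,
			 uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[reta_size /
						  RTE_RETA_GROUP_SIZE];
	uint16_t i, idx, shift;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		reta_conf[idx].mask |= UINT64_C(1) << shift;
		reta_conf[idx].reta[shift] = i % nb_queues;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}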
2965 rte_eth_dev_rss_hash_update(uint16_t port_id,
2966 struct rte_eth_rss_conf *rss_conf)
2968 struct rte_eth_dev *dev;
2969 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
2971 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2972 dev = &rte_eth_devices[port_id];
2973 rte_eth_dev_info_get(port_id, &dev_info);
2974 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
2975 dev_info.flow_type_rss_offloads) {
2977 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
2978 port_id, rss_conf->rss_hf,
2979 dev_info.flow_type_rss_offloads);
2982 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2983 return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
2988 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
2989 struct rte_eth_rss_conf *rss_conf)
2991 struct rte_eth_dev *dev;
2993 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2994 dev = &rte_eth_devices[port_id];
2995 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2996 return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
3001 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3002 struct rte_eth_udp_tunnel *udp_tunnel)
3004 struct rte_eth_dev *dev;
3006 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3007 if (udp_tunnel == NULL) {
3008 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3012 if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3013 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3017 dev = &rte_eth_devices[port_id];
3018 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
3019 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
3024 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
3025 struct rte_eth_udp_tunnel *udp_tunnel)
3027 struct rte_eth_dev *dev;
3029 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3030 dev = &rte_eth_devices[port_id];
3032 if (udp_tunnel == NULL) {
3033 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3037 if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3038 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3042 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
3043 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
3048 rte_eth_led_on(uint16_t port_id)
3050 struct rte_eth_dev *dev;
3052 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3053 dev = &rte_eth_devices[port_id];
3054 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
3055 return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
3059 rte_eth_led_off(uint16_t port_id)
3061 struct rte_eth_dev *dev;
3063 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3064 dev = &rte_eth_devices[port_id];
3065 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
3066 return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
3070 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3074 get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
3076 struct rte_eth_dev_info dev_info;
3077 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3080 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3081 rte_eth_dev_info_get(port_id, &dev_info);
3083 for (i = 0; i < dev_info.max_mac_addrs; i++)
3084 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
3090 static const struct ether_addr null_mac_addr;
3093 rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
3096 struct rte_eth_dev *dev;
3101 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3102 dev = &rte_eth_devices[port_id];
3103 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
3105 if (is_zero_ether_addr(addr)) {
3106 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3110 if (pool >= ETH_64_POOLS) {
3111 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
3115 index = get_mac_addr_index(port_id, addr);
3117 index = get_mac_addr_index(port_id, &null_mac_addr);
3119 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3124 pool_mask = dev->data->mac_pool_sel[index];
3126 /* If both the MAC address and the pool are already there, do nothing */
3127 if (pool_mask & (1ULL << pool))
3132 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
3135 /* Update address in NIC data structure */
3136 ether_addr_copy(addr, &dev->data->mac_addrs[index]);
3138 /* Update pool bitmap in NIC data structure */
3139 dev->data->mac_pool_sel[index] |= (1ULL << pool);
3142 return eth_err(port_id, ret);
3146 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
3148 struct rte_eth_dev *dev;
3151 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3152 dev = &rte_eth_devices[port_id];
3153 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
3155 index = get_mac_addr_index(port_id, addr);
3158 "Port %u: Cannot remove default MAC address\n",
3161 } else if (index < 0)
3162 return 0; /* Do nothing if address wasn't found */
3165 (*dev->dev_ops->mac_addr_remove)(dev, index);
3167 /* Update address in NIC data structure */
3168 ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
3170 /* reset pool bitmap */
3171 dev->data->mac_pool_sel[index] = 0;
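/*
 * Illustrative usage sketch (hypothetical helper and address): add a
 * locally administered secondary MAC address to pool 0, then remove it
 * again, exercising both update paths above.
 */
static int
example_toggle_secondary_mac(uint16_t port_id)
{
	struct ether_addr mac = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};
	int ret = rte_eth_dev_mac_addr_add(port_id, &mac, 0);

	if (ret != 0)
		return ret;
	return rte_eth_dev_mac_addr_remove(port_id, &mac);
}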
3177 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
3179 struct rte_eth_dev *dev;
3182 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3184 if (!is_valid_assigned_ether_addr(addr))
3187 dev = &rte_eth_devices[port_id];
3188 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
3190 ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
3194 /* Update default address in NIC data structure */
3195 ether_addr_copy(addr, &dev->data->mac_addrs[0]);
3202 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3206 get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
3208 struct rte_eth_dev_info dev_info;
3209 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3212 rte_eth_dev_info_get(port_id, &dev_info);
3213 if (!dev->data->hash_mac_addrs)
3216 for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
3217 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
3218 ETHER_ADDR_LEN) == 0)
3225 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
3230 struct rte_eth_dev *dev;
3232 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3234 dev = &rte_eth_devices[port_id];
3235 if (is_zero_ether_addr(addr)) {
3236 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3241 index = get_hash_mac_addr_index(port_id, addr);
3242 /* Check if it's already there, and do nothing */
3243 if ((index >= 0) && on)
3249 "Port %u: the MAC address was not set in UTA\n",
3254 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
3256 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3262 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
3263 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
3265 /* Update address in NIC data structure */
3267 ether_addr_copy(addr,
3268 &dev->data->hash_mac_addrs[index]);
3270 ether_addr_copy(&null_mac_addr,
3271 &dev->data->hash_mac_addrs[index]);
3274 return eth_err(port_id, ret);
3278 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
3280 struct rte_eth_dev *dev;
3282 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3284 dev = &rte_eth_devices[port_id];
3286 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
3287 return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
3291 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3294 struct rte_eth_dev *dev;
3295 struct rte_eth_dev_info dev_info;
3296 struct rte_eth_link link;
3298 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3300 dev = &rte_eth_devices[port_id];
3301 rte_eth_dev_info_get(port_id, &dev_info);
3302 link = dev->data->dev_link;
3304 if (queue_idx > dev_info.max_tx_queues) {
3306 "Set queue rate limit: port %u: invalid queue id=%u\n",
3307 port_id, queue_idx);
3311 if (tx_rate > link.link_speed) {
3313 "Set queue rate limit: invalid tx_rate=%u, bigger than link speed=%u\n",
3314 tx_rate, link.link_speed);
3318 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
3319 return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
3320 queue_idx, tx_rate));
3324 rte_eth_mirror_rule_set(uint16_t port_id,
3325 struct rte_eth_mirror_conf *mirror_conf,
3326 uint8_t rule_id, uint8_t on)
3328 struct rte_eth_dev *dev;
3330 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3331 if (mirror_conf->rule_type == 0) {
3332 RTE_ETHDEV_LOG(ERR, "Mirror rule type cannot be 0\n");
3336 if (mirror_conf->dst_pool >= ETH_64_POOLS) {
3337 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
3342 if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
3343 ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
3344 (mirror_conf->pool_mask == 0)) {
3346 "Invalid mirror pool, pool mask cannot be 0\n");
3350 if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
3351 mirror_conf->vlan.vlan_mask == 0) {
3353 "Invalid vlan mask, vlan mask cannot be 0\n");
3357 dev = &rte_eth_devices[port_id];
3358 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
3360 return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
3361 mirror_conf, rule_id, on));
3365 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
3367 struct rte_eth_dev *dev;
3369 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3371 dev = &rte_eth_devices[port_id];
3372 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3374 return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
3378 RTE_INIT(eth_dev_init_cb_lists)
3382 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3383 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
3387 rte_eth_dev_callback_register(uint16_t port_id,
3388 enum rte_eth_event_type event,
3389 rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3391 struct rte_eth_dev *dev;
3392 struct rte_eth_dev_callback *user_cb;
3393 uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3399 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3400 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
3404 if (port_id == RTE_ETH_ALL) {
3406 last_port = RTE_MAX_ETHPORTS - 1;
3408 next_port = last_port = port_id;
3411 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3414 dev = &rte_eth_devices[next_port];
3416 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
3417 if (user_cb->cb_fn == cb_fn &&
3418 user_cb->cb_arg == cb_arg &&
3419 user_cb->event == event) {
3424 /* create a new callback. */
3425 if (user_cb == NULL) {
3426 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
3427 sizeof(struct rte_eth_dev_callback), 0);
3428 if (user_cb != NULL) {
3429 user_cb->cb_fn = cb_fn;
3430 user_cb->cb_arg = cb_arg;
3431 user_cb->event = event;
3432 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
3435 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3436 rte_eth_dev_callback_unregister(port_id, event,
3442 } while (++next_port <= last_port);
3444 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
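/*
 * Illustrative usage sketch (hypothetical callback): register a link
 * status handler on every port via RTE_ETH_ALL. The handler runs outside
 * the callback lock, as _rte_eth_dev_callback_process() below shows.
 */
static int
example_on_link_change(uint16_t port_id, enum rte_eth_event_type event,
		       void *cb_arg, void *ret_param)
{
	RTE_SET_USED(event);
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);
	printf("port %u: link state changed\n", port_id);
	return 0;
}

static int
example_register_link_callback(void)
{
	return rte_eth_dev_callback_register(RTE_ETH_ALL,
			RTE_ETH_EVENT_INTR_LSC, example_on_link_change, NULL);
}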
3449 rte_eth_dev_callback_unregister(uint16_t port_id,
3450 enum rte_eth_event_type event,
3451 rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3454 struct rte_eth_dev *dev;
3455 struct rte_eth_dev_callback *cb, *next;
3456 uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3462 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3463 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
3467 if (port_id == RTE_ETH_ALL) {
3469 last_port = RTE_MAX_ETHPORTS - 1;
3471 next_port = last_port = port_id;
3474 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3477 dev = &rte_eth_devices[next_port];
3479 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
3482 next = TAILQ_NEXT(cb, next);
3484 if (cb->cb_fn != cb_fn || cb->event != event ||
3485 (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
3489 * if this callback is not executing right now,
3492 if (cb->active == 0) {
3493 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
3499 } while (++next_port <= last_port);
3501 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3506 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
3507 enum rte_eth_event_type event, void *ret_param)
3509 struct rte_eth_dev_callback *cb_lst;
3510 struct rte_eth_dev_callback dev_cb;
3513 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3514 TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
3515 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
3519 if (ret_param != NULL)
3520 dev_cb.ret_param = ret_param;
3522 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3523 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
3524 dev_cb.cb_arg, dev_cb.ret_param);
3525 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3528 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3533 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
3538 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
3540 dev->state = RTE_ETH_DEV_ATTACHED;
3544 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
3547 struct rte_eth_dev *dev;
3548 struct rte_intr_handle *intr_handle;
3552 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3554 dev = &rte_eth_devices[port_id];
3556 if (!dev->intr_handle) {
3557 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
3561 intr_handle = dev->intr_handle;
3562 if (!intr_handle->intr_vec) {
3563 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
3567 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
3568 vec = intr_handle->intr_vec[qid];
3569 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3570 if (rc && rc != -EEXIST) {
3572 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
3573 port_id, qid, op, epfd, vec);
3580 int __rte_experimental
3581 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
3583 struct rte_intr_handle *intr_handle;
3584 struct rte_eth_dev *dev;
3585 unsigned int efd_idx;
3589 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
3591 dev = &rte_eth_devices[port_id];
3593 if (queue_id >= dev->data->nb_rx_queues) {
3594 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
3598 if (!dev->intr_handle) {
3599 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
3603 intr_handle = dev->intr_handle;
3604 if (!intr_handle->intr_vec) {
3605 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
3609 vec = intr_handle->intr_vec[queue_id];
3610 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
3611 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
3612 fd = intr_handle->efds[efd_idx];
3617 const struct rte_memzone *
3618 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
3619 uint16_t queue_id, size_t size, unsigned align,
3622 char z_name[RTE_MEMZONE_NAMESIZE];
3623 const struct rte_memzone *mz;
3625 snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
3626 dev->data->port_id, queue_id, ring_name);
3628 mz = rte_memzone_lookup(z_name);
3632 return rte_memzone_reserve_aligned(z_name, size, socket_id,
3633 RTE_MEMZONE_IOVA_CONTIG, align);
3636 int __rte_experimental
3637 rte_eth_dev_create(struct rte_device *device, const char *name,
3638 size_t priv_data_size,
3639 ethdev_bus_specific_init ethdev_bus_specific_init,
3640 void *bus_init_params,
3641 ethdev_init_t ethdev_init, void *init_params)
3643 struct rte_eth_dev *ethdev;
3646 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
3648 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3649 ethdev = rte_eth_dev_allocate(name);
3653 if (priv_data_size) {
3654 ethdev->data->dev_private = rte_zmalloc_socket(
3655 name, priv_data_size, RTE_CACHE_LINE_SIZE,
3658 if (!ethdev->data->dev_private) {
3659 RTE_LOG(ERR, EAL, "failed to allocate private data");
3665 ethdev = rte_eth_dev_attach_secondary(name);
3667 RTE_LOG(ERR, EAL, "secondary process attach failed, "
3668 "ethdev doesn't exist");
3673 ethdev->device = device;
3675 if (ethdev_bus_specific_init) {
3676 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
3679 "ethdev bus specific initialisation failed");
3684 retval = ethdev_init(ethdev, init_params);
3686 RTE_LOG(ERR, EAL, "ethdev initialisation failed");
3690 rte_eth_dev_probing_finish(ethdev);
3695 rte_eth_dev_release_port(ethdev);
3699 int __rte_experimental
3700 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
3701 ethdev_uninit_t ethdev_uninit)
3705 ethdev = rte_eth_dev_allocated(ethdev->data->name);
3709 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
3710 if (ethdev_uninit) {
3711 ret = ethdev_uninit(ethdev);
3716 return rte_eth_dev_release_port(ethdev);
3720 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
3721 int epfd, int op, void *data)
3724 struct rte_eth_dev *dev;
3725 struct rte_intr_handle *intr_handle;
3728 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3730 dev = &rte_eth_devices[port_id];
3731 if (queue_id >= dev->data->nb_rx_queues) {
3732 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
3736 if (!dev->intr_handle) {
3737 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
3741 intr_handle = dev->intr_handle;
3742 if (!intr_handle->intr_vec) {
3743 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
3747 vec = intr_handle->intr_vec[queue_id];
3748 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3749 if (rc && rc != -EEXIST) {
3751 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
3752 port_id, queue_id, op, epfd, vec);
3760 rte_eth_dev_rx_intr_enable(uint16_t port_id,
3763 struct rte_eth_dev *dev;
3765 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3767 dev = &rte_eth_devices[port_id];
3769 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
3770 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
3775 rte_eth_dev_rx_intr_disable(uint16_t port_id,
3778 struct rte_eth_dev *dev;
3780 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3782 dev = &rte_eth_devices[port_id];
3784 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
3785 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
3791 rte_eth_dev_filter_supported(uint16_t port_id,
3792 enum rte_filter_type filter_type)
3794 struct rte_eth_dev *dev;
3796 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3798 dev = &rte_eth_devices[port_id];
3799 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3800 return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3801 RTE_ETH_FILTER_NOP, NULL);
3805 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
3806 enum rte_filter_op filter_op, void *arg)
3808 struct rte_eth_dev *dev;
3810 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3812 dev = &rte_eth_devices[port_id];
3813 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3814 return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3818 const struct rte_eth_rxtx_callback *
3819 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3820 rte_rx_callback_fn fn, void *user_param)
3822 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3823 rte_errno = ENOTSUP;
3826 /* check input parameters */
3827 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3828 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3832 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3840 cb->param = user_param;
3842 rte_spinlock_lock(&rte_eth_rx_cb_lock);
3843 /* Add the callbacks in FIFO order. */
3844 struct rte_eth_rxtx_callback *tail =
3845 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3848 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3855 rte_spinlock_unlock(&rte_eth_rx_cb_lock);
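/*
 * Illustrative usage sketch (hypothetical callback; requires
 * RTE_ETHDEV_RXTX_CALLBACKS): count packets seen on RX queue 0 with a
 * post-RX-burst callback; the counter is passed as user_param.
 */
static uint16_t
example_count_rx_cb(uint16_t port_id, uint16_t queue,
		    struct rte_mbuf *pkts[], uint16_t nb_pkts,
		    uint16_t max_pkts, void *user_param)
{
	RTE_SET_USED(port_id);
	RTE_SET_USED(queue);
	RTE_SET_USED(pkts);
	RTE_SET_USED(max_pkts);
	*(uint64_t *)user_param += nb_pkts;
	return nb_pkts;
}

static const struct rte_eth_rxtx_callback *
example_attach_rx_counter(uint16_t port_id, uint64_t *counter)
{
	return rte_eth_add_rx_callback(port_id, 0, example_count_rx_cb,
				       counter);
}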
3860 const struct rte_eth_rxtx_callback *
3861 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3862 rte_rx_callback_fn fn, void *user_param)
3864 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3865 rte_errno = ENOTSUP;
3868 /* check input parameters */
3869 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3870 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3875 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3883 cb->param = user_param;
3885 rte_spinlock_lock(&rte_eth_rx_cb_lock);
3886 /* Add the callback at the first position */
3887 cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3889 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3890 rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3895 const struct rte_eth_rxtx_callback *
3896 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3897 rte_tx_callback_fn fn, void *user_param)
3899 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3900 rte_errno = ENOTSUP;
3903 /* check input parameters */
3904 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3905 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3910 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3918 cb->param = user_param;
3920 rte_spinlock_lock(&rte_eth_tx_cb_lock);
3921 /* Add the callbacks in FIFO order. */
3922 struct rte_eth_rxtx_callback *tail =
3923 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3926 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3933 rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3939 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3940 const struct rte_eth_rxtx_callback *user_cb)
3942 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3945 /* Check input parameters. */
3946 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3947 if (user_cb == NULL ||
3948 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
3951 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3952 struct rte_eth_rxtx_callback *cb;
3953 struct rte_eth_rxtx_callback **prev_cb;
3956 rte_spinlock_lock(&rte_eth_rx_cb_lock);
3957 prev_cb = &dev->post_rx_burst_cbs[queue_id];
3958 for (; *prev_cb != NULL; prev_cb = &cb->next) {
3960 if (cb == user_cb) {
3961 /* Remove the user cb from the callback list. */
3962 *prev_cb = cb->next;
3967 rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3973 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3974 const struct rte_eth_rxtx_callback *user_cb)
3976 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3979 /* Check input parameters. */
3980 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3981 if (user_cb == NULL ||
3982 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
3985 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3987 struct rte_eth_rxtx_callback *cb;
3988 struct rte_eth_rxtx_callback **prev_cb;
3990 rte_spinlock_lock(&rte_eth_tx_cb_lock);
3991 prev_cb = &dev->pre_tx_burst_cbs[queue_id];
3992 for (; *prev_cb != NULL; prev_cb = &cb->next) {
3994 if (cb == user_cb) {
3995 /* Remove the user cb from the callback list. */
3996 *prev_cb = cb->next;
4001 rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4007 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4008 struct rte_eth_rxq_info *qinfo)
4010 struct rte_eth_dev *dev;
4012 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4017 dev = &rte_eth_devices[port_id];
4018 if (queue_id >= dev->data->nb_rx_queues) {
4019 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4023 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
4025 memset(qinfo, 0, sizeof(*qinfo));
4026 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
4031 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4032 struct rte_eth_txq_info *qinfo)
4034 struct rte_eth_dev *dev;
4036 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4041 dev = &rte_eth_devices[port_id];
4042 if (queue_id >= dev->data->nb_tx_queues) {
4043 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4047 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
4049 memset(qinfo, 0, sizeof(*qinfo));
4050 dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
4056 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
4057 struct ether_addr *mc_addr_set,
4058 uint32_t nb_mc_addr)
4060 struct rte_eth_dev *dev;
4062 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4064 dev = &rte_eth_devices[port_id];
4065 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
4066 return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
4067 mc_addr_set, nb_mc_addr));
4071 rte_eth_timesync_enable(uint16_t port_id)
4073 struct rte_eth_dev *dev;
4075 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4076 dev = &rte_eth_devices[port_id];
4078 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
4079 return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
4083 rte_eth_timesync_disable(uint16_t port_id)
4085 struct rte_eth_dev *dev;
4087 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4088 dev = &rte_eth_devices[port_id];
4090 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
4091 return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
4095 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
4098 struct rte_eth_dev *dev;
4100 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4101 dev = &rte_eth_devices[port_id];
4103 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
4104 return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
4105 (dev, timestamp, flags));
4109 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
4110 struct timespec *timestamp)
4112 struct rte_eth_dev *dev;
4114 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4115 dev = &rte_eth_devices[port_id];
4117 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
4118 return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
4123 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
4125 struct rte_eth_dev *dev;
4127 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4128 dev = &rte_eth_devices[port_id];
4130 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
4131 return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
4136 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
4138 struct rte_eth_dev *dev;
4140 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4141 dev = &rte_eth_devices[port_id];
4143 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
4144 return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
4149 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
4151 struct rte_eth_dev *dev;
4153 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4154 dev = &rte_eth_devices[port_id];
4156 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
4157 return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
4162 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
4164 struct rte_eth_dev *dev;
4166 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4168 dev = &rte_eth_devices[port_id];
4169 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
4170 return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
4174 rte_eth_dev_get_eeprom_length(uint16_t port_id)
4176 struct rte_eth_dev *dev;
4178 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4180 dev = &rte_eth_devices[port_id];
4181 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
4182 return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
4186 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4188 struct rte_eth_dev *dev;
4190 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4192 dev = &rte_eth_devices[port_id];
4193 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
4194 return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
4198 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4200 struct rte_eth_dev *dev;
4202 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4204 dev = &rte_eth_devices[port_id];
4205 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
4206 return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
4209 int __rte_experimental
4210 rte_eth_dev_get_module_info(uint16_t port_id,
4211 struct rte_eth_dev_module_info *modinfo)
4213 struct rte_eth_dev *dev;
4215 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4217 dev = &rte_eth_devices[port_id];
4218 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
4219 return (*dev->dev_ops->get_module_info)(dev, modinfo);
4222 int __rte_experimental
4223 rte_eth_dev_get_module_eeprom(uint16_t port_id,
4224 struct rte_dev_eeprom_info *info)
4226 struct rte_eth_dev *dev;
4228 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4230 dev = &rte_eth_devices[port_id];
4231 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
4232 return (*dev->dev_ops->get_module_eeprom)(dev, info);
4236 rte_eth_dev_get_dcb_info(uint16_t port_id,
4237 struct rte_eth_dcb_info *dcb_info)
4239 struct rte_eth_dev *dev;
4241 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4243 dev = &rte_eth_devices[port_id];
4244 memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
4246 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
4247 return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
4251 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
4252 struct rte_eth_l2_tunnel_conf *l2_tunnel)
4254 struct rte_eth_dev *dev;
4256 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4257 if (l2_tunnel == NULL) {
4258 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
4262 if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4263 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4267 dev = &rte_eth_devices[port_id];
4268 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
4270 return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
4275 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
4276 struct rte_eth_l2_tunnel_conf *l2_tunnel,
4280 struct rte_eth_dev *dev;
4282 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4284 if (l2_tunnel == NULL) {
4285 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
4289 if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4290 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4295 RTE_ETHDEV_LOG(ERR, "Mask should have a value\n");
4299 dev = &rte_eth_devices[port_id];
4300 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
4302 return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
4303 l2_tunnel, mask, en));
4307 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
4308 const struct rte_eth_desc_lim *desc_lim)
4310 if (desc_lim->nb_align != 0)
4311 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
4313 if (desc_lim->nb_max != 0)
4314 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
4316 *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
4320 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
4321 uint16_t *nb_rx_desc,
4322 uint16_t *nb_tx_desc)
4324 struct rte_eth_dev *dev;
4325 struct rte_eth_dev_info dev_info;
4327 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4329 dev = &rte_eth_devices[port_id];
4330 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
4332 rte_eth_dev_info_get(port_id, &dev_info);
4334 if (nb_rx_desc != NULL)
4335 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
4337 if (nb_tx_desc != NULL)
4338 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
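/*
 * Illustrative usage sketch (hypothetical helper): deliberately request
 * oversized rings and let the API clamp and align them to the PMD limits
 * before the queue setup calls.
 */
static int
example_pick_ring_sizes(uint16_t port_id, uint16_t *nb_rxd, uint16_t *nb_txd)
{
	*nb_rxd = 4096;
	*nb_txd = 4096;
	return rte_eth_dev_adjust_nb_rx_tx_desc(port_id, nb_rxd, nb_txd);
}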
4344 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
4346 struct rte_eth_dev *dev;
4348 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4353 dev = &rte_eth_devices[port_id];
4355 if (*dev->dev_ops->pool_ops_supported == NULL)
4356 return 1; /* all pools are supported */
4358 return (*dev->dev_ops->pool_ops_supported)(dev, pool);
4362 * A set of values to describe the possible states of a switch domain.
4364 enum rte_eth_switch_domain_state {
4365 RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
4366 RTE_ETH_SWITCH_DOMAIN_ALLOCATED
4370 * Array of switch domains available for allocation. Array is sized to
4371 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
4372 * ethdev ports in a single process.
4374 struct rte_eth_dev_switch {
4375 enum rte_eth_switch_domain_state state;
4376 } rte_eth_switch_domains[RTE_MAX_ETHPORTS];
4378 int __rte_experimental
4379 rte_eth_switch_domain_alloc(uint16_t *domain_id)
4383 *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
4385 for (i = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID + 1;
4386 i < RTE_MAX_ETHPORTS; i++) {
4387 if (rte_eth_switch_domains[i].state ==
4388 RTE_ETH_SWITCH_DOMAIN_UNUSED) {
4389 rte_eth_switch_domains[i].state =
4390 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
4399 int __rte_experimental
4400 rte_eth_switch_domain_free(uint16_t domain_id)
4402 if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
4403 domain_id >= RTE_MAX_ETHPORTS)
4406 if (rte_eth_switch_domains[domain_id].state !=
4407 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
4410 rte_eth_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
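/*
 * Illustrative usage sketch (hypothetical helper): allocate a switch
 * domain for a group of representor ports and release it on teardown.
 */
static int
example_switch_domain_lifecycle(void)
{
	uint16_t domain_id;
	int ret = rte_eth_switch_domain_alloc(&domain_id);

	if (ret != 0)
		return ret;
	/* ... assign domain_id to the related ethdev ports here ... */
	return rte_eth_switch_domain_free(domain_id);
}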
4415 typedef int (*rte_eth_devargs_callback_t)(char *str, void *data);
4418 rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
4421 struct rte_kvargs_pair *pair;
4424 arglist->str = strdup(str_in);
4425 if (arglist->str == NULL)
4428 letter = arglist->str;
4431 pair = &arglist->pairs[0];
4434 case 0: /* Initial */
4437 else if (*letter == '\0')
4444 case 1: /* Parsing key */
4445 if (*letter == '=') {
4447 pair->value = letter + 1;
4449 } else if (*letter == ',' || *letter == '\0')
4454 case 2: /* Parsing value */
4457 else if (*letter == ',') {
4460 pair = &arglist->pairs[arglist->count];
4462 } else if (*letter == '\0') {
4465 pair = &arglist->pairs[arglist->count];
4470 case 3: /* Parsing list */
4473 else if (*letter == '\0')
4482 rte_eth_devargs_parse_list(char *str, rte_eth_devargs_callback_t callback,
4490 /* Single element, not a list */
4491 return callback(str, data);
4493 /* Sanity check, then strip the brackets */
4494 str_start = &str[strlen(str) - 1];
4495 if (*str_start != ']') {
4496 RTE_LOG(ERR, EAL, "(%s): List does not end with ']'", str);
4502 /* Process list elements */
4512 } else if (state == 1) {
4513 if (*str == ',' || *str == '\0') {
4514 if (str > str_start) {
4515 /* Non-empty string fragment */
4517 result = callback(str_start, data);
4530 rte_eth_devargs_process_range(char *str, uint16_t *list, uint16_t *len_list,
4531 const uint16_t max_list)
4533 uint16_t lo, hi, val;
4536 result = sscanf(str, "%hu-%hu", &lo, &hi);
4538 if (*len_list >= max_list)
4540 list[(*len_list)++] = lo;
4541 } else if (result == 2) {
4542 if (lo >= hi || lo > RTE_MAX_ETHPORTS || hi > RTE_MAX_ETHPORTS)
4544 for (val = lo; val <= hi; val++) {
4545 if (*len_list >= max_list)
4547 list[(*len_list)++] = val;
4556 rte_eth_devargs_parse_representor_ports(char *str, void *data)
4558 struct rte_eth_devargs *eth_da = data;
4560 return rte_eth_devargs_process_range(str, eth_da->representor_ports,
4561 &eth_da->nb_representor_ports, RTE_MAX_ETHPORTS);
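/*
 * Illustrative usage sketch (hypothetical helper): parse a representor
 * list such as "representor=[0-3]", which expands through the range
 * handler above into ports 0, 1, 2 and 3.
 */
static int
example_parse_representors(void)
{
	struct rte_eth_devargs da;
	int ret = rte_eth_devargs_parse("representor=[0-3]", &da);

	if (ret < 0)
		return ret;
	return da.nb_representor_ports == 4 ? 0 : -EINVAL;
}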
4564 int __rte_experimental
4565 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
4567 struct rte_kvargs args;
4568 struct rte_kvargs_pair *pair;
4572 memset(eth_da, 0, sizeof(*eth_da));
4574 result = rte_eth_devargs_tokenise(&args, dargs);
4578 for (i = 0; i < args.count; i++) {
4579 pair = &args.pairs[i];
4580 if (strcmp("representor", pair->key) == 0) {
4581 result = rte_eth_devargs_parse_list(pair->value,
4582 rte_eth_devargs_parse_representor_ports,
4596 RTE_INIT(ethdev_init_log)
4598 rte_eth_dev_logtype = rte_log_register("lib.ethdev");
4599 if (rte_eth_dev_logtype >= 0)
4600 rte_log_set_level(rte_eth_dev_logtype, RTE_LOG_INFO);