/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_dev.h>

#include "rte_ether.h"
#include "rte_ethdev.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static struct rte_eth_dev_data *rte_eth_dev_data;
static uint8_t eth_dev_last_created_port;
static uint8_t nb_ports;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) / \
		sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) / \
		sizeof(rte_txq_stats_strings[0]))

/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

uint8_t
rte_eth_find_next(uint8_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
	       rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}

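/*
 * Usage sketch (illustrative only, kept under #if 0 so it is not built):
 * an application would typically walk the attached ports either with
 * rte_eth_find_next() directly, as below, or with the RTE_ETH_FOREACH_DEV()
 * convenience macro built on top of it. The example_ function name is
 * hypothetical.
 */
#if 0
static void
example_list_ports(void)
{
	uint8_t pid;

	for (pid = rte_eth_find_next(0);
	     pid < RTE_MAX_ETHPORTS;
	     pid = rte_eth_find_next(pid + 1))
		printf("port %u is attached\n", pid);
}
#endif
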
static void
rte_eth_dev_data_alloc(void)
{
	const unsigned flags = 0;
	const struct rte_memzone *mz;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
				RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
				rte_socket_id(), flags);
	} else
		mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
	if (mz == NULL)
		rte_panic("Cannot allocate memzone for ethernet port data\n");

	rte_eth_dev_data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(rte_eth_dev_data, 0,
				RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

static uint8_t
rte_eth_dev_find_free_port(void)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].state == RTE_ETH_DEV_UNUSED)
			return i;
	}
	return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint8_t port_id)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &rte_eth_dev_data[port_id];
	eth_dev->state = RTE_ETH_DEV_ATTACHED;
	TAILQ_INIT(&(eth_dev->link_intr_cbs));

	eth_dev_last_created_port = port_id;
	nb_ports++;

	return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint8_t port_id;
	struct rte_eth_dev *eth_dev;

	port_id = rte_eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
		return NULL;
	}

	if (rte_eth_dev_data == NULL)
		rte_eth_dev_data_alloc();

	if (rte_eth_dev_allocated(name) != NULL) {
		RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
				name);
		return NULL;
	}

	memset(&rte_eth_dev_data[port_id], 0, sizeof(struct rte_eth_dev_data));
	eth_dev = eth_dev_get(port_id);
	snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
	eth_dev->data->port_id = port_id;
	eth_dev->data->mtu = ETHER_MTU;

	return eth_dev;
}

/*
 * Attach to a port already registered by the primary process, which
 * makes sure that the same device would have the same port id both
 * in the primary and secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint8_t i;
	struct rte_eth_dev *eth_dev;

	if (rte_eth_dev_data == NULL)
		rte_eth_dev_data_alloc();

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(rte_eth_dev_data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_PMD_DEBUG_TRACE(
			"device %s is not driven by the primary process\n",
			name);
		return NULL;
	}

	eth_dev = eth_dev_get(i);
	RTE_ASSERT(eth_dev->data->port_id == i);

	return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	eth_dev->state = RTE_ETH_DEV_UNUSED;
	nb_ports--;
	return 0;
}

int
rte_eth_dev_is_valid_port(uint8_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED)
		return 0;
	else
		return 1;
}

int
rte_eth_dev_socket_id(uint8_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}

uint8_t
rte_eth_dev_count(void)
{
	return nb_ports;
}

int
rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (name == NULL) {
		RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[i].data',
	 * because it might be overwritten by VDEV PMD */
	tmp = rte_eth_dev_data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
{
	int i;

	if (name == NULL) {
		RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
		return -EINVAL;
	}

	RTE_ETH_FOREACH_DEV(i) {
		if (!strncmp(name,
			rte_eth_dev_data[i].name, strlen(name))) {

			*port_id = i;

			return 0;
		}
	}
	return -ENODEV;
}

static int
rte_eth_dev_is_detachable(uint8_t port_id)
{
	uint32_t dev_flags;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	switch (rte_eth_devices[port_id].data->kdrv) {
	case RTE_KDRV_IGB_UIO:
	case RTE_KDRV_UIO_GENERIC:
	case RTE_KDRV_NIC_UIO:
	case RTE_KDRV_NONE:
		break;
	case RTE_KDRV_VFIO:
	default:
		return -ENOTSUP;
	}
	dev_flags = rte_eth_devices[port_id].data->dev_flags;
	if ((dev_flags & RTE_ETH_DEV_DETACHABLE) &&
		(!(dev_flags & RTE_ETH_DEV_BONDED_SLAVE)))
		return 0;
	else
		return 1;
}

/* attach the new device, then store port_id of the device */
int
rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
{
	int ret = -1;
	int current = rte_eth_dev_count();
	char *name = NULL;
	char *args = NULL;

	if ((devargs == NULL) || (port_id == NULL)) {
		ret = -EINVAL;
		goto err;
	}

	/* parse devargs, then retrieve device name and args */
	if (rte_eal_parse_devargs_str(devargs, &name, &args))
		goto err;

	ret = rte_eal_dev_attach(name, args);
	if (ret < 0)
		goto err;

	/* no point looking at the port count if no port exists */
	if (!rte_eth_dev_count()) {
		RTE_LOG(ERR, EAL, "No port found for device (%s)\n", name);
		ret = -1;
		goto err;
	}

	/* if nothing happened, there is a bug here, since some driver told us
	 * it did attach a device, but did not create a port.
	 */
	if (current == rte_eth_dev_count()) {
		ret = -1;
		goto err;
	}

	*port_id = eth_dev_last_created_port;
	ret = 0;

err:
	free(name);
	free(args);
	return ret;
}

/* detach the device, then store the name of the device */
int
rte_eth_dev_detach(uint8_t port_id, char *name)
{
	int ret = -1;

	if (name == NULL) {
		ret = -EINVAL;
		goto err;
	}

	/* FIXME: move this to eal, once device flags are relocated there */
	if (rte_eth_dev_is_detachable(port_id))
		goto err;

	snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
		 "%s", rte_eth_devices[port_id].data->name);
	ret = rte_eal_dev_detach(name);
	if (ret < 0)
		goto err;

	return 0;

err:
	return ret;
}

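/*
 * Usage sketch (illustrative only, kept under #if 0 so it is not built):
 * hot-plugging a port from a devargs string and removing it again. The
 * "net_null0" devargs and the example_ function name are hypothetical.
 */
#if 0
static int
example_hotplug(void)
{
	uint8_t port_id;
	char name[RTE_ETH_NAME_MAX_LEN];

	if (rte_eth_dev_attach("net_null0", &port_id) < 0)
		return -1;
	/* ... use the port ... */
	return rte_eth_dev_detach(port_id, name);
}
#endif
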
static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	void **rxq;
	unsigned i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);
		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (rxq == NULL)
			return -(ENOMEM);
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(rxq + old_nb_queues, 0,
				sizeof(rxq[0]) * new_qs);
		}

		dev->data->rx_queues = rxq;

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);

		rte_free(dev->data->rx_queues);
		dev->data->rx_queues = NULL;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}

int
rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
			" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
}

int
rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
			" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
}

int
rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
			" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
}

int
rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
			" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	void **txq;
	unsigned i;

	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
				sizeof(dev->data->tx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);
		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (txq == NULL)
			return -ENOMEM;
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(txq + old_nb_queues, 0,
				sizeof(txq[0]) * new_qs);
		}

		dev->data->tx_queues = txq;

	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);

		rte_free(dev->data->tx_queues);
		dev->data->tx_queues = NULL;
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case ETH_SPEED_NUM_10M:
		return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
	case ETH_SPEED_NUM_100M:
		return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
	case ETH_SPEED_NUM_1G:
		return ETH_LINK_SPEED_1G;
	case ETH_SPEED_NUM_2_5G:
		return ETH_LINK_SPEED_2_5G;
	case ETH_SPEED_NUM_5G:
		return ETH_LINK_SPEED_5G;
	case ETH_SPEED_NUM_10G:
		return ETH_LINK_SPEED_10G;
	case ETH_SPEED_NUM_20G:
		return ETH_LINK_SPEED_20G;
	case ETH_SPEED_NUM_25G:
		return ETH_LINK_SPEED_25G;
	case ETH_SPEED_NUM_40G:
		return ETH_LINK_SPEED_40G;
	case ETH_SPEED_NUM_50G:
		return ETH_LINK_SPEED_50G;
	case ETH_SPEED_NUM_56G:
		return ETH_LINK_SPEED_56G;
	case ETH_SPEED_NUM_100G:
		return ETH_LINK_SPEED_100G;
	default:
		return 0;
	}
}

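/*
 * Usage sketch (illustrative only, kept under #if 0 so it is not built):
 * building the link_speeds field of struct rte_eth_conf from a numeric
 * speed, here a fixed 10G full-duplex link. The example_ name is
 * hypothetical.
 */
#if 0
static uint32_t
example_link_speeds(void)
{
	return ETH_LINK_SPEED_FIXED |
	       rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX);
}
#endif
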
int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_PMD_DEBUG_TRACE(
			"Number of RX queues requested (%u) is greater than max supported(%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		return -EINVAL;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_PMD_DEBUG_TRACE(
			"Number of TX queues requested (%u) is greater than max supported(%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_rx_q == 0 && nb_tx_q == 0) {
		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d both rx and tx queue cannot be 0\n", port_id);
		return -EINVAL;
	}

	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
				port_id, nb_rx_q, dev_info.max_rx_queues);
		return -EINVAL;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
				port_id, nb_tx_q, dev_info.max_tx_queues);
		return -EINVAL;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
		(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
			RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
					dev->data->drv_name);
			return -EINVAL;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n",
				    dev->data->drv_name);
		return -EINVAL;
	}

	/*
	 * If jumbo frames are enabled, check that the maximum RX packet
	 * length is supported by the configured device.
	 */
	if (dev_conf->rxmode.jumbo_frame == 1) {
		if (dev_conf->rxmode.max_rx_pkt_len >
		    dev_info.max_rx_pktlen) {
			RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" > max valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)dev_info.max_rx_pktlen);
			return -EINVAL;
		} else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
			RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" < min valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)ETHER_MIN_LEN);
			return -EINVAL;
		}
	} else {
		if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
			dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
			/* Use default value */
			dev->data->dev_conf.rxmode.max_rx_pkt_len =
							ETHER_MAX_LEN;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
				port_id, diag);
		return diag;
	}

	diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
				port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		return diag;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
				port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		return diag;
	}

	return 0;
}

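/*
 * Usage sketch (illustrative only, kept under #if 0 so it is not built):
 * the canonical port bring-up sequence an application follows around
 * rte_eth_dev_configure(). The mbuf pool is assumed to exist already;
 * queue sizes and the example_ name are hypothetical.
 */
#if 0
static int
example_port_init(uint8_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf port_conf;
	int ret;

	memset(&port_conf, 0, sizeof(port_conf));

	/* one RX and one TX queue */
	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret < 0)
		return ret;

	ret = rte_eth_rx_queue_setup(port_id, 0, 128,
			rte_eth_dev_socket_id(port_id), NULL, mb_pool);
	if (ret < 0)
		return ret;

	ret = rte_eth_tx_queue_setup(port_id, 0, 512,
			rte_eth_dev_socket_id(port_id), NULL);
	if (ret < 0)
		return ret;

	return rte_eth_dev_start(port_id);
}
#endif
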
void
_rte_eth_dev_reset(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
			"port %d must be stopped to allow reset\n",
			dev->data->port_id);
		return;
	}

	rte_eth_dev_rx_queue_config(dev, 0);
	rte_eth_dev_tx_queue_config(dev, 0);

	memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

static void
rte_eth_dev_config_restore(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	dev = &rte_eth_devices[port_id];

	rte_eth_dev_info_get(port_id, &dev_info);

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info.max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & 1ULL)
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}

	/* replay promiscuous configuration */
	if (rte_eth_promiscuous_get(port_id) == 1)
		rte_eth_promiscuous_enable(port_id);
	else if (rte_eth_promiscuous_get(port_id) == 0)
		rte_eth_promiscuous_disable(port_id);

	/* replay all multicast configuration */
	if (rte_eth_allmulticast_get(port_id) == 1)
		rte_eth_allmulticast_enable(port_id);
	else if (rte_eth_allmulticast_get(port_id) == 0)
		rte_eth_allmulticast_disable(port_id);
}

int
rte_eth_dev_start(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	int diag;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
			" already started\n",
			port_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	rte_eth_dev_config_restore(port_id);

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
		(*dev->dev_ops->link_update)(dev, 0);
	}
	return 0;
}

void
rte_eth_dev_stop(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
			" already stopped\n",
			port_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
}

int
rte_eth_dev_set_link_up(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
	return (*dev->dev_ops->dev_set_link_up)(dev);
}

int
rte_eth_dev_set_link_down(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
	return (*dev->dev_ops->dev_set_link_down)(dev);
}

void
rte_eth_dev_close(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_close)(dev);

	dev->data->nb_rx_queues = 0;
	rte_free(dev->data->rx_queues);
	dev->data->rx_queues = NULL;
	dev->data->nb_tx_queues = 0;
	rte_free(dev->data->tx_queues);
	dev->data->tx_queues = NULL;
}

int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint32_t mbp_buf_size;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	void **rxq;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

	/*
	 * Check the size of the mbuf data buffer.
	 * This value must be provided in the private data of the memory pool.
	 * First check that the memory pool has a valid private data.
	 */
	rte_eth_dev_info_get(port_id, &dev_info);
	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
				mp->name, (int) mp->private_data_size,
				(int) sizeof(struct rte_pktmbuf_pool_private));
		return -ENOSPC;
	}
	mbp_buf_size = rte_pktmbuf_data_room_size(mp);

	if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
		RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
				"(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
				"=%d)\n",
				mp->name,
				(int)mbp_buf_size,
				(int)(RTE_PKTMBUF_HEADROOM +
				      dev_info.min_rx_bufsize),
				(int)RTE_PKTMBUF_HEADROOM,
				(int)dev_info.min_rx_bufsize);
		return -EINVAL;
	}

	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
			nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
			nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {

		RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
			"should be: <= %hu, >= %hu, and a multiple of %hu\n",
			nb_rx_desc,
			dev_info.rx_desc_lim.nb_max,
			dev_info.rx_desc_lim.nb_min,
			dev_info.rx_desc_lim.nb_align);
		return -EINVAL;
	}

	rxq = dev->data->rx_queues;
	if (rxq[rx_queue_id]) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
					-ENOTSUP);
		(*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
		rxq[rx_queue_id] = NULL;
	}

	if (rx_conf == NULL)
		rx_conf = &dev_info.default_rxconf;

	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
					      socket_id, rx_conf, mp);
	if (!ret) {
		if (!dev->data->min_rx_buf_size ||
		    dev->data->min_rx_buf_size > mbp_buf_size)
			dev->data->min_rx_buf_size = mbp_buf_size;
	}

	return ret;
}

int
rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
		       uint16_t nb_tx_desc, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	void **txq;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);

	rte_eth_dev_info_get(port_id, &dev_info);

	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
		RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
				"should be: <= %hu, >= %hu, and a multiple of %hu\n",
				nb_tx_desc,
				dev_info.tx_desc_lim.nb_max,
				dev_info.tx_desc_lim.nb_min,
				dev_info.tx_desc_lim.nb_align);
		return -EINVAL;
	}

	txq = dev->data->tx_queues;
	if (txq[tx_queue_id]) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
					-ENOTSUP);
		(*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
		txq[tx_queue_id] = NULL;
	}

	if (tx_conf == NULL)
		tx_conf = &dev_info.default_txconf;

	return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
					       socket_id, tx_conf);
}

void
rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata __rte_unused)
{
	unsigned i;

	for (i = 0; i < unsent; i++)
		rte_pktmbuf_free(pkts[i]);
}

void
rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata)
{
	uint64_t *count = userdata;
	unsigned i;

	for (i = 0; i < unsent; i++)
		rte_pktmbuf_free(pkts[i]);

	*count += unsent;
}

int
rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
		buffer_tx_error_fn cbfn, void *userdata)
{
	buffer->error_callback = cbfn;
	buffer->error_userdata = userdata;
	return 0;
}

int
rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
{
	int ret = 0;

	if (buffer == NULL)
		return -EINVAL;

	buffer->size = size;
	if (buffer->error_callback == NULL) {
		ret = rte_eth_tx_buffer_set_err_callback(
			buffer, rte_eth_tx_buffer_drop_callback, NULL);
	}

	return ret;
}

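/*
 * Usage sketch (illustrative only, kept under #if 0 so it is not built):
 * allocating a per-queue TX buffer and initializing it, which installs the
 * default drop callback above when no error callback was set. The example_
 * name is hypothetical.
 */
#if 0
static struct rte_eth_dev_tx_buffer *
example_tx_buffer_alloc(uint16_t size, int socket)
{
	struct rte_eth_dev_tx_buffer *buffer;

	buffer = rte_zmalloc_socket("tx_buffer",
			RTE_ETH_TX_BUFFER_SIZE(size), 0, socket);
	if (buffer != NULL && rte_eth_tx_buffer_init(buffer, size) != 0) {
		rte_free(buffer);
		buffer = NULL;
	}
	return buffer;
}
#endif
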
int
rte_eth_tx_done_cleanup(uint8_t port_id, uint16_t queue_id, uint32_t free_cnt)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

	/* Validate Input Data. Bail if not valid or not supported. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);

	/* Call driver to free pending mbufs. */
	return (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
			free_cnt);
}

void
rte_eth_promiscuous_enable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
	(*dev->dev_ops->promiscuous_enable)(dev);
	dev->data->promiscuous = 1;
}

void
rte_eth_promiscuous_disable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
	dev->data->promiscuous = 0;
	(*dev->dev_ops->promiscuous_disable)(dev);
}

int
rte_eth_promiscuous_get(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	return dev->data->promiscuous;
}

void
rte_eth_allmulticast_enable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
	(*dev->dev_ops->allmulticast_enable)(dev);
	dev->data->all_multicast = 1;
}

void
rte_eth_allmulticast_disable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
	dev->data->all_multicast = 0;
	(*dev->dev_ops->allmulticast_disable)(dev);
}

int
rte_eth_allmulticast_get(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	return dev->data->all_multicast;
}

static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				    struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

void
rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc != 0)
		rte_eth_dev_atomic_read_link_status(dev, eth_link);
	else {
		RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 1);
		*eth_link = dev->data->dev_link;
	}
}

void
rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc != 0)
		rte_eth_dev_atomic_read_link_status(dev, eth_link);
	else {
		RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 0);
		*eth_link = dev->data->dev_link;
	}
}

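/*
 * Usage sketch (illustrative only, kept under #if 0 so it is not built):
 * polling the link without blocking and printing the negotiated speed.
 * The example_ name is hypothetical.
 */
#if 0
static void
example_print_link(uint8_t port_id)
{
	struct rte_eth_link link;

	rte_eth_link_get_nowait(port_id, &link);
	printf("port %u: link %s, %u Mbps\n", port_id,
	       link.link_status ? "up" : "down", link.link_speed);
}
#endif
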
int
rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}

void
rte_eth_stats_reset(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
	(*dev->dev_ops->stats_reset)(dev);
	dev->data->rx_mbuf_alloc_failed = 0;
}

static int
get_xstats_count(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	int count;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	dev = &rte_eth_devices[port_id];
	if (dev->dev_ops->xstats_get_names_by_id != NULL) {
		count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
				NULL, 0);
		if (count < 0)
			return count;
	}
	if (dev->dev_ops->xstats_get_names != NULL) {
		count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
		if (count < 0)
			return count;
	} else
		count = 0;

	count += RTE_NB_STATS;
	count += RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS) *
		 RTE_NB_RXQ_STATS;
	count += RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS) *
		 RTE_NB_TXQ_STATS;

	return count;
}

int
rte_eth_xstats_get_id_by_name(uint8_t port_id, const char *xstat_name,
		uint64_t *id)
{
	int cnt_xstats, idx_xstat;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (!id) {
		RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n");
		return -ENOMEM;
	}

	if (!xstat_name) {
		RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n");
		return -ENOMEM;
	}

	/* Get count */
	cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
	if (cnt_xstats < 0) {
		RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n");
		return -ENODEV;
	}

	/* Get id-name lookup table */
	struct rte_eth_xstat_name xstats_names[cnt_xstats];

	if (cnt_xstats != rte_eth_xstats_get_names_by_id(
			port_id, xstats_names, cnt_xstats, NULL)) {
		RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n");
		return -1;
	}

	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
		if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
			*id = idx_xstat;
			return 0;
		}
	}

	return -EINVAL;
}

int
rte_eth_xstats_get_names_by_id(uint8_t port_id,
	struct rte_eth_xstat_name *xstats_names, unsigned int size,
	uint64_t *ids)
{
	/* Get all xstats */
	if (!ids) {
		struct rte_eth_dev *dev;
		int cnt_used_entries;
		int cnt_expected_entries;
		int cnt_driver_entries;
		uint32_t idx, id_queue;
		uint16_t num_q;

		cnt_expected_entries = get_xstats_count(port_id);
		if (xstats_names == NULL || cnt_expected_entries < 0 ||
				(int)size < cnt_expected_entries)
			return cnt_expected_entries;

		/* port_id checked in get_xstats_count() */
		dev = &rte_eth_devices[port_id];
		cnt_used_entries = 0;

		for (idx = 0; idx < RTE_NB_STATS; idx++) {
			snprintf(xstats_names[cnt_used_entries].name,
				sizeof(xstats_names[0].name),
				"%s", rte_stats_strings[idx].name);
			cnt_used_entries++;
		}
		num_q = RTE_MIN(dev->data->nb_rx_queues,
				RTE_ETHDEV_QUEUE_STAT_CNTRS);
		for (id_queue = 0; id_queue < num_q; id_queue++) {
			for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
				snprintf(xstats_names[cnt_used_entries].name,
					sizeof(xstats_names[0].name),
					"rx_q%u%s",
					id_queue,
					rte_rxq_stats_strings[idx].name);
				cnt_used_entries++;
			}
		}
		num_q = RTE_MIN(dev->data->nb_tx_queues,
				RTE_ETHDEV_QUEUE_STAT_CNTRS);
		for (id_queue = 0; id_queue < num_q; id_queue++) {
			for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
				snprintf(xstats_names[cnt_used_entries].name,
					sizeof(xstats_names[0].name),
					"tx_q%u%s",
					id_queue,
					rte_txq_stats_strings[idx].name);
				cnt_used_entries++;
			}
		}

		if (dev->dev_ops->xstats_get_names_by_id != NULL) {
			/* If there are any driver-specific xstats, append them
			 * to the end of the list.
			 */
			cnt_driver_entries =
				(*dev->dev_ops->xstats_get_names_by_id)(
				dev,
				xstats_names + cnt_used_entries,
				NULL,
				size - cnt_used_entries);
			if (cnt_driver_entries < 0)
				return cnt_driver_entries;
			cnt_used_entries += cnt_driver_entries;

		} else if (dev->dev_ops->xstats_get_names != NULL) {
			/* If there are any driver-specific xstats, append them
			 * to the end of the list.
			 */
			cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
				dev,
				xstats_names + cnt_used_entries,
				size - cnt_used_entries);
			if (cnt_driver_entries < 0)
				return cnt_driver_entries;
			cnt_used_entries += cnt_driver_entries;
		}

		return cnt_used_entries;
	}
	/* Get only xstats given by IDS */
	else {
		uint16_t len, i;
		struct rte_eth_xstat_name *xstats_names_copy;

		len = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);

		xstats_names_copy =
			malloc(sizeof(struct rte_eth_xstat_name) * len);
		if (!xstats_names_copy) {
			RTE_PMD_DEBUG_TRACE(
			     "ERROR: can't allocate memory for values_copy\n");
			return -1;
		}

		rte_eth_xstats_get_names_by_id(port_id, xstats_names_copy,
				len, NULL);

		for (i = 0; i < size; i++) {
			if (ids[i] >= len) {
				RTE_PMD_DEBUG_TRACE(
					"ERROR: id value isn't valid\n");
				free(xstats_names_copy);
				return -1;
			}
			strcpy(xstats_names[i].name,
					xstats_names_copy[ids[i]].name);
		}
		free(xstats_names_copy);
		return size;
	}
}

int
rte_eth_xstats_get_names(uint8_t port_id,
	struct rte_eth_xstat_name *xstats_names,
	unsigned int size)
{
	struct rte_eth_dev *dev;
	int cnt_used_entries;
	int cnt_expected_entries;
	int cnt_driver_entries;
	uint32_t idx, id_queue;
	uint16_t num_q;

	cnt_expected_entries = get_xstats_count(port_id);
	if (xstats_names == NULL || cnt_expected_entries < 0 ||
			(int)size < cnt_expected_entries)
		return cnt_expected_entries;

	/* port_id checked in get_xstats_count() */
	dev = &rte_eth_devices[port_id];
	cnt_used_entries = 0;

	for (idx = 0; idx < RTE_NB_STATS; idx++) {
		snprintf(xstats_names[cnt_used_entries].name,
			sizeof(xstats_names[0].name),
			"%s", rte_stats_strings[idx].name);
		cnt_used_entries++;
	}
	num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (id_queue = 0; id_queue < num_q; id_queue++) {
		for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
			snprintf(xstats_names[cnt_used_entries].name,
				sizeof(xstats_names[0].name),
				"rx_q%u%s",
				id_queue, rte_rxq_stats_strings[idx].name);
			cnt_used_entries++;
		}
	}
	num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (id_queue = 0; id_queue < num_q; id_queue++) {
		for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
			snprintf(xstats_names[cnt_used_entries].name,
				sizeof(xstats_names[0].name),
				"tx_q%u%s",
				id_queue, rte_txq_stats_strings[idx].name);
			cnt_used_entries++;
		}
	}

	if (dev->dev_ops->xstats_get_names != NULL) {
		/* If there are any driver-specific xstats, append them
		 * to the end of the list.
		 */
		cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
			dev,
			xstats_names + cnt_used_entries,
			size - cnt_used_entries);
		if (cnt_driver_entries < 0)
			return cnt_driver_entries;
		cnt_used_entries += cnt_driver_entries;
	}

	return cnt_used_entries;
}

/* retrieve ethdev extended statistics */
int
rte_eth_xstats_get_by_id(uint8_t port_id, const uint64_t *ids, uint64_t *values,
	unsigned int n)
{
	/* If need all xstats */
	if (!ids) {
		struct rte_eth_stats eth_stats;
		struct rte_eth_dev *dev;
		unsigned int count = 0, i, q;
		signed int xcount = 0;
		uint64_t val, *stats_ptr;
		uint16_t nb_rxqs, nb_txqs;

		RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
		dev = &rte_eth_devices[port_id];

		nb_rxqs = RTE_MIN(dev->data->nb_rx_queues,
				RTE_ETHDEV_QUEUE_STAT_CNTRS);
		nb_txqs = RTE_MIN(dev->data->nb_tx_queues,
				RTE_ETHDEV_QUEUE_STAT_CNTRS);

		/* Return generic statistics */
		count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
			(nb_txqs * RTE_NB_TXQ_STATS);

		/* implemented by the driver */
		if (dev->dev_ops->xstats_get_by_id != NULL) {
			/* Retrieve the xstats from the driver at the end of the
			 * xstats struct. Retrieve all xstats.
			 */
			xcount = (*dev->dev_ops->xstats_get_by_id)(dev,
					NULL,
					values ? values + count : NULL,
					(n > count) ? n - count : 0);

			if (xcount < 0)
				return xcount;
		/* implemented by the driver */
		} else if (dev->dev_ops->xstats_get != NULL) {
			/* Retrieve the xstats from the driver at the end of the
			 * xstats struct. Retrieve all xstats.
			 * Compatibility for PMD without xstats_get_by_ids
			 */
			unsigned int size = (n > count) ? n - count : 1;
			struct rte_eth_xstat xstats[size];

			xcount = (*dev->dev_ops->xstats_get)(dev,
					values ? xstats : NULL, size);

			if (xcount < 0)
				return xcount;

			for (i = 0; i < (unsigned int)xcount; i++)
				values[i + count] = xstats[i].value;
		}

		if (n < count + xcount || values == NULL)
			return count + xcount;

		/* now fill the xstats structure */
		count = 0;
		rte_eth_stats_get(port_id, &eth_stats);

		/* global stats */
		for (i = 0; i < RTE_NB_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
						rte_stats_strings[i].offset);
			val = *stats_ptr;
			values[count++] = val;
		}

		/* per-rxq stats */
		for (q = 0; q < nb_rxqs; q++) {
			for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
				stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_rxq_stats_strings[i].offset +
					q * sizeof(uint64_t));
				val = *stats_ptr;
				values[count++] = val;
			}
		}

		/* per-txq stats */
		for (q = 0; q < nb_txqs; q++) {
			for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
				stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_txq_stats_strings[i].offset +
					q * sizeof(uint64_t));
				val = *stats_ptr;
				values[count++] = val;
			}
		}

		return count + xcount;
	}
	/* Need only xstats given by IDS array */
	else {
		uint16_t i, size;
		uint64_t *values_copy;

		size = rte_eth_xstats_get_by_id(port_id, NULL, NULL, 0);

		values_copy = malloc(sizeof(*values_copy) * size);
		if (!values_copy) {
			RTE_PMD_DEBUG_TRACE(
			    "ERROR: can't allocate memory for values_copy\n");
			return -1;
		}

		rte_eth_xstats_get_by_id(port_id, NULL, values_copy, size);

		for (i = 0; i < n; i++) {
			if (ids[i] >= size) {
				RTE_PMD_DEBUG_TRACE(
					"ERROR: id value isn't valid\n");
				free(values_copy);
				return -1;
			}
			values[i] = values_copy[ids[i]];
		}
		free(values_copy);
		return n;
	}
}

int
rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
	unsigned int n)
{
	struct rte_eth_stats eth_stats;
	struct rte_eth_dev *dev;
	unsigned int count = 0, i, q;
	signed int xcount = 0;
	uint64_t val, *stats_ptr;
	uint16_t nb_rxqs, nb_txqs;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	/* Return generic statistics */
	count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
		(nb_txqs * RTE_NB_TXQ_STATS);

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL) {
		/* Retrieve the xstats from the driver at the end of the
		 * xstats struct.
		 */
		xcount = (*dev->dev_ops->xstats_get)(dev,
				     xstats ? xstats + count : NULL,
				     (n > count) ? n - count : 0);

		if (xcount < 0)
			return xcount;
	}

	if (n < count + xcount || xstats == NULL)
		return count + xcount;

	/* now fill the xstats structure */
	count = 0;
	rte_eth_stats_get(port_id, &eth_stats);

	/* global stats */
	for (i = 0; i < RTE_NB_STATS; i++) {
		stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_stats_strings[i].offset);
		val = *stats_ptr;
		xstats[count++].value = val;
	}

	/* per-rxq stats */
	for (q = 0; q < nb_rxqs; q++) {
		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_rxq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}

	/* per-txq stats */
	for (q = 0; q < nb_txqs; q++) {
		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_txq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}

	for (i = 0; i < count; i++)
		xstats[i].id = i;
	/* add an offset to driver-specific stats */
	for ( ; i < count + xcount; i++)
		xstats[i].id += count;

	return count + xcount;
}

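/*
 * Usage sketch (illustrative only, kept under #if 0 so it is not built):
 * the two-call pattern for dumping all extended statistics -- probe the
 * count first, then fetch values and names. The example_ name is
 * hypothetical.
 */
#if 0
static void
example_dump_xstats(uint8_t port_id)
{
	int i, len;
	struct rte_eth_xstat *xstats;
	struct rte_eth_xstat_name *names;

	len = rte_eth_xstats_get(port_id, NULL, 0);
	if (len <= 0)
		return;

	xstats = malloc(sizeof(*xstats) * len);
	names = malloc(sizeof(*names) * len);
	if (xstats == NULL || names == NULL)
		goto out;

	if (rte_eth_xstats_get(port_id, xstats, len) == len &&
	    rte_eth_xstats_get_names(port_id, names, len) == len)
		for (i = 0; i < len; i++)
			printf("%s: %" PRIu64 "\n",
			       names[i].name, xstats[i].value);
out:
	free(xstats);
	free(names);
}
#endif
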
/* reset ethdev extended statistics */
void
rte_eth_xstats_reset(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_reset != NULL) {
		(*dev->dev_ops->xstats_reset)(dev);
		return;
	}

	/* fallback to default */
	rte_eth_stats_reset(port_id);
}

static int
set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
		uint8_t is_rx)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
	return (*dev->dev_ops->queue_stats_mapping_set)
			(dev, queue_id, stat_idx, is_rx);
}

int
rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
		uint8_t stat_idx)
{
	return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
			STAT_QMAP_TX);
}

int
rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
		uint8_t stat_idx)
{
	return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
			STAT_QMAP_RX);
}

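/*
 * Usage sketch (illustrative only, kept under #if 0 so it is not built):
 * on NICs with a limited number of per-queue counters, map TX queue 3 onto
 * stats register 0 so its counts appear in q_opackets[0]/q_obytes[0].
 * Queue and register indices are hypothetical.
 */
#if 0
static int
example_map_txq_stats(uint8_t port_id)
{
	return rte_eth_dev_set_tx_queue_stats_mapping(port_id, 3, 0);
}
#endif
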
int
rte_eth_dev_fw_version_get(uint8_t port_id, char *fw_version, size_t fw_size)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
	return (*dev->dev_ops->fw_version_get)(dev, fw_version, fw_size);
}

void
rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
{
	struct rte_eth_dev *dev;
	const struct rte_eth_desc_lim lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
	};

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
	dev_info->rx_desc_lim = lim;
	dev_info->tx_desc_lim = lim;

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
	dev_info->driver_name = dev->data->drv_name;
	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
}

int
rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask,
				 uint32_t *ptypes, int num)
{
	int i, j;
	struct rte_eth_dev *dev;
	const uint32_t *all_ptypes;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);

	if (!all_ptypes)
		return 0;

	for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
		if (all_ptypes[i] & ptype_mask) {
			if (j < num)
				ptypes[j] = all_ptypes[i];
			j++;
		}

	return j;
}

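/*
 * Usage sketch (illustrative only, kept under #if 0 so it is not built):
 * querying the L4 packet types a port can recognize. Note the return value
 * is the total count, which may exceed the array supplied. The example_
 * name and array size are hypothetical.
 */
#if 0
static void
example_dump_l4_ptypes(uint8_t port_id)
{
	int i, num;
	uint32_t ptypes[16];

	num = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L4_MASK,
					       ptypes, RTE_DIM(ptypes));
	for (i = 0; i < num && i < (int)RTE_DIM(ptypes); i++)
		printf("ptype 0x%08x supported\n", ptypes[i]);
}
#endif
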
void
rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];
	ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
}

int
rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	*mtu = dev->data->mtu;
	return 0;
}

int
rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
{
	int ret;
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);

	ret = (*dev->dev_ops->mtu_set)(dev, mtu);
	if (!ret)
		dev->data->mtu = mtu;

	return ret;
}

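/*
 * Usage sketch (illustrative only, kept under #if 0 so it is not built):
 * raising the MTU and reading it back; the cached dev->data->mtu is only
 * updated when the driver accepts the new value. The 9000-byte value and
 * the example_ name are hypothetical.
 */
#if 0
static int
example_set_jumbo_mtu(uint8_t port_id)
{
	uint16_t mtu;

	if (rte_eth_dev_set_mtu(port_id, 9000) != 0)
		return -1;
	rte_eth_dev_get_mtu(port_id, &mtu);
	return mtu == 9000 ? 0 : -1;
}
#endif
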
int
rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
		RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
		return -ENOSYS;
	}

	if (vlan_id > 4095) {
		RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
				port_id, (unsigned) vlan_id);
		return -EINVAL;
	}
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);

	return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
}

int
rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
	(*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);

	return 0;
}

int
rte_eth_dev_set_vlan_ether_type(uint8_t port_id,
				enum rte_vlan_type vlan_type,
				uint16_t tpid)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);

	return (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, tpid);
}

int
rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
{
	struct rte_eth_dev *dev;
	int ret = 0;
	int mask = 0;
	int cur, org = 0;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	/*check which option changed by application*/
	cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
	if (cur != org) {
		dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
		mask |= ETH_VLAN_STRIP_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
	if (cur != org) {
		dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
		mask |= ETH_VLAN_FILTER_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
	if (cur != org) {
		dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
		mask |= ETH_VLAN_EXTEND_MASK;
	}

	/*no change*/
	if (mask == 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
	(*dev->dev_ops->vlan_offload_set)(dev, mask);

	return ret;
}

int
rte_eth_dev_get_vlan_offload(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	int ret = 0;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.rxmode.hw_vlan_strip)
		ret |= ETH_VLAN_STRIP_OFFLOAD;

	if (dev->data->dev_conf.rxmode.hw_vlan_filter)
		ret |= ETH_VLAN_FILTER_OFFLOAD;

	if (dev->data->dev_conf.rxmode.hw_vlan_extend)
		ret |= ETH_VLAN_EXTEND_OFFLOAD;

	return ret;
}

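/*
 * Usage sketch (illustrative only, kept under #if 0 so it is not built):
 * read-modify-write of the VLAN offload flags, enabling stripping while
 * leaving the other bits untouched. The example_ name is hypothetical.
 */
#if 0
static int
example_enable_vlan_strip(uint8_t port_id)
{
	int mask = rte_eth_dev_get_vlan_offload(port_id);

	if (mask < 0)
		return mask;
	return rte_eth_dev_set_vlan_offload(port_id,
					    mask | ETH_VLAN_STRIP_OFFLOAD);
}
#endif
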
int
rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
	(*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);

	return 0;
}

int
rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
	memset(fc_conf, 0, sizeof(*fc_conf));
	return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
}

int
rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
		RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
	return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
}

int
rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
		RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	/* High water, low water validation are device specific */
	if (*dev->dev_ops->priority_flow_ctrl_set)
		return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
	return -ENOTSUP;
}

static int
rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	uint16_t i, num;

	if (!reta_conf)
		return -EINVAL;

	num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
	for (i = 0; i < num; i++) {
		if (reta_conf[i].mask)
			return 0;
	}

	return -EINVAL;
}

static int
rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size,
			 uint16_t max_rxq)
{
	uint16_t i, idx, shift;

	if (!reta_conf)
		return -EINVAL;

	if (max_rxq == 0) {
		RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if ((reta_conf[idx].mask & (1ULL << shift)) &&
			(reta_conf[idx].reta[shift] >= max_rxq)) {
			RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
				"the maximum rxq index: %u\n", idx, shift,
				reta_conf[idx].reta[shift], max_rxq);
			return -EINVAL;
		}
	}

	return 0;
}

int
rte_eth_dev_rss_reta_update(uint8_t port_id,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	/* Check mask bits */
	ret = rte_eth_check_reta_mask(reta_conf, reta_size);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];

	/* Check entry value */
	ret = rte_eth_check_reta_entry(reta_conf, reta_size,
				dev->data->nb_rx_queues);
	if (ret < 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
	return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
}

int
rte_eth_dev_rss_reta_query(uint8_t port_id,
			   struct rte_eth_rss_reta_entry64 *reta_conf,
			   uint16_t reta_size)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	/* Check mask bits */
	ret = rte_eth_check_reta_mask(reta_conf, reta_size);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
	return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
}

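/*
 * Usage sketch (illustrative only, kept under #if 0 so it is not built):
 * spreading a redirection table across the configured RX queues in
 * round-robin fashion. Each group's mask selects the entries to write;
 * reta_size is assumed to be a multiple of RTE_RETA_GROUP_SIZE and the
 * example_ name is hypothetical.
 */
#if 0
static int
example_fill_reta(uint8_t port_id, uint16_t reta_size, uint16_t nb_rx_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[reta_size /
						  RTE_RETA_GROUP_SIZE];
	uint16_t i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++) {
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
			(1ULL << (i % RTE_RETA_GROUP_SIZE));
		reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i %
			RTE_RETA_GROUP_SIZE] = i % nb_rx_queues;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}
#endif
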
int
rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev *dev;
	uint16_t rss_hash_protos;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	rss_hash_protos = rss_conf->rss_hf;
	if ((rss_hash_protos != 0) &&
	    ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
		RTE_PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
				rss_hash_protos);
		return -EINVAL;
	}
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
	return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
}

int
rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
			      struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
	return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
}

int
rte_eth_dev_udp_tunnel_port_add(uint8_t port_id,
				struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (udp_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
		return -EINVAL;
	}

	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
	return (*dev->dev_ops->udp_tunnel_port_add)(dev, udp_tunnel);
}

int
rte_eth_dev_udp_tunnel_port_delete(uint8_t port_id,
				   struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (udp_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
		return -EINVAL;
	}

	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
	return (*dev->dev_ops->udp_tunnel_port_del)(dev, udp_tunnel);
}

int
rte_eth_led_on(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
	return (*dev->dev_ops->dev_led_on)(dev);
}

int
rte_eth_led_off(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
	return (*dev->dev_ops->dev_led_off)(dev);
}

/*
 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
 * an empty spot.
 */
static int
get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	unsigned i;

	rte_eth_dev_info_get(port_id, &dev_info);

	for (i = 0; i < dev_info.max_mac_addrs; i++)
		if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
			return i;

	return -1;
}

static const struct ether_addr null_mac_addr;

int
rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
			 uint32_t pool)
{
	struct rte_eth_dev *dev;
	int index;
	uint64_t pool_mask;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);

	if (is_zero_ether_addr(addr)) {
		RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
			port_id);
		return -EINVAL;
	}
	if (pool >= ETH_64_POOLS) {
		RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
		return -EINVAL;
	}

	index = get_mac_addr_index(port_id, addr);
	if (index < 0) {
		index = get_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
				port_id);
			return -ENOSPC;
		}
	} else {
		pool_mask = dev->data->mac_pool_sel[index];

		/* Check if both MAC address and pool is already there, and do nothing */
		if (pool_mask & (1ULL << pool))
			return 0;
	}

	/* Update NIC */
	ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);

	if (ret == 0) {
		/* Update address in NIC data structure */
		ether_addr_copy(addr, &dev->data->mac_addrs[index]);

		/* Update pool bitmap in NIC data structure */
		dev->data->mac_pool_sel[index] |= (1ULL << pool);
	}

	return ret;
}

int
rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
{
	struct rte_eth_dev *dev;
	int index;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);

	index = get_mac_addr_index(port_id, addr);
	if (index == 0) {
		RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
		return -EADDRINUSE;
	} else if (index < 0)
		return 0;  /* Do nothing if address wasn't found */

	/* Update NIC */
	(*dev->dev_ops->mac_addr_remove)(dev, index);

	/* Update address in NIC data structure */
	ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);

	/* reset pool bitmap */
	dev->data->mac_pool_sel[index] = 0;

	return 0;
}

int
rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (!is_valid_assigned_ether_addr(addr))
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);

	/* Update default address in NIC data structure */
	ether_addr_copy(addr, &dev->data->mac_addrs[0]);

	(*dev->dev_ops->mac_addr_set)(dev, addr);

	return 0;
}

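/*
 * Illustrative usage sketch (#if 0, not compiled): installing a secondary
 * unicast MAC on pool 0, then promoting it to the default (index 0) slot.
 * The locally administered address and the helper name are assumptions for
 * the example.
 */
#if 0
static int
example_mac_setup(uint8_t port_id)
{
	struct ether_addr mac = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};
	int ret;

	ret = rte_eth_dev_mac_addr_add(port_id, &mac, 0);
	if (ret != 0)
		return ret;

	/* Make the new address the port's default MAC. */
	return rte_eth_dev_default_mac_addr_set(port_id, &mac);
}
#endif
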
/*
 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
 * an empty spot.
 */
static int
get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	unsigned i;

	rte_eth_dev_info_get(port_id, &dev_info);
	if (!dev->data->hash_mac_addrs)
		return -1;

	for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
		if (memcmp(addr, &dev->data->hash_mac_addrs[i],
			ETHER_ADDR_LEN) == 0)
			return i;

	return -1;
}

int
rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
				uint8_t on)
{
	int index;
	int ret;
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (is_zero_ether_addr(addr)) {
		RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
			port_id);
		return -EINVAL;
	}

	index = get_hash_mac_addr_index(port_id, addr);
	/* Check if it's already there, and do nothing */
	if ((index >= 0) && (on))
		return 0;

	if (index < 0) {
		if (!on) {
			RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
				"set in UTA\n", port_id);
			return -EINVAL;
		}

		index = get_hash_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
					port_id);
			return -ENOSPC;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
	ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
	if (ret == 0) {
		/* Update address in NIC data structure */
		if (on)
			ether_addr_copy(addr,
					&dev->data->hash_mac_addrs[index]);
		else
			ether_addr_copy(&null_mac_addr,
					&dev->data->hash_mac_addrs[index]);
	}

	return ret;
}

int
rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
	return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
}

int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
				uint16_t tx_rate)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_link link;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	rte_eth_dev_info_get(port_id, &dev_info);
	link = dev->data->dev_link;

	/* Valid queue ids are 0..max_tx_queues-1. */
	if (queue_idx >= dev_info.max_tx_queues) {
		RTE_PMD_DEBUG_TRACE("set queue rate limit:port %d: "
				"invalid queue id=%d\n", port_id, queue_idx);
		return -EINVAL;
	}

	if (tx_rate > link.link_speed) {
		RTE_PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
				"bigger than link speed %d\n",
				tx_rate, link.link_speed);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
	return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
}

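/*
 * Illustrative usage sketch (#if 0, not compiled): capping TX queue 0 at
 * 100 Mbps. tx_rate shares the Mbps unit of link_speed; the queue id and
 * rate are assumptions for the example, and PMDs without this op return
 * -ENOTSUP.
 */
#if 0
static int
example_cap_tx_queue(uint8_t port_id)
{
	return rte_eth_set_queue_rate_limit(port_id, 0, 100);
}
#endif
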
int
rte_eth_mirror_rule_set(uint8_t port_id,
			struct rte_eth_mirror_conf *mirror_conf,
			uint8_t rule_id, uint8_t on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (mirror_conf->rule_type == 0) {
		RTE_PMD_DEBUG_TRACE("mirror rule type cannot be 0.\n");
		return -EINVAL;
	}

	if (mirror_conf->dst_pool >= ETH_64_POOLS) {
		RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
				ETH_64_POOLS - 1);
		return -EINVAL;
	}

	if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
	     ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
	    (mirror_conf->pool_mask == 0)) {
		RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask cannot be 0.\n");
		return -EINVAL;
	}

	if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
	    mirror_conf->vlan.vlan_mask == 0) {
		RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask cannot be 0.\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);

	return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
}

int
rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);

	return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
}

int
rte_eth_dev_callback_register(uint8_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
					sizeof(struct rte_eth_dev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}

int
rte_eth_dev_callback_unregister(uint8_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	return ret;
}

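/*
 * Illustrative usage sketch (#if 0, not compiled): a link-state-change
 * callback and its registration. The callback runs in the interrupt thread
 * via _rte_eth_dev_callback_process(); the function names are assumptions
 * for the example.
 */
#if 0
static void
example_lsc_cb(uint8_t port_id, enum rte_eth_event_type event, void *cb_arg)
{
	struct rte_eth_link link;

	RTE_SET_USED(event);	/* only RTE_ETH_EVENT_INTR_LSC is registered */
	RTE_SET_USED(cb_arg);

	rte_eth_link_get_nowait(port_id, &link);
	printf("port %u link %s\n", port_id,
	       link.link_status ? "up" : "down");
}

static int
example_register_lsc(uint8_t port_id)
{
	return rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
					     example_lsc_cb, NULL);
}
#endif
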
void
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event, void *cb_arg)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;

	rte_spinlock_lock(&rte_eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		if (cb_arg != NULL)
			dev_cb.cb_arg = cb_arg;

		rte_spinlock_unlock(&rte_eth_dev_cb_lock);
		dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
}

int
rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
{
	uint32_t vec;
	struct rte_eth_dev *dev;
	struct rte_intr_handle *intr_handle;
	uint16_t qid;
	int rc;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	if (!dev->intr_handle) {
		RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
		return -ENOTSUP;
	}

	intr_handle = dev->intr_handle;
	if (!intr_handle->intr_vec) {
		RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
		return -EPERM;
	}

	for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
		vec = intr_handle->intr_vec[qid];
		rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
		if (rc && rc != -EEXIST) {
			RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
					" op %d epfd %d vec %u\n",
					port_id, qid, op, epfd, vec);
		}
	}

	return 0;
}

const struct rte_memzone *
rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
			uint16_t queue_id, size_t size, unsigned align,
			int socket_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
		 dev->data->drv_name, ring_name,
		 dev->data->port_id, queue_id);

	mz = rte_memzone_lookup(z_name);
	if (mz)
		return mz;

	if (rte_xen_dom0_supported())
		return rte_memzone_reserve_bounded(z_name, size, socket_id,
						   0, align, RTE_PGSIZE_2M);
	else
		return rte_memzone_reserve_aligned(z_name, size, socket_id,
						   0, align);
}

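/*
 * Illustrative usage sketch (#if 0, not compiled): how a PMD's queue-setup
 * path might carve out descriptor-ring memory through this helper. The ring
 * name, descriptor count, and 16-byte descriptor size are assumptions for
 * the example.
 */
#if 0
static const struct rte_memzone *
example_rx_ring_zone(struct rte_eth_dev *dev, uint16_t queue_id,
		     unsigned int socket_id)
{
	/* 512 descriptors of 16 bytes each, cache-line aligned. */
	return rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
					512 * 16, RTE_CACHE_LINE_SIZE,
					socket_id);
}
#endif
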
int
rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
			  int epfd, int op, void *data)
{
	uint32_t vec;
	struct rte_eth_dev *dev;
	struct rte_intr_handle *intr_handle;
	int rc;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (!dev->intr_handle) {
		RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
		return -ENOTSUP;
	}

	intr_handle = dev->intr_handle;
	if (!intr_handle->intr_vec) {
		RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
		return -EPERM;
	}

	vec = intr_handle->intr_vec[queue_id];
	rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
	if (rc && rc != -EEXIST) {
		RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
				" op %d epfd %d vec %u\n",
				port_id, queue_id, op, epfd, vec);
		return rc;
	}

	return 0;
}

int
rte_eth_dev_rx_intr_enable(uint8_t port_id,
			   uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
	return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
}

int
rte_eth_dev_rx_intr_disable(uint8_t port_id,
			    uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
	return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
}

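/*
 * Illustrative usage sketch (#if 0, not compiled): the enable/wait/disable
 * cycle an interrupt-driven polling loop would use, modelled on the
 * l3fwd-power example. The queue id, 10 ms timeout, and helper name are
 * assumptions for the example.
 */
#if 0
static int
example_rx_intr_wait(uint8_t port_id)
{
	struct rte_epoll_event ev;
	int ret;

	/* Map queue 0's interrupt vector into the per-thread epoll fd. */
	ret = rte_eth_dev_rx_intr_ctl_q(port_id, 0, RTE_EPOLL_PER_THREAD,
					RTE_INTR_EVENT_ADD, NULL);
	if (ret != 0)
		return ret;

	ret = rte_eth_dev_rx_intr_enable(port_id, 0);
	if (ret != 0)
		return ret;

	/* Sleep for up to 10 ms waiting for traffic. */
	ret = rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, 10);

	/* Switch back to pure polling before draining the queue. */
	rte_eth_dev_rx_intr_disable(port_id, 0);
	return ret < 0 ? ret : 0;
}
#endif
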
#ifdef RTE_NIC_BYPASS
int rte_eth_dev_bypass_init(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
	(*dev->dev_ops->bypass_init)(dev);
	return 0;
}

int
rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
	(*dev->dev_ops->bypass_state_show)(dev, state);
	return 0;
}

int
rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
	(*dev->dev_ops->bypass_state_set)(dev, new_state);
	return 0;
}

int
rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_show, -ENOTSUP);
	(*dev->dev_ops->bypass_event_show)(dev, event, state);
	return 0;
}

int
rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
	(*dev->dev_ops->bypass_event_set)(dev, event, state);
	return 0;
}

int
rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
	(*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
	return 0;
}

int
rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
	(*dev->dev_ops->bypass_ver_show)(dev, ver);
	return 0;
}

int
rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
	(*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
	return 0;
}

int
rte_eth_dev_bypass_wd_reset(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
	(*dev->dev_ops->bypass_wd_reset)(dev);
	return 0;
}
#endif

int
rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
	return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
				RTE_ETH_FILTER_NOP, NULL);
}

int
rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
			enum rte_filter_op filter_op, void *arg)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
	return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
}

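/*
 * Illustrative usage sketch (#if 0, not compiled): probing for ntuple
 * support, then steering TCP port 80 traffic to RX queue 1. All filter
 * field values are assumptions for the example.
 */
#if 0
static int
example_steer_http(uint8_t port_id)
{
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_port = rte_cpu_to_be_16(80),
		.dst_port_mask = UINT16_MAX,
		.proto = IPPROTO_TCP,
		.proto_mask = UINT8_MAX,
		.priority = 1,
		.queue = 1,
	};

	if (rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_NTUPLE) != 0)
		return -ENOTSUP;

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
				       RTE_ETH_FILTER_ADD, &filter);
}
#endif
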
void *
rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}
	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.rx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&rte_eth_rx_cb_lock);
	/* Add the callback in FIFO order. */
	struct rte_eth_rxtx_callback *tail =
		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];

	if (!tail) {
		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;

	} else {
		while (tail->next)
			tail = tail->next;
		tail->next = cb;
	}
	rte_spinlock_unlock(&rte_eth_rx_cb_lock);

	return cb;
}

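/*
 * Illustrative usage sketch (#if 0, not compiled): a post-RX callback that
 * counts delivered packets without touching them. The counter and helper
 * names are assumptions for the example.
 */
#if 0
static uint64_t example_rx_count;

static uint16_t
example_count_cb(uint8_t port_id, uint16_t queue_id, struct rte_mbuf *pkts[],
		 uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
{
	RTE_SET_USED(port_id);
	RTE_SET_USED(queue_id);
	RTE_SET_USED(pkts);
	RTE_SET_USED(max_pkts);
	RTE_SET_USED(user_param);

	example_rx_count += nb_pkts;
	return nb_pkts;	/* keep every packet */
}

static void *
example_install_count_cb(uint8_t port_id, uint16_t queue_id)
{
	return rte_eth_add_rx_callback(port_id, queue_id,
				       example_count_cb, NULL);
}
#endif
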
void *
rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.rx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&rte_eth_rx_cb_lock);
	/* Add the callback at the first position. */
	cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
	rte_smp_wmb();
	rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
	rte_spinlock_unlock(&rte_eth_rx_cb_lock);

	return cb;
}

void *
rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
		rte_tx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.tx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&rte_eth_tx_cb_lock);
	/* Add the callback in FIFO order. */
	struct rte_eth_rxtx_callback *tail =
		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];

	if (!tail) {
		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;

	} else {
		while (tail->next)
			tail = tail->next;
		tail->next = cb;
	}
	rte_spinlock_unlock(&rte_eth_tx_cb_lock);

	return cb;
}

int
rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
		struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return -ENOTSUP;
#endif
	/* Check input parameters. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	if (user_cb == NULL ||
			queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
		return -EINVAL;

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_eth_rxtx_callback *cb;
	struct rte_eth_rxtx_callback **prev_cb;
	int ret = -EINVAL;

	rte_spinlock_lock(&rte_eth_rx_cb_lock);
	prev_cb = &dev->post_rx_burst_cbs[queue_id];
	for (; *prev_cb != NULL; prev_cb = &cb->next) {
		cb = *prev_cb;
		if (cb == user_cb) {
			/* Remove the user cb from the callback list. */
			*prev_cb = cb->next;
			ret = 0;
			break;
		}
	}
	rte_spinlock_unlock(&rte_eth_rx_cb_lock);

	return ret;
}

int
rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
		struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return -ENOTSUP;
#endif
	/* Check input parameters. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	if (user_cb == NULL ||
			queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
		return -EINVAL;

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_eth_rxtx_callback *cb;
	struct rte_eth_rxtx_callback **prev_cb;
	int ret = -EINVAL;

	rte_spinlock_lock(&rte_eth_tx_cb_lock);
	prev_cb = &dev->pre_tx_burst_cbs[queue_id];
	for (; *prev_cb != NULL; prev_cb = &cb->next) {
		cb = *prev_cb;
		if (cb == user_cb) {
			/* Remove the user cb from the callback list. */
			*prev_cb = cb->next;
			ret = 0;
			break;
		}
	}
	rte_spinlock_unlock(&rte_eth_tx_cb_lock);

	return ret;
}

int
rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (qinfo == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);

	memset(qinfo, 0, sizeof(*qinfo));
	dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
	return 0;
}

int
rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (qinfo == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);

	memset(qinfo, 0, sizeof(*qinfo));
	dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
	return 0;
}

int
rte_eth_dev_set_mc_addr_list(uint8_t port_id,
			     struct ether_addr *mc_addr_set,
			     uint32_t nb_mc_addr)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
	return dev->dev_ops->set_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
}

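/*
 * Illustrative usage sketch (#if 0, not compiled): replacing the whole
 * multicast filter with two group addresses. The list is absolute, so
 * passing nb_mc_addr == 0 clears it; the chosen addresses are assumptions
 * for the example.
 */
#if 0
static int
example_set_mc_filter(uint8_t port_id)
{
	struct ether_addr mc[2] = {
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb } },
	};

	return rte_eth_dev_set_mc_addr_list(port_id, mc, 2);
}
#endif
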
int
rte_eth_timesync_enable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
	return (*dev->dev_ops->timesync_enable)(dev);
}

int
rte_eth_timesync_disable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
	return (*dev->dev_ops->timesync_disable)(dev);
}

int
rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp,
				   uint32_t flags)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
	return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
}

int
rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
	return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
}

int
rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
	return (*dev->dev_ops->timesync_adjust_time)(dev, delta);
}

int
rte_eth_timesync_read_time(uint8_t port_id, struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
	return (*dev->dev_ops->timesync_read_time)(dev, timestamp);
}

int
rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
	return (*dev->dev_ops->timesync_write_time)(dev, timestamp);
}

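/*
 * Illustrative usage sketch (#if 0, not compiled): enabling PTP timestamping
 * and stepping the NIC clock by the whole-second offset from the host clock.
 * Using CLOCK_REALTIME as the reference and ignoring sub-second error are
 * assumptions for the example.
 */
#if 0
static int
example_sync_clock(uint8_t port_id)
{
	struct timespec host, nic;
	int ret;

	ret = rte_eth_timesync_enable(port_id);
	if (ret != 0)
		return ret;

	clock_gettime(CLOCK_REALTIME, &host);
	ret = rte_eth_timesync_read_time(port_id, &nic);
	if (ret != 0)
		return ret;

	/* Coarse correction only; a PTP servo would refine this. */
	return rte_eth_timesync_adjust_time(port_id,
			(int64_t)(host.tv_sec - nic.tv_sec) * 1000000000LL);
}
#endif
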
int
rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
	return (*dev->dev_ops->get_reg)(dev, info);
}

int
rte_eth_dev_get_eeprom_length(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
	return (*dev->dev_ops->get_eeprom_length)(dev);
}

int
rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
	return (*dev->dev_ops->get_eeprom)(dev, info);
}

int
rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
	return (*dev->dev_ops->set_eeprom)(dev, info);
}

int
rte_eth_dev_get_dcb_info(uint8_t port_id,
			 struct rte_eth_dcb_info *dcb_info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
	return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
}

int
rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id,
				    struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (l2_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
		return -EINVAL;
	}

	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
				-ENOTSUP);
	return (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev, l2_tunnel);
}

int
rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
				  struct rte_eth_l2_tunnel_conf *l2_tunnel,
				  uint32_t mask,
				  uint8_t en)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (l2_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
		return -EINVAL;
	}

	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
		return -EINVAL;
	}

	if (mask == 0) {
		RTE_PMD_DEBUG_TRACE("Mask should have a value.\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
				-ENOTSUP);
	return (*dev->dev_ops->l2_tunnel_offload_set)(dev, l2_tunnel, mask, en);
}