/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_compat.h>

#include "rte_ether.h"
#include "rte_ethdev.h"
#include "rte_ethdev_driver.h"
#include "ethdev_profile.h"
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static struct rte_eth_dev_data *rte_eth_dev_data;
static uint8_t eth_dev_last_created_port;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) / \
		sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};

#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) / \
		sizeof(rte_txq_stats_strings[0]))
#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} rte_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
	RTE_RX_OFFLOAD_BIT2STR(CRC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
};

#undef RTE_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} rte_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
};

#undef RTE_TX_OFFLOAD_BIT2STR
/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user
 * application, the pointer to the parameters for the callback, and the
 * event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	void *ret_param;                        /**< Return parameter */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};
uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
	       rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
	       rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}
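
/*
 * Usage sketch (illustrative only, not part of the library): iterating
 * over all valid ports with rte_eth_find_next(), which is essentially
 * what the RTE_ETH_FOREACH_DEV() macro expands to in this release:
 *
 *	uint16_t pid;
 *
 *	for (pid = rte_eth_find_next(0); pid < RTE_MAX_ETHPORTS;
 *	     pid = rte_eth_find_next(pid + 1))
 *		printf("port %u is attached\n", pid);
 */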
static void
rte_eth_dev_data_alloc(void)
{
	const unsigned flags = 0;
	const struct rte_memzone *mz;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
				RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
				rte_socket_id(), flags);
	} else
		mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
	if (mz == NULL)
		rte_panic("Cannot allocate memzone for ethernet port data\n");

	rte_eth_dev_data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(rte_eth_dev_data, 0,
				RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
}
struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}
static uint16_t
rte_eth_dev_find_free_port(void)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].state == RTE_ETH_DEV_UNUSED)
			return i;
	}
	return RTE_MAX_ETHPORTS;
}
static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &rte_eth_dev_data[port_id];
	eth_dev->state = RTE_ETH_DEV_ATTACHED;

	eth_dev_last_created_port = port_id;

	return eth_dev;
}
struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev;

	port_id = rte_eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
		return NULL;
	}

	if (rte_eth_dev_data == NULL)
		rte_eth_dev_data_alloc();

	if (rte_eth_dev_allocated(name) != NULL) {
		RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
				name);
		return NULL;
	}

	memset(&rte_eth_dev_data[port_id], 0, sizeof(struct rte_eth_dev_data));
	eth_dev = eth_dev_get(port_id);
	snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
	eth_dev->data->port_id = port_id;
	eth_dev->data->mtu = ETHER_MTU;

	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_NEW, NULL);

	return eth_dev;
}
/*
 * Attach to a port already registered by the primary process, which
 * makes sure that the same device would have the same port id both
 * in the primary and secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev;

	if (rte_eth_dev_data == NULL)
		rte_eth_dev_data_alloc();

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(rte_eth_dev_data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_PMD_DEBUG_TRACE(
			"device %s is not driven by the primary process\n",
			name);
		return NULL;
	}

	eth_dev = eth_dev_get(i);
	RTE_ASSERT(eth_dev->data->port_id == i);

	return eth_dev;
}
int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	eth_dev->state = RTE_ETH_DEV_UNUSED;

	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_DESTROY, NULL);

	return 0;
}
int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}

int
rte_eth_dev_socket_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}

void * __rte_experimental
rte_eth_dev_get_sec_ctx(uint8_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	return count;
}
int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (name == NULL) {
		RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[i].data',
	 * because it might be overwritten by VDEV PMD */
	tmp = rte_eth_dev_data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint32_t i;

	if (name == NULL) {
		RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
		return -EINVAL;
	}

	RTE_ETH_FOREACH_DEV(i) {
		if (!strncmp(name,
			rte_eth_dev_data[i].name, strlen(name))) {

			*port_id = i;

			return 0;
		}
	}
	return -ENODEV;
}
static int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}
/* attach the new device, then store port_id of the device */
int
rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
{
	int ret = -1;
	int current = rte_eth_dev_count();
	char *name = NULL;
	char *args = NULL;

	if ((devargs == NULL) || (port_id == NULL)) {
		ret = -EINVAL;
		goto err;
	}

	/* parse devargs, then retrieve device name and args */
	if (rte_eal_parse_devargs_str(devargs, &name, &args))
		goto err;

	ret = rte_eal_dev_attach(name, args);
	if (ret < 0)
		goto err;

	/* no point looking at the port count if no port exists */
	if (!rte_eth_dev_count()) {
		RTE_LOG(ERR, EAL, "No port found for device (%s)\n", name);
		ret = -1;
		goto err;
	}

	/* if nothing happened, there is a bug here, since some driver told us
	 * it did attach a device, but did not create a port.
	 */
	if (current == rte_eth_dev_count()) {
		ret = -1;
		goto err;
	}

	*port_id = eth_dev_last_created_port;
	ret = 0;

err:
	free(name);
	free(args);
	return ret;
}
/* detach the device, then store the name of the device */
int
rte_eth_dev_detach(uint16_t port_id, char *name)
{
	uint32_t dev_flags;
	int ret = -1;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (name == NULL) {
		ret = -EINVAL;
		goto err;
	}

	dev_flags = rte_eth_devices[port_id].data->dev_flags;
	if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
		RTE_LOG(ERR, EAL, "Port %" PRIu16 " is bonded, cannot detach\n",
			port_id);
		ret = -ENOTSUP;
		goto err;
	}

	snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
		 "%s", rte_eth_devices[port_id].data->name);

	ret = rte_eal_dev_detach(rte_eth_devices[port_id].device);
	if (ret < 0)
		goto err;

	rte_eth_dev_release_port(&rte_eth_devices[port_id]);
	return 0;

err:
	return ret;
}
static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	void **rxq;
	unsigned i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);
		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (rxq == NULL)
			return -(ENOMEM);
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(rxq + old_nb_queues, 0,
				sizeof(rxq[0]) * new_qs);
		}

		dev->data->rx_queues = rxq;

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);

		rte_free(dev->data->rx_queues);
		dev->data->rx_queues = NULL;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}
int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
			" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
							     rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
			" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}
int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
			" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev,
							     tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
			" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}
static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	void **txq;
	unsigned i;

	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
				sizeof(dev->data->tx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);
		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (txq == NULL)
			return -ENOMEM;
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(txq + old_nb_queues, 0,
				sizeof(txq[0]) * new_qs);
		}

		dev->data->tx_queues = txq;

	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);

		rte_free(dev->data->tx_queues);
		dev->data->tx_queues = NULL;
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}
uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case ETH_SPEED_NUM_10M:
		return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
	case ETH_SPEED_NUM_100M:
		return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
	case ETH_SPEED_NUM_1G:
		return ETH_LINK_SPEED_1G;
	case ETH_SPEED_NUM_2_5G:
		return ETH_LINK_SPEED_2_5G;
	case ETH_SPEED_NUM_5G:
		return ETH_LINK_SPEED_5G;
	case ETH_SPEED_NUM_10G:
		return ETH_LINK_SPEED_10G;
	case ETH_SPEED_NUM_20G:
		return ETH_LINK_SPEED_20G;
	case ETH_SPEED_NUM_25G:
		return ETH_LINK_SPEED_25G;
	case ETH_SPEED_NUM_40G:
		return ETH_LINK_SPEED_40G;
	case ETH_SPEED_NUM_50G:
		return ETH_LINK_SPEED_50G;
	case ETH_SPEED_NUM_56G:
		return ETH_LINK_SPEED_56G;
	case ETH_SPEED_NUM_100G:
		return ETH_LINK_SPEED_100G;
	default:
		return 0;
	}
}
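
/*
 * Usage sketch (illustrative only): building the link_speeds bitmap of
 * struct rte_eth_conf from a numeric speed, e.g. forcing 10G full duplex
 * instead of autonegotiation:
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.link_speeds = ETH_LINK_SPEED_FIXED |
 *		rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX);
 */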
/**
 * A conversion function from rxmode bitfield API.
 */
static void
rte_eth_convert_rx_offload_bitfield(const struct rte_eth_rxmode *rxmode,
				    uint64_t *rx_offloads)
{
	uint64_t offloads = 0;

	if (rxmode->header_split == 1)
		offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
	if (rxmode->hw_ip_checksum == 1)
		offloads |= DEV_RX_OFFLOAD_CHECKSUM;
	if (rxmode->hw_vlan_filter == 1)
		offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
	if (rxmode->hw_vlan_strip == 1)
		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
	if (rxmode->hw_vlan_extend == 1)
		offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
	if (rxmode->jumbo_frame == 1)
		offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	if (rxmode->hw_strip_crc == 1)
		offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
	if (rxmode->enable_scatter == 1)
		offloads |= DEV_RX_OFFLOAD_SCATTER;
	if (rxmode->enable_lro == 1)
		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
	if (rxmode->hw_timestamp == 1)
		offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
	if (rxmode->security == 1)
		offloads |= DEV_RX_OFFLOAD_SECURITY;

	*rx_offloads = offloads;
}
/**
 * A conversion function from rxmode offloads API.
 */
static void
rte_eth_convert_rx_offloads(const uint64_t rx_offloads,
			    struct rte_eth_rxmode *rxmode)
{
	if (rx_offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
		rxmode->header_split = 1;
	else
		rxmode->header_split = 0;
	if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
		rxmode->hw_ip_checksum = 1;
	else
		rxmode->hw_ip_checksum = 0;
	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
		rxmode->hw_vlan_filter = 1;
	else
		rxmode->hw_vlan_filter = 0;
	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
		rxmode->hw_vlan_strip = 1;
	else
		rxmode->hw_vlan_strip = 0;
	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
		rxmode->hw_vlan_extend = 1;
	else
		rxmode->hw_vlan_extend = 0;
	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
		rxmode->jumbo_frame = 1;
	else
		rxmode->jumbo_frame = 0;
	if (rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)
		rxmode->hw_strip_crc = 1;
	else
		rxmode->hw_strip_crc = 0;
	if (rx_offloads & DEV_RX_OFFLOAD_SCATTER)
		rxmode->enable_scatter = 1;
	else
		rxmode->enable_scatter = 0;
	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
		rxmode->enable_lro = 1;
	else
		rxmode->enable_lro = 0;
	if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
		rxmode->hw_timestamp = 1;
	else
		rxmode->hw_timestamp = 0;
	if (rx_offloads & DEV_RX_OFFLOAD_SECURITY)
		rxmode->security = 1;
	else
		rxmode->security = 0;
}
const char * __rte_experimental
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
		if (offload == rte_rx_offload_names[i].offload) {
			name = rte_rx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char * __rte_experimental
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
		if (offload == rte_tx_offload_names[i].offload) {
			name = rte_tx_offload_names[i].name;
			break;
		}
	}

	return name;
}
int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf local_conf = *dev_conf;
	int diag;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_PMD_DEBUG_TRACE(
			"Number of RX queues requested (%u) is greater than max supported(%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		return -EINVAL;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_PMD_DEBUG_TRACE(
			"Number of TX queues requested (%u) is greater than max supported(%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	/*
	 * Convert between the offloads API to enable PMDs to support
	 * only one of them.
	 */
	if (dev_conf->rxmode.ignore_offload_bitfield == 0) {
		rte_eth_convert_rx_offload_bitfield(
			&dev_conf->rxmode, &local_conf.rxmode.offloads);
	} else {
		rte_eth_convert_rx_offloads(dev_conf->rxmode.offloads,
					    &local_conf.rxmode);
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));
	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_rx_q == 0 && nb_tx_q == 0) {
		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d both rx and tx queue cannot be 0\n", port_id);
		return -EINVAL;
	}

	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
				port_id, nb_rx_q, dev_info.max_rx_queues);
		return -EINVAL;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
				port_id, nb_tx_q, dev_info.max_tx_queues);
		return -EINVAL;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
		(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
			RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
					dev->device->driver->name);
			return -EINVAL;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n",
				    dev->device->driver->name);
		return -EINVAL;
	}

	/*
	 * If jumbo frames are enabled, check that the maximum RX packet
	 * length is supported by the configured device.
	 */
	if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (dev_conf->rxmode.max_rx_pkt_len >
		    dev_info.max_rx_pktlen) {
			RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" > max valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)dev_info.max_rx_pktlen);
			return -EINVAL;
		} else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
			RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" < min valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)ETHER_MIN_LEN);
			return -EINVAL;
		}
	} else {
		if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
			dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
			/* Use default value */
			dev->data->dev_conf.rxmode.max_rx_pkt_len =
							ETHER_MAX_LEN;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
				port_id, diag);
		return diag;
	}

	diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
				port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		return diag;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
				port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		return eth_err(port_id, diag);
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_profile_rx_init(port_id, dev);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d __rte_eth_profile_rx_init = %d\n",
				port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		return eth_err(port_id, diag);
	}

	return 0;
}
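
/*
 * Usage sketch (illustrative only; error handling trimmed, offload choice
 * arbitrary): a minimal port configuration with one RX and one TX queue,
 * opting into the offloads API instead of the legacy rxmode bitfield:
 *
 *	struct rte_eth_conf port_conf = { 0 };
 *
 *	port_conf.rxmode.ignore_offload_bitfield = 1;
 *	port_conf.rxmode.offloads = DEV_RX_OFFLOAD_CRC_STRIP;
 *	if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot configure port\n");
 */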
void
_rte_eth_dev_reset(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
			"port %d must be stopped to allow reset\n",
			dev->data->port_id);
		return;
	}

	rte_eth_dev_rx_queue_config(dev, 0);
	rte_eth_dev_tx_queue_config(dev, 0);

	memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}
static void
rte_eth_dev_config_restore(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	dev = &rte_eth_devices[port_id];

	rte_eth_dev_info_get(port_id, &dev_info);

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info.max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & 1ULL)
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}

	/* replay promiscuous configuration */
	if (rte_eth_promiscuous_get(port_id) == 1)
		rte_eth_promiscuous_enable(port_id);
	else if (rte_eth_promiscuous_get(port_id) == 0)
		rte_eth_promiscuous_disable(port_id);

	/* replay all multicast configuration */
	if (rte_eth_allmulticast_get(port_id) == 1)
		rte_eth_allmulticast_enable(port_id);
	else if (rte_eth_allmulticast_get(port_id) == 0)
		rte_eth_allmulticast_disable(port_id);
}
int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int diag;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
			" already started\n",
			port_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	rte_eth_dev_config_restore(port_id);

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
		(*dev->dev_ops->link_update)(dev, 0);
	}
	return 0;
}
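
/*
 * Usage sketch (illustrative only; port_id assumed configured above):
 * the usual start sequence, then reading the link. Note that
 * rte_eth_dev_start() already replays MAC, promiscuous and multicast
 * settings through rte_eth_dev_config_restore().
 *
 *	struct rte_eth_link link;
 *
 *	if (rte_eth_dev_start(port_id) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot start port\n");
 *	rte_eth_link_get(port_id, &link);
 *	printf("port %u: %u Mbps\n", port_id, link.link_speed);
 */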
void
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
			" already stopped\n",
			port_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
}
int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}
void
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_close)(dev);

	dev->data->nb_rx_queues = 0;
	rte_free(dev->data->rx_queues);
	dev->data->rx_queues = NULL;
	dev->data->nb_tx_queues = 0;
	rte_free(dev->data->tx_queues);
	dev->data->tx_queues = NULL;
}
int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);

	rte_eth_dev_stop(port_id);
	ret = dev->dev_ops->dev_reset(dev);

	return eth_err(port_id, ret);
}
int __rte_experimental
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);

	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	return ret;
}
int
rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint32_t mbp_buf_size;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf local_conf;
	void **rxq;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

	/*
	 * Check the size of the mbuf data buffer.
	 * This value must be provided in the private data of the memory pool.
	 * First check that the memory pool has a valid private data.
	 */
	rte_eth_dev_info_get(port_id, &dev_info);
	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
				mp->name, (int) mp->private_data_size,
				(int) sizeof(struct rte_pktmbuf_pool_private));
		return -ENOSPC;
	}
	mbp_buf_size = rte_pktmbuf_data_room_size(mp);

	if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
		RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
			"(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
			"=%d)\n",
			mp->name,
			(int)mbp_buf_size,
			(int)(RTE_PKTMBUF_HEADROOM +
				dev_info.min_rx_bufsize),
			(int)RTE_PKTMBUF_HEADROOM,
			(int)dev_info.min_rx_bufsize);
		return -EINVAL;
	}
	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
			nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
			nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {

		RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
			"should be: <= %hu, >= %hu, and a multiple of %hu\n",
			nb_rx_desc,
			dev_info.rx_desc_lim.nb_max,
			dev_info.rx_desc_lim.nb_min,
			dev_info.rx_desc_lim.nb_align);
		return -EINVAL;
	}

	rxq = dev->data->rx_queues;
	if (rxq[rx_queue_id]) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
					-ENOTSUP);
		(*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
		rxq[rx_queue_id] = NULL;
	}

	if (rx_conf == NULL)
		rx_conf = &dev_info.default_rxconf;

	local_conf = *rx_conf;
	if (dev->data->dev_conf.rxmode.ignore_offload_bitfield == 0) {
		/**
		 * Reflect port offloads to queue offloads in order for
		 * offloads to not be discarded.
		 */
		rte_eth_convert_rx_offload_bitfield(&dev->data->dev_conf.rxmode,
						    &local_conf.offloads);
	}

	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
					      socket_id, &local_conf, mp);
	if (!ret) {
		if (!dev->data->min_rx_buf_size ||
		    dev->data->min_rx_buf_size > mbp_buf_size)
			dev->data->min_rx_buf_size = mbp_buf_size;
	}

	return eth_err(port_id, ret);
}
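
/*
 * Usage sketch (illustrative only; pool sizes and descriptor count are
 * arbitrary): setting up RX queue 0 backed by a fresh mbuf pool, after
 * rte_eth_dev_configure(). Passing NULL for rx_conf selects the PMD's
 * default_rxconf.
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *	if (mp == NULL ||
 *	    rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *				   NULL, mp) < 0)
 *		rte_exit(EXIT_FAILURE, "RX queue setup failed\n");
 */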
/**
 * A conversion function from txq_flags API.
 */
static void
rte_eth_convert_txq_flags(const uint32_t txq_flags, uint64_t *tx_offloads)
{
	uint64_t offloads = 0;

	if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
		offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
	if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
		offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
		offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
		offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
		offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
	if ((txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT) &&
	    (txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP))
		offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	*tx_offloads = offloads;
}

/**
 * A conversion function from offloads API.
 */
static void
rte_eth_convert_txq_offloads(const uint64_t tx_offloads, uint32_t *txq_flags)
{
	uint32_t flags = 0;

	if (!(tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
		flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
	if (!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
		flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
	if (!(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
		flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
	if (!(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
		flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
	if (!(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
		flags |= ETH_TXQ_FLAGS_NOXSUMTCP;
	if (tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		flags |= (ETH_TXQ_FLAGS_NOREFCOUNT | ETH_TXQ_FLAGS_NOMULTMEMP);

	*txq_flags = flags;
}
int
rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
		       uint16_t nb_tx_desc, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf local_conf;
	void **txq;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);

	rte_eth_dev_info_get(port_id, &dev_info);

	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
		RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
			"should be: <= %hu, >= %hu, and a multiple of %hu\n",
			nb_tx_desc,
			dev_info.tx_desc_lim.nb_max,
			dev_info.tx_desc_lim.nb_min,
			dev_info.tx_desc_lim.nb_align);
		return -EINVAL;
	}

	txq = dev->data->tx_queues;
	if (txq[tx_queue_id]) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
					-ENOTSUP);
		(*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
		txq[tx_queue_id] = NULL;
	}

	if (tx_conf == NULL)
		tx_conf = &dev_info.default_txconf;

	/*
	 * Convert between the offloads API to enable PMDs to support
	 * only one of them.
	 */
	local_conf = *tx_conf;
	if (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) {
		rte_eth_convert_txq_offloads(tx_conf->offloads,
					     &local_conf.txq_flags);
		/* Keep the ignore flag. */
		local_conf.txq_flags |= ETH_TXQ_FLAGS_IGNORE;
	} else {
		rte_eth_convert_txq_flags(tx_conf->txq_flags,
					  &local_conf.offloads);
	}

	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
}
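
/*
 * Usage sketch (illustrative only): setting up TX queue 0 with the
 * offloads path selected through ETH_TXQ_FLAGS_IGNORE, starting from the
 * PMD's default txconf:
 *
 *	struct rte_eth_dev_info dev_info;
 *	struct rte_eth_txconf txconf;
 *
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	txconf = dev_info.default_txconf;
 *	txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
 *	if (rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *				   &txconf) < 0)
 *		rte_exit(EXIT_FAILURE, "TX queue setup failed\n");
 */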
void
rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata __rte_unused)
{
	unsigned i;

	for (i = 0; i < unsent; i++)
		rte_pktmbuf_free(pkts[i]);
}

void
rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata)
{
	uint64_t *count = userdata;
	unsigned i;

	for (i = 0; i < unsent; i++)
		rte_pktmbuf_free(pkts[i]);

	*count += unsent;
}
int
rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
		buffer_tx_error_fn cbfn, void *userdata)
{
	buffer->error_callback = cbfn;
	buffer->error_userdata = userdata;
	return 0;
}
int
rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
{
	int ret = 0;

	if (buffer == NULL)
		return -EINVAL;

	buffer->size = size;
	if (buffer->error_callback == NULL) {
		ret = rte_eth_tx_buffer_set_err_callback(
			buffer, rte_eth_tx_buffer_drop_callback, NULL);
	}

	return ret;
}
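
/*
 * Usage sketch (illustrative only; mbuf is assumed to come from the
 * application's RX path): buffering packets for TX and counting drops
 * with the stock count callback instead of the default drop callback:
 *
 *	static uint64_t dropped;
 *	struct rte_eth_dev_tx_buffer *buf;
 *
 *	buf = rte_zmalloc_socket("tx_buf", RTE_ETH_TX_BUFFER_SIZE(32), 0,
 *			rte_socket_id());
 *	rte_eth_tx_buffer_init(buf, 32);
 *	rte_eth_tx_buffer_set_err_callback(buf,
 *			rte_eth_tx_buffer_count_callback, &dropped);
 *
 *	rte_eth_tx_buffer(port_id, 0, buf, mbuf);
 *	rte_eth_tx_buffer_flush(port_id, 0, buf);
 */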
int
rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

	/* Validate Input Data. Bail if not valid or not supported. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);

	/* Call driver to free pending mbufs. */
	ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
					       free_cnt);
	return eth_err(port_id, ret);
}
void
rte_eth_promiscuous_enable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
	(*dev->dev_ops->promiscuous_enable)(dev);
	dev->data->promiscuous = 1;
}

void
rte_eth_promiscuous_disable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
	dev->data->promiscuous = 0;
	(*dev->dev_ops->promiscuous_disable)(dev);
}

int
rte_eth_promiscuous_get(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	return dev->data->promiscuous;
}

void
rte_eth_allmulticast_enable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
	(*dev->dev_ops->allmulticast_enable)(dev);
	dev->data->all_multicast = 1;
}

void
rte_eth_allmulticast_disable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
	dev->data->all_multicast = 0;
	(*dev->dev_ops->allmulticast_disable)(dev);
}

int
rte_eth_allmulticast_get(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	return dev->data->all_multicast;
}
static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
void
rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc != 0)
		rte_eth_dev_atomic_read_link_status(dev, eth_link);
	else {
		RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 1);
		*eth_link = dev->data->dev_link;
	}
}

void
rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc != 0)
		rte_eth_dev_atomic_read_link_status(dev, eth_link);
	else {
		RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 0);
		*eth_link = dev->data->dev_link;
	}
}
int
rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
	return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
}
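
/*
 * Usage sketch (illustrative only): reading the basic per-port counters.
 *
 *	struct rte_eth_stats stats;
 *
 *	if (rte_eth_stats_get(port_id, &stats) == 0)
 *		printf("rx %" PRIu64 " pkts, %" PRIu64 " missed\n",
 *			stats.ipackets, stats.imissed);
 */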
int
rte_eth_stats_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
	(*dev->dev_ops->stats_reset)(dev);
	dev->data->rx_mbuf_alloc_failed = 0;

	return 0;
}
static inline int
get_xstats_basic_count(struct rte_eth_dev *dev)
{
	uint16_t nb_rxqs, nb_txqs;
	int count;

	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	count = RTE_NB_STATS;
	count += nb_rxqs * RTE_NB_RXQ_STATS;
	count += nb_txqs * RTE_NB_TXQ_STATS;

	return count;
}
static int
get_xstats_count(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int count;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	dev = &rte_eth_devices[port_id];
	if (dev->dev_ops->xstats_get_names_by_id != NULL) {
		count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
				NULL, 0);
		if (count < 0)
			return eth_err(port_id, count);
	}
	if (dev->dev_ops->xstats_get_names != NULL) {
		count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
		if (count < 0)
			return eth_err(port_id, count);
	} else
		count = 0;

	count += get_xstats_basic_count(dev);

	return count;
}
int
rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
		uint64_t *id)
{
	int cnt_xstats, idx_xstat;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (!id) {
		RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n");
		return -ENOMEM;
	}

	if (!xstat_name) {
		RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n");
		return -ENOMEM;
	}

	/* Get count */
	cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
	if (cnt_xstats < 0) {
		RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n");
		return -ENODEV;
	}

	/* Get id-name lookup table */
	struct rte_eth_xstat_name xstats_names[cnt_xstats];

	if (cnt_xstats != rte_eth_xstats_get_names_by_id(
			port_id, xstats_names, cnt_xstats, NULL)) {
		RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n");
		return -1;
	}

	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
		if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
			*id = idx_xstat;
			return 0;
		}
	}

	return -EINVAL;
}
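
/*
 * Usage sketch (illustrative only): resolving one extended statistic by
 * name, then fetching just that value through the by-id API:
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets",
 *					  &id) == 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_good_packets = %" PRIu64 "\n", value);
 */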
/* retrieve basic stats names */
static int
rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names)
{
	int cnt_used_entries = 0;
	uint32_t idx, id_queue;
	uint16_t num_q;

	for (idx = 0; idx < RTE_NB_STATS; idx++) {
		snprintf(xstats_names[cnt_used_entries].name,
			sizeof(xstats_names[0].name),
			"%s", rte_stats_strings[idx].name);
		cnt_used_entries++;
	}
	num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (id_queue = 0; id_queue < num_q; id_queue++) {
		for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
			snprintf(xstats_names[cnt_used_entries].name,
				sizeof(xstats_names[0].name),
				"rx_q%u%s",
				id_queue, rte_rxq_stats_strings[idx].name);
			cnt_used_entries++;
		}
	}
	num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (id_queue = 0; id_queue < num_q; id_queue++) {
		for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
			snprintf(xstats_names[cnt_used_entries].name,
				sizeof(xstats_names[0].name),
				"tx_q%u%s",
				id_queue, rte_txq_stats_strings[idx].name);
			cnt_used_entries++;
		}
	}
	return cnt_used_entries;
}
/* retrieve ethdev extended statistics names */
int
rte_eth_xstats_get_names_by_id(uint16_t port_id,
	struct rte_eth_xstat_name *xstats_names, unsigned int size,
	uint64_t *ids)
{
	struct rte_eth_xstat_name *xstats_names_copy;
	unsigned int no_basic_stat_requested = 1;
	unsigned int no_ext_stat_requested = 1;
	unsigned int expected_entries;
	unsigned int basic_count;
	struct rte_eth_dev *dev;
	unsigned int i;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	basic_count = get_xstats_basic_count(dev);
	ret = get_xstats_count(port_id);
	if (ret < 0)
		return ret;
	expected_entries = (unsigned int)ret;

	/* Return max number of stats if no ids given */
	if (!ids) {
		if (!xstats_names)
			return expected_entries;
		else if (xstats_names && size < expected_entries)
			return expected_entries;
	}

	if (ids && !xstats_names)
		return -EINVAL;

	if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
		uint64_t ids_copy[size];

		for (i = 0; i < size; i++) {
			if (ids[i] < basic_count) {
				no_basic_stat_requested = 0;
				break;
			}

			/*
			 * Convert ids to xstats ids that PMD knows.
			 * ids known by user are basic + extended stats.
			 */
			ids_copy[i] = ids[i] - basic_count;
		}

		if (no_basic_stat_requested)
			return (*dev->dev_ops->xstats_get_names_by_id)(dev,
					xstats_names, ids_copy, size);
	}

	/* Retrieve all stats */
	if (!ids) {
		int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
				expected_entries);
		if (num_stats < 0 || num_stats > (int)expected_entries)
			return num_stats;
		else
			return expected_entries;
	}

	xstats_names_copy = calloc(expected_entries,
		sizeof(struct rte_eth_xstat_name));

	if (!xstats_names_copy) {
		RTE_PMD_DEBUG_TRACE("ERROR: can't allocate memory");
		return -ENOMEM;
	}

	if (ids) {
		for (i = 0; i < size; i++) {
			if (ids[i] > basic_count) {
				no_ext_stat_requested = 0;
				break;
			}
		}
	}

	/* Fill xstats_names_copy structure */
	if (ids && no_ext_stat_requested) {
		rte_eth_basic_stats_get_names(dev, xstats_names_copy);
	} else {
		ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
			expected_entries);
		if (ret < 0) {
			free(xstats_names_copy);
			return ret;
		}
	}

	/* Filter stats */
	for (i = 0; i < size; i++) {
		if (ids[i] >= expected_entries) {
			RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
			free(xstats_names_copy);
			return -1;
		}
		xstats_names[i] = xstats_names_copy[ids[i]];
	}

	free(xstats_names_copy);
	return size;
}
int
rte_eth_xstats_get_names(uint16_t port_id,
	struct rte_eth_xstat_name *xstats_names,
	unsigned int size)
{
	struct rte_eth_dev *dev;
	int cnt_used_entries;
	int cnt_expected_entries;
	int cnt_driver_entries;

	cnt_expected_entries = get_xstats_count(port_id);
	if (xstats_names == NULL || cnt_expected_entries < 0 ||
			(int)size < cnt_expected_entries)
		return cnt_expected_entries;

	/* port_id checked in get_xstats_count() */
	dev = &rte_eth_devices[port_id];

	cnt_used_entries = rte_eth_basic_stats_get_names(
		dev, xstats_names);

	if (dev->dev_ops->xstats_get_names != NULL) {
		/* If there are any driver-specific xstats, append them
		 * to end of list.
		 */
		cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
			dev,
			xstats_names + cnt_used_entries,
			size - cnt_used_entries);
		if (cnt_driver_entries < 0)
			return eth_err(port_id, cnt_driver_entries);
		cnt_used_entries += cnt_driver_entries;
	}

	return cnt_used_entries;
}
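
/*
 * Usage sketch (illustrative only): the two-call pattern for dumping all
 * extended statistics; calling with a NULL array first returns the
 * required count:
 *
 *	int i, n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *
 *	if (n <= 0)
 *		return;
 *	struct rte_eth_xstat_name names[n];
 *	struct rte_eth_xstat vals[n];
 *
 *	if (rte_eth_xstats_get_names(port_id, names, n) == n &&
 *	    rte_eth_xstats_get(port_id, vals, n) == n)
 *		for (i = 0; i < n; i++)
 *			printf("%s: %" PRIu64 "\n",
 *				names[vals[i].id].name, vals[i].value);
 */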
static int
rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
{
	struct rte_eth_dev *dev;
	struct rte_eth_stats eth_stats;
	unsigned int count = 0, i, q;
	uint64_t val, *stats_ptr;
	uint16_t nb_rxqs, nb_txqs;
	int ret;

	ret = rte_eth_stats_get(port_id, &eth_stats);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];

	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	/* global stats */
	for (i = 0; i < RTE_NB_STATS; i++) {
		stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_stats_strings[i].offset);
		val = *stats_ptr;
		xstats[count++].value = val;
	}

	/* per-rxq stats */
	for (q = 0; q < nb_rxqs; q++) {
		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_rxq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}

	/* per-txq stats */
	for (q = 0; q < nb_txqs; q++) {
		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_txq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}
	return count;
}
/* retrieve ethdev extended statistics */
int
rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
			 uint64_t *values, unsigned int size)
{
	unsigned int no_basic_stat_requested = 1;
	unsigned int no_ext_stat_requested = 1;
	unsigned int num_xstats_filled;
	unsigned int basic_count;
	uint16_t expected_entries;
	struct rte_eth_dev *dev;
	unsigned int i;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ret = get_xstats_count(port_id);
	if (ret < 0)
		return ret;
	expected_entries = (uint16_t)ret;
	struct rte_eth_xstat xstats[expected_entries];
	dev = &rte_eth_devices[port_id];
	basic_count = get_xstats_basic_count(dev);

	/* Return max number of stats if no ids given */
	if (!ids) {
		if (!values)
			return expected_entries;
		else if (values && size < expected_entries)
			return expected_entries;
	}

	if (ids && !values)
		return -EINVAL;

	if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
		unsigned int basic_count = get_xstats_basic_count(dev);
		uint64_t ids_copy[size];

		for (i = 0; i < size; i++) {
			if (ids[i] < basic_count) {
				no_basic_stat_requested = 0;
				break;
			}

			/*
			 * Convert ids to xstats ids that PMD knows.
			 * ids known by user are basic + extended stats.
			 */
			ids_copy[i] = ids[i] - basic_count;
		}

		if (no_basic_stat_requested)
			return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
					values, size);
	}

	if (ids) {
		for (i = 0; i < size; i++) {
			if (ids[i] > basic_count) {
				no_ext_stat_requested = 0;
				break;
			}
		}
	}

	/* Fill the xstats structure */
	if (ids && no_ext_stat_requested)
		ret = rte_eth_basic_stats_get(port_id, xstats);
	else
		ret = rte_eth_xstats_get(port_id, xstats, expected_entries);

	if (ret < 0)
		return ret;
	num_xstats_filled = (unsigned int)ret;

	/* Return all stats */
	if (!ids) {
		for (i = 0; i < num_xstats_filled; i++)
			values[i] = xstats[i].value;
		return expected_entries;
	}

	/* Filter stats */
	for (i = 0; i < size; i++) {
		if (ids[i] >= expected_entries) {
			RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
			return -1;
		}
		values[i] = xstats[ids[i]].value;
	}
	return size;
}
int
rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
	unsigned int n)
{
	struct rte_eth_dev *dev;
	unsigned int count = 0, i;
	signed int xcount = 0;
	uint16_t nb_rxqs, nb_txqs;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	/* Return generic statistics */
	count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
		(nb_txqs * RTE_NB_TXQ_STATS);

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL) {
		/* Retrieve the xstats from the driver at the end of the
		 * xstats struct.
		 */
		xcount = (*dev->dev_ops->xstats_get)(dev,
				     xstats ? xstats + count : NULL,
				     (n > count) ? n - count : 0);

		if (xcount < 0)
			return eth_err(port_id, xcount);
	}

	if (n < count + xcount || xstats == NULL)
		return count + xcount;

	/* now fill the xstats structure */
	ret = rte_eth_basic_stats_get(port_id, xstats);
	if (ret < 0)
		return ret;
	count = ret;

	for (i = 0; i < count; i++)
		xstats[i].id = i;
	/* add an offset to driver-specific stats */
	for ( ; i < count + xcount; i++)
		xstats[i].id += count;

	return count + xcount;
}
/* reset ethdev extended statistics */
void
rte_eth_xstats_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_reset != NULL) {
		(*dev->dev_ops->xstats_reset)(dev);
		return;
	}

	/* fallback to default */
	rte_eth_stats_reset(port_id);
}
static int
set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
		uint8_t is_rx)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
	return (*dev->dev_ops->queue_stats_mapping_set)
			(dev, queue_id, stat_idx, is_rx);
}

int
rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
		uint8_t stat_idx)
{
	return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
						stat_idx, STAT_QMAP_TX));
}

int
rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
		uint8_t stat_idx)
{
	return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
						stat_idx, STAT_QMAP_RX));
}
int
rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
							fw_version, fw_size));
}
void
rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
{
	struct rte_eth_dev *dev;
	const struct rte_eth_desc_lim lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
	};

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
	dev_info->rx_desc_lim = lim;
	dev_info->tx_desc_lim = lim;

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
	dev_info->driver_name = dev->device->driver->name;
	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
}
int
rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
				 uint32_t *ptypes, int num)
{
	int i, j;
	struct rte_eth_dev *dev;
	const uint32_t *all_ptypes;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);

	if (!all_ptypes)
		return 0;

	for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
		if (all_ptypes[i] & ptype_mask) {
			if (j < num)
				ptypes[j] = all_ptypes[i];
			j++;
		}

	return j;
}
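
/*
 * Usage sketch (illustrative only): listing which L4 packet types the PMD
 * can recognize. The return value is the total number of matches, which
 * may exceed the array size, hence the extra bound check:
 *
 *	uint32_t ptypes[16];
 *	int i, n;
 *
 *	n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L4_MASK,
 *					     ptypes, RTE_DIM(ptypes));
 *	for (i = 0; i < n && i < (int)RTE_DIM(ptypes); i++)
 *		printf("ptype 0x%08x\n", ptypes[i]);
 */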
void
rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];
	ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
}

int
rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	*mtu = dev->data->mtu;
	return 0;
}
int
rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
{
	int ret;
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);

	ret = (*dev->dev_ops->mtu_set)(dev, mtu);
	if (!ret)
		dev->data->mtu = mtu;

	return eth_err(port_id, ret);
}
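
/*
 * Usage sketch (illustrative only; 9000 is an arbitrary jumbo value):
 * raising the MTU and reading back the value cached in dev->data:
 *
 *	uint16_t mtu;
 *
 *	if (rte_eth_dev_set_mtu(port_id, 9000) == 0 &&
 *	    rte_eth_dev_get_mtu(port_id, &mtu) == 0)
 *		printf("MTU is now %u\n", mtu);
 */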
int
rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	if (!(dev->data->dev_conf.rxmode.offloads &
	      DEV_RX_OFFLOAD_VLAN_FILTER)) {
		RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
		return -ENOSYS;
	}

	if (vlan_id > 4095) {
		RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
				port_id, (unsigned) vlan_id);
		return -EINVAL;
	}
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);

	ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
	if (ret == 0) {
		struct rte_vlan_filter_conf *vfc;
		int vidx;
		int vbit;

		vfc = &dev->data->vlan_filter_conf;
		vidx = vlan_id / 64;
		vbit = vlan_id % 64;

		if (on)
			vfc->ids[vidx] |= UINT64_C(1) << vbit;
		else
			vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
	}

	return eth_err(port_id, ret);
}
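
/*
 * Usage sketch (illustrative only; port_conf stands for the application's
 * own rte_eth_conf): VLAN filtering must be enabled in rxmode.offloads at
 * configure time before individual IDs can be added:
 *
 *	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
 *	rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *	...
 *	rte_eth_dev_vlan_filter(port_id, 100, 1);	// accept VLAN 100
 */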
int
rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
				    int on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
	(*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);

	return 0;
}
int
rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
				enum rte_vlan_type vlan_type,
				uint16_t tpid)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);

	return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
							       tpid));
}
int
rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
{
	struct rte_eth_dev *dev;
	int ret = 0;
	int mask = 0;
	int cur, org = 0;
	uint64_t orig_offloads;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	/* save original values in case of failure */
	orig_offloads = dev->data->dev_conf.rxmode.offloads;

	/* check which option changed by application */
	cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.offloads &
		 DEV_RX_OFFLOAD_VLAN_STRIP);
	if (cur != org) {
		if (cur)
			dev->data->dev_conf.rxmode.offloads |=
				DEV_RX_OFFLOAD_VLAN_STRIP;
		else
			dev->data->dev_conf.rxmode.offloads &=
				~DEV_RX_OFFLOAD_VLAN_STRIP;
		mask |= ETH_VLAN_STRIP_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.offloads &
		 DEV_RX_OFFLOAD_VLAN_FILTER);
	if (cur != org) {
		if (cur)
			dev->data->dev_conf.rxmode.offloads |=
				DEV_RX_OFFLOAD_VLAN_FILTER;
		else
			dev->data->dev_conf.rxmode.offloads &=
				~DEV_RX_OFFLOAD_VLAN_FILTER;
		mask |= ETH_VLAN_FILTER_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.offloads &
		 DEV_RX_OFFLOAD_VLAN_EXTEND);
	if (cur != org) {
		if (cur)
			dev->data->dev_conf.rxmode.offloads |=
				DEV_RX_OFFLOAD_VLAN_EXTEND;
		else
			dev->data->dev_conf.rxmode.offloads &=
				~DEV_RX_OFFLOAD_VLAN_EXTEND;
		mask |= ETH_VLAN_EXTEND_MASK;
	}

	/* no change */
	if (mask == 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);

	/*
	 * Convert to the offload bitfield API just in case the underlying PMD
	 * still supports it.
	 */
	rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
				    &dev->data->dev_conf.rxmode);
	ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
	if (ret) {
		/* an error occurred, restore the original values */
		dev->data->dev_conf.rxmode.offloads = orig_offloads;
		rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
					    &dev->data->dev_conf.rxmode);
	}

	return eth_err(port_id, ret);
}
int
rte_eth_dev_get_vlan_offload(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret = 0;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_VLAN_STRIP)
		ret |= ETH_VLAN_STRIP_OFFLOAD;

	if (dev->data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_VLAN_FILTER)
		ret |= ETH_VLAN_FILTER_OFFLOAD;

	if (dev->data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_VLAN_EXTEND)
		ret |= ETH_VLAN_EXTEND_OFFLOAD;

	return ret;
}
int
rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);

	return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
}
int
rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
	memset(fc_conf, 0, sizeof(*fc_conf));
	return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
}
int
rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
		RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
}
int
rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
				   struct rte_eth_pfc_conf *pfc_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
		RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	/* High water, low water validation is device-specific */
	if (*dev->dev_ops->priority_flow_ctrl_set)
		return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
				       (dev, pfc_conf));
	return -ENOTSUP;
}
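/*
 * Validate that at least one bit is set across all RETA mask groups; a
 * completely empty mask would make the RETA update or query below a no-op,
 * which is treated as a caller error.
 */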
static int
rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	uint16_t i, num;

	if (!reta_conf)
		return -EINVAL;

	num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
	for (i = 0; i < num; i++) {
		if (reta_conf[i].mask)
			return 0;
	}

	return -EINVAL;
}
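/*
 * Verify that every selected redirection-table entry points at an RX queue
 * index below max_rxq, so the PMD never receives an out-of-range queue.
 */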
static int
rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size,
			 uint16_t max_rxq)
{
	uint16_t i, idx, shift;

	if (!reta_conf)
		return -EINVAL;

	if (max_rxq == 0) {
		RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if ((reta_conf[idx].mask & (1ULL << shift)) &&
		    (reta_conf[idx].reta[shift] >= max_rxq)) {
			RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
				"the maximum rxq index: %u\n", idx, shift,
				reta_conf[idx].reta[shift], max_rxq);
			return -EINVAL;
		}
	}

	return 0;
}
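/*
 * The RSS RETA update/query pair below runs the two validation helpers
 * above on the caller-supplied table before handing the request to the PMD.
 */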
int
rte_eth_dev_rss_reta_update(uint16_t port_id,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	/* Check mask bits */
	ret = rte_eth_check_reta_mask(reta_conf, reta_size);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];

	/* Check entry value */
	ret = rte_eth_check_reta_entry(reta_conf, reta_size,
				       dev->data->nb_rx_queues);
	if (ret < 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
							     reta_size));
}
int
rte_eth_dev_rss_reta_query(uint16_t port_id,
			   struct rte_eth_rss_reta_entry64 *reta_conf,
			   uint16_t reta_size)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	/* Check mask bits */
	ret = rte_eth_check_reta_mask(reta_conf, reta_size);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
							    reta_size));
}
int
rte_eth_dev_rss_hash_update(uint16_t port_id,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
								 rss_conf));
}
int
rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
			      struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
								   rss_conf));
}
int
rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
				struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (udp_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
		return -EINVAL;
	}

	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
								udp_tunnel));
}
int
rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
				   struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (udp_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
		return -EINVAL;
	}

	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
								udp_tunnel));
}
int
rte_eth_led_on(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
}

int
rte_eth_led_off(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
}
/*
 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
 * an empty spot.
 */
static int
get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	unsigned i;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	rte_eth_dev_info_get(port_id, &dev_info);

	for (i = 0; i < dev_info.max_mac_addrs; i++)
		if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
			return i;

	return -1;
}
static const struct ether_addr null_mac_addr;

int
rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
			 uint32_t pool)
{
	struct rte_eth_dev *dev;
	int index;
	uint64_t pool_mask;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);

	if (is_zero_ether_addr(addr)) {
		RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
			port_id);
		return -EINVAL;
	}
	if (pool >= ETH_64_POOLS) {
		RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
		return -EINVAL;
	}

	index = get_mac_addr_index(port_id, addr);
	if (index < 0) {
		index = get_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
				port_id);
			return -ENOSPC;
		}
	} else {
		pool_mask = dev->data->mac_pool_sel[index];

		/* Check if both MAC address and pool are already there, and do nothing */
		if (pool_mask & (1ULL << pool))
			return 0;
	}

	/* Update NIC */
	ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);

	if (ret == 0) {
		/* Update address in NIC data structure */
		ether_addr_copy(addr, &dev->data->mac_addrs[index]);

		/* Update pool bitmap in NIC data structure */
		dev->data->mac_pool_sel[index] |= (1ULL << pool);
	}

	return eth_err(port_id, ret);
}
int
rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
{
	struct rte_eth_dev *dev;
	int index;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);

	index = get_mac_addr_index(port_id, addr);
	if (index == 0) {
		RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
		return -EADDRINUSE;
	} else if (index < 0)
		return 0;  /* Do nothing if address wasn't found */

	/* Update NIC */
	(*dev->dev_ops->mac_addr_remove)(dev, index);

	/* Update address in NIC data structure */
	ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);

	/* reset pool bitmap */
	dev->data->mac_pool_sel[index] = 0;

	return 0;
}
int
rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (!is_valid_assigned_ether_addr(addr))
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);

	/* Update default address in NIC data structure */
	ether_addr_copy(addr, &dev->data->mac_addrs[0]);

	(*dev->dev_ops->mac_addr_set)(dev, addr);

	return 0;
}
/*
 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
 * an empty spot.
 */
static int
get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	unsigned i;

	rte_eth_dev_info_get(port_id, &dev_info);
	if (!dev->data->hash_mac_addrs)
		return -1;

	for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
		if (memcmp(addr, &dev->data->hash_mac_addrs[i],
			   ETHER_ADDR_LEN) == 0)
			return i;

	return -1;
}
int
rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
			      uint8_t on)
{
	int index;
	int ret;
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (is_zero_ether_addr(addr)) {
		RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
			port_id);
		return -EINVAL;
	}

	index = get_hash_mac_addr_index(port_id, addr);
	/* Check if it's already there, and do nothing */
	if ((index >= 0) && on)
		return 0;

	if (index < 0) {
		if (!on) {
			RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
				"set in UTA\n", port_id);
			return -EINVAL;
		}

		index = get_hash_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
					port_id);
			return -ENOSPC;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
	ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
	if (ret == 0) {
		/* Update address in NIC data structure */
		if (on)
			ether_addr_copy(addr,
					&dev->data->hash_mac_addrs[index]);
		else
			ether_addr_copy(&null_mac_addr,
					&dev->data->hash_mac_addrs[index]);
	}

	return eth_err(port_id, ret);
}
int
rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
									on));
}
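/*
 * Cap a TX queue's rate. The queue index is checked against the device
 * capabilities and the requested rate against the current link speed
 * before the request reaches the PMD.
 */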
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
				 uint16_t tx_rate)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_link link;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	rte_eth_dev_info_get(port_id, &dev_info);
	link = dev->data->dev_link;

	if (queue_idx >= dev_info.max_tx_queues) {
		RTE_PMD_DEBUG_TRACE("set queue rate limit:port %d: "
				"invalid queue id=%d\n", port_id, queue_idx);
		return -EINVAL;
	}

	if (tx_rate > link.link_speed) {
		RTE_PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
				"bigger than link speed= %d\n",
			tx_rate, link.link_speed);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
							queue_idx, tx_rate));
}
int
rte_eth_mirror_rule_set(uint16_t port_id,
			struct rte_eth_mirror_conf *mirror_conf,
			uint8_t rule_id, uint8_t on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (mirror_conf->rule_type == 0) {
		RTE_PMD_DEBUG_TRACE("mirror rule type cannot be 0.\n");
		return -EINVAL;
	}

	if (mirror_conf->dst_pool >= ETH_64_POOLS) {
		RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
				ETH_64_POOLS - 1);
		return -EINVAL;
	}

	if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
	     ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
	    (mirror_conf->pool_mask == 0)) {
		RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask cannot be 0.\n");
		return -EINVAL;
	}

	if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
	    mirror_conf->vlan.vlan_mask == 0) {
		RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask cannot be 0.\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);

	return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
						mirror_conf, rule_id, on));
}
int
rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);

	return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
								   rule_id));
}
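/* Initialize the per-port event callback lists once, at constructor time. */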
RTE_INIT(eth_dev_init_cb_lists)
{
	int i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
		TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
}
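/*
 * Register an event callback, either for one port or, with RTE_ETH_ALL,
 * for every possible port. Registration is idempotent: an identical
 * (fn, arg, event) tuple is only stored once per port.
 */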
int
rte_eth_dev_callback_register(uint16_t port_id,
			      enum rte_eth_event_type event,
			      rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *user_cb;
	uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
	uint16_t last_port;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
		RTE_LOG(ERR, EAL, "Invalid port_id=%d\n", port_id);
		return -EINVAL;
	}

	if (port_id == RTE_ETH_ALL) {
		next_port = 0;
		last_port = RTE_MAX_ETHPORTS - 1;
	} else {
		next_port = last_port = port_id;
	}

	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	do {
		dev = &rte_eth_devices[next_port];

		TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
			if (user_cb->cb_fn == cb_fn &&
				user_cb->cb_arg == cb_arg &&
				user_cb->event == event) {
				break;
			}
		}

		/* create a new callback. */
		if (user_cb == NULL) {
			user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_eth_dev_callback), 0);
			if (user_cb != NULL) {
				user_cb->cb_fn = cb_fn;
				user_cb->cb_arg = cb_arg;
				user_cb->event = event;
				TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
						  user_cb, next);
			} else {
				rte_spinlock_unlock(&rte_eth_dev_cb_lock);
				rte_eth_dev_callback_unregister(port_id, event,
								cb_fn, cb_arg);
				return -ENOMEM;
			}
		}
	} while (++next_port <= last_port);

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	return 0;
}
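/*
 * Unregister callbacks matching (fn, event); a cb_arg of (void *)-1 acts as
 * a wildcard. A callback that is currently executing is left in place and
 * the call reports -EAGAIN so the caller can retry.
 */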
int
rte_eth_dev_callback_unregister(uint16_t port_id,
				enum rte_eth_event_type event,
				rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *cb, *next;
	uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
	uint16_t last_port;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
		RTE_LOG(ERR, EAL, "Invalid port_id=%d\n", port_id);
		return -EINVAL;
	}

	if (port_id == RTE_ETH_ALL) {
		next_port = 0;
		last_port = RTE_MAX_ETHPORTS - 1;
	} else {
		next_port = last_port = port_id;
	}

	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	do {
		dev = &rte_eth_devices[next_port];
		ret = 0;
		for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
		     cb = next) {

			next = TAILQ_NEXT(cb, next);

			if (cb->cb_fn != cb_fn || cb->event != event ||
			    (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
				continue;

			/*
			 * if this callback is not executing right now,
			 * then remove it.
			 */
			if (cb->active == 0) {
				TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
				rte_free(cb);
			} else {
				ret = -EAGAIN;
			}
		}
	} while (++next_port <= last_port);

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	return ret;
}
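/*
 * Run the callbacks registered for an event. Each callback is copied and
 * marked active before the list lock is dropped, so user code runs without
 * the lock held while unregister is prevented from freeing the entry.
 */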
int
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event, void *ret_param)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;
	int rc = 0;

	rte_spinlock_lock(&rte_eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		if (ret_param != NULL)
			dev_cb.ret_param = ret_param;

		rte_spinlock_unlock(&rte_eth_dev_cb_lock);
		rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
				dev_cb.cb_arg, dev_cb.ret_param);
		rte_spinlock_lock(&rte_eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	return rc;
}
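/*
 * Apply an epoll control operation to the RX interrupt vector of every RX
 * queue on the port; errors other than -EEXIST are logged, but the loop
 * continues over the remaining queues.
 */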
int
rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
{
	uint32_t vec;
	struct rte_eth_dev *dev;
	struct rte_intr_handle *intr_handle;
	uint16_t qid;
	int rc;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	if (!dev->intr_handle) {
		RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
		return -ENOTSUP;
	}

	intr_handle = dev->intr_handle;
	if (!intr_handle->intr_vec) {
		RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
		return -EPERM;
	}

	for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
		vec = intr_handle->intr_vec[qid];
		rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
		if (rc && rc != -EEXIST) {
			RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
					" op %d epfd %d vec %u\n",
					port_id, qid, op, epfd, vec);
		}
	}

	return 0;
}
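/*
 * Reserve (or reuse) the DMA memzone backing a driver ring. The zone name
 * encodes driver, ring, port and queue, so a second call for the same queue
 * returns the existing zone instead of allocating a new one.
 */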
const struct rte_memzone *
rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
			 uint16_t queue_id, size_t size, unsigned align,
			 int socket_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
		 dev->device->driver->name, ring_name,
		 dev->data->port_id, queue_id);

	mz = rte_memzone_lookup(z_name);
	if (mz)
		return mz;

	return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align);
}
int
rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
			  int epfd, int op, void *data)
{
	uint32_t vec;
	struct rte_eth_dev *dev;
	struct rte_intr_handle *intr_handle;
	int rc;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (!dev->intr_handle) {
		RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
		return -ENOTSUP;
	}

	intr_handle = dev->intr_handle;
	if (!intr_handle->intr_vec) {
		RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
		return -EPERM;
	}

	vec = intr_handle->intr_vec[queue_id];
	rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
	if (rc && rc != -EEXIST) {
		RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
				" op %d epfd %d vec %u\n",
				port_id, queue_id, op, epfd, vec);
		return rc;
	}

	return 0;
}
int
rte_eth_dev_rx_intr_enable(uint16_t port_id,
			   uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
								queue_id));
}

int
rte_eth_dev_rx_intr_disable(uint16_t port_id,
			    uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
								queue_id));
}
int
rte_eth_dev_filter_supported(uint16_t port_id,
			     enum rte_filter_type filter_type)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
	return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
					    RTE_ETH_FILTER_NOP, NULL);
}
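/*
 * ABI-compatibility wrapper kept for binaries linked against DPDK 2.2: it
 * translates between the old (v22) fdir/hash info layouts, whose bit-mask
 * fields were 32-bit, and the current 64-bit ones before and after calling
 * the PMD filter_ctrl op.
 */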
int
rte_eth_dev_filter_ctrl_v22(uint16_t port_id,
			    enum rte_filter_type filter_type,
			    enum rte_filter_op filter_op, void *arg);

int
rte_eth_dev_filter_ctrl_v22(uint16_t port_id,
			    enum rte_filter_type filter_type,
			    enum rte_filter_op filter_op, void *arg)
{
	struct rte_eth_fdir_info_v22 {
		enum rte_fdir_mode mode;
		struct rte_eth_fdir_masks mask;
		struct rte_eth_fdir_flex_conf flex_conf;
		uint32_t guarant_spc;
		uint32_t best_spc;
		uint32_t flow_types_mask[1];
		uint32_t max_flexpayload;
		uint32_t flex_payload_unit;
		uint32_t max_flex_payload_segment_num;
		uint16_t flex_payload_limit;
		uint32_t flex_bitmask_unit;
		uint32_t max_flex_bitmask_num;
	};

	struct rte_eth_hash_global_conf_v22 {
		enum rte_eth_hash_function hash_func;
		uint32_t sym_hash_enable_mask[1];
		uint32_t valid_bit_mask[1];
	};

	struct rte_eth_hash_filter_info_v22 {
		enum rte_eth_hash_filter_info_type info_type;
		union {
			uint8_t enable;
			struct rte_eth_hash_global_conf_v22 global_conf;
			struct rte_eth_input_set_conf input_set_conf;
		} info;
	};

	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
	if (filter_op == RTE_ETH_FILTER_INFO) {
		int retval;
		struct rte_eth_fdir_info_v22 *fdir_info_v22;
		struct rte_eth_fdir_info fdir_info;

		fdir_info_v22 = (struct rte_eth_fdir_info_v22 *)arg;

		retval = (*dev->dev_ops->filter_ctrl)(dev, filter_type,
			  filter_op, (void *)&fdir_info);
		fdir_info_v22->mode = fdir_info.mode;
		fdir_info_v22->mask = fdir_info.mask;
		fdir_info_v22->flex_conf = fdir_info.flex_conf;
		fdir_info_v22->guarant_spc = fdir_info.guarant_spc;
		fdir_info_v22->best_spc = fdir_info.best_spc;
		fdir_info_v22->flow_types_mask[0] =
			(uint32_t)fdir_info.flow_types_mask[0];
		fdir_info_v22->max_flexpayload = fdir_info.max_flexpayload;
		fdir_info_v22->flex_payload_unit = fdir_info.flex_payload_unit;
		fdir_info_v22->max_flex_payload_segment_num =
			fdir_info.max_flex_payload_segment_num;
		fdir_info_v22->flex_payload_limit =
			fdir_info.flex_payload_limit;
		fdir_info_v22->flex_bitmask_unit = fdir_info.flex_bitmask_unit;
		fdir_info_v22->max_flex_bitmask_num =
			fdir_info.max_flex_bitmask_num;
		return retval;
	} else if (filter_op == RTE_ETH_FILTER_GET) {
		int retval;
		struct rte_eth_hash_filter_info f_info;
		struct rte_eth_hash_filter_info_v22 *f_info_v22 =
			(struct rte_eth_hash_filter_info_v22 *)arg;

		f_info.info_type = f_info_v22->info_type;
		retval = (*dev->dev_ops->filter_ctrl)(dev, filter_type,
			  filter_op, (void *)&f_info);

		switch (f_info_v22->info_type) {
		case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
			f_info_v22->info.enable = f_info.info.enable;
			break;
		case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
			f_info_v22->info.global_conf.hash_func =
				f_info.info.global_conf.hash_func;
			f_info_v22->info.global_conf.sym_hash_enable_mask[0] =
				(uint32_t)
				f_info.info.global_conf.sym_hash_enable_mask[0];
			f_info_v22->info.global_conf.valid_bit_mask[0] =
				(uint32_t)
				f_info.info.global_conf.valid_bit_mask[0];
			break;
		case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
			f_info_v22->info.input_set_conf =
				f_info.info.input_set_conf;
			break;
		default:
			break;
		}
		return retval;
	} else if (filter_op == RTE_ETH_FILTER_SET) {
		struct rte_eth_hash_filter_info f_info;
		struct rte_eth_hash_filter_info_v22 *f_v22 =
			(struct rte_eth_hash_filter_info_v22 *)arg;

		f_info.info_type = f_v22->info_type;
		switch (f_v22->info_type) {
		case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
			f_info.info.enable = f_v22->info.enable;
			break;
		case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
			f_info.info.global_conf.hash_func =
				f_v22->info.global_conf.hash_func;
			f_info.info.global_conf.sym_hash_enable_mask[0] =
				f_v22->info.global_conf.sym_hash_enable_mask[0];
			f_info.info.global_conf.valid_bit_mask[0] =
				f_v22->info.global_conf.valid_bit_mask[0];
			break;
		case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
			f_info.info.input_set_conf =
				f_v22->info.input_set_conf;
			break;
		default:
			break;
		}
		return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op,
						    (void *)&f_info);
	} else
		return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op,
						    arg);
}
VERSION_SYMBOL(rte_eth_dev_filter_ctrl, _v22, 2.2);
int
rte_eth_dev_filter_ctrl_v1802(uint16_t port_id,
			      enum rte_filter_type filter_type,
			      enum rte_filter_op filter_op, void *arg);

int
rte_eth_dev_filter_ctrl_v1802(uint16_t port_id,
			      enum rte_filter_type filter_type,
			      enum rte_filter_op filter_op, void *arg)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
							     filter_op, arg));
}
BIND_DEFAULT_SYMBOL(rte_eth_dev_filter_ctrl, _v1802, 18.02);
MAP_STATIC_SYMBOL(int rte_eth_dev_filter_ctrl(uint16_t port_id,
		  enum rte_filter_type filter_type,
		  enum rte_filter_op filter_op, void *arg),
		  rte_eth_dev_filter_ctrl_v1802);
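/*
 * Attach a callback to be run after each RX burst on the given queue.
 * Callbacks are chained in FIFO order; on failure NULL is returned and
 * rte_errno is set.
 */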
void *
rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}
	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.rx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&rte_eth_rx_cb_lock);
	/* Add the callbacks in fifo order. */
	struct rte_eth_rxtx_callback *tail =
		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];

	if (!tail) {
		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
	} else {
		while (tail->next)
			tail = tail->next;
		tail->next = cb;
	}
	rte_spinlock_unlock(&rte_eth_rx_cb_lock);

	return cb;
}
void *
rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.rx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&rte_eth_rx_cb_lock);
	/* Add the callbacks at first position */
	cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
	rte_smp_wmb();
	rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
	rte_spinlock_unlock(&rte_eth_rx_cb_lock);

	return cb;
}
void *
rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
		rte_tx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.tx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&rte_eth_tx_cb_lock);
	/* Add the callbacks in fifo order. */
	struct rte_eth_rxtx_callback *tail =
		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];

	if (!tail) {
		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
	} else {
		while (tail->next)
			tail = tail->next;
		tail->next = cb;
	}
	rte_spinlock_unlock(&rte_eth_tx_cb_lock);

	return cb;
}
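/*
 * Detach a previously added RX/TX callback. The callback memory is not
 * freed here: a datapath thread may still be walking the list, so the
 * caller must release it only once no burst can reference it anymore.
 */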
int
rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
		struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return -ENOTSUP;
#endif
	/* Check input parameters. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	if (user_cb == NULL ||
			queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
		return -EINVAL;

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_eth_rxtx_callback *cb;
	struct rte_eth_rxtx_callback **prev_cb;
	int ret = -EINVAL;

	rte_spinlock_lock(&rte_eth_rx_cb_lock);
	prev_cb = &dev->post_rx_burst_cbs[queue_id];
	for (; *prev_cb != NULL; prev_cb = &cb->next) {
		cb = *prev_cb;
		if (cb == user_cb) {
			/* Remove the user cb from the callback list. */
			*prev_cb = cb->next;
			ret = 0;
			break;
		}
	}
	rte_spinlock_unlock(&rte_eth_rx_cb_lock);

	return ret;
}
int
rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
		struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return -ENOTSUP;
#endif
	/* Check input parameters. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	if (user_cb == NULL ||
			queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
		return -EINVAL;

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret = -EINVAL;
	struct rte_eth_rxtx_callback *cb;
	struct rte_eth_rxtx_callback **prev_cb;

	rte_spinlock_lock(&rte_eth_tx_cb_lock);
	prev_cb = &dev->pre_tx_burst_cbs[queue_id];
	for (; *prev_cb != NULL; prev_cb = &cb->next) {
		cb = *prev_cb;
		if (cb == user_cb) {
			/* Remove the user cb from the callback list. */
			*prev_cb = cb->next;
			ret = 0;
			break;
		}
	}
	rte_spinlock_unlock(&rte_eth_tx_cb_lock);

	return ret;
}
int
rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (qinfo == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);

	memset(qinfo, 0, sizeof(*qinfo));
	dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
	return 0;
}
int
rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (qinfo == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);

	memset(qinfo, 0, sizeof(*qinfo));
	dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
	return 0;
}
int
rte_eth_dev_set_mc_addr_list(uint16_t port_id,
			     struct ether_addr *mc_addr_set,
			     uint32_t nb_mc_addr)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
	return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
						mc_addr_set, nb_mc_addr));
}
int
rte_eth_timesync_enable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
}

int
rte_eth_timesync_disable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
}

int
rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
				   uint32_t flags)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
			       (dev, timestamp, flags));
}

int
rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
				   struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
			       (dev, timestamp));
}

int
rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
								      delta));
}

int
rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
								    timestamp));
}

int
rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
								     timestamp));
}
int
rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
}

int
rte_eth_dev_get_eeprom_length(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
}

int
rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
}

int
rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
}
int
rte_eth_dev_get_dcb_info(uint16_t port_id,
			 struct rte_eth_dcb_info *dcb_info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
}
int
rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
				    struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (l2_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
		return -EINVAL;
	}

	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
				-ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
								l2_tunnel));
}
int
rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
				  struct rte_eth_l2_tunnel_conf *l2_tunnel,
				  uint32_t mask,
				  uint8_t en)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (l2_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
		return -EINVAL;
	}

	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
		return -EINVAL;
	}

	if (mask == 0) {
		RTE_PMD_DEBUG_TRACE("Mask should have a value.\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
				-ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
							l2_tunnel, mask, en));
}
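/*
 * Clamp a requested descriptor count to the device limits: round up to the
 * required alignment first, then bound the result by nb_max and nb_min.
 */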
static void
rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
			   const struct rte_eth_desc_lim *desc_lim)
{
	if (desc_lim->nb_align != 0)
		*nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);

	if (desc_lim->nb_max != 0)
		*nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);

	*nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
}
int
rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
				 uint16_t *nb_rx_desc,
				 uint16_t *nb_tx_desc)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);

	rte_eth_dev_info_get(port_id, &dev_info);

	if (nb_rx_desc != NULL)
		rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);

	if (nb_tx_desc != NULL)
		rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);

	return 0;
}
int
rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (pool == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->pool_ops_supported == NULL)
		return 1; /* all pools are supported */

	return (*dev->dev_ops->pool_ops_supported)(dev, pool);
}