/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/queue.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_compat.h>

#include "rte_ether.h"
#include "rte_ethdev.h"
#include "rte_ethdev_driver.h"
#include "ethdev_profile.h"
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static struct rte_eth_dev_data *rte_eth_dev_data;
static uint8_t eth_dev_last_created_port;
/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};
static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) / \
		sizeof(rte_rxq_stats_strings[0]))
static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};

#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) / \
		sizeof(rte_txq_stats_strings[0]))
#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} rte_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
	RTE_RX_OFFLOAD_BIT2STR(CRC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} rte_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
};

#undef RTE_TX_OFFLOAD_BIT2STR
/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user application,
 * the pointer to the parameters for the callback, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	void *ret_param;                        /**< Return parameter */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};
uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
	       rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
	       rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}
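
/*
 * Usage sketch (illustrative only, not part of the library): iterating all
 * valid ports with rte_eth_find_next(). The RTE_ETH_FOREACH_DEV() macro used
 * later in this file expands to essentially this loop.
 *
 *	uint16_t pid;
 *
 *	for (pid = rte_eth_find_next(0); pid < RTE_MAX_ETHPORTS;
 *	     pid = rte_eth_find_next(pid + 1))
 *		printf("port %u is valid\n", pid);
 */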
static void
rte_eth_dev_data_alloc(void)
{
	const unsigned flags = 0;
	const struct rte_memzone *mz;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
				RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
				rte_socket_id(), flags);
	} else
		mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
	if (mz == NULL)
		rte_panic("Cannot allocate memzone for ethernet port data\n");

	rte_eth_dev_data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(rte_eth_dev_data, 0,
				RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
}
struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}
static uint16_t
rte_eth_dev_find_free_port(void)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Using shared name field to find a free port. */
		if (rte_eth_dev_data[i].name[0] == '\0') {
			RTE_ASSERT(rte_eth_devices[i].state ==
				   RTE_ETH_DEV_UNUSED);
			return i;
		}
	}
	return RTE_MAX_ETHPORTS;
}
static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &rte_eth_dev_data[port_id];
	eth_dev->state = RTE_ETH_DEV_ATTACHED;

	eth_dev_last_created_port = port_id;

	return eth_dev;
}
struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev;

	port_id = rte_eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
		return NULL;
	}

	if (rte_eth_dev_data == NULL)
		rte_eth_dev_data_alloc();

	if (rte_eth_dev_allocated(name) != NULL) {
		RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
				name);
		return NULL;
	}

	eth_dev = eth_dev_get(port_id);
	snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
	eth_dev->data->port_id = port_id;
	eth_dev->data->mtu = ETHER_MTU;

	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_NEW, NULL);

	return eth_dev;
}
/*
 * Attach to a port already registered by the primary process, which
 * makes sure that the same device has the same port ID in both the
 * primary and secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev;

	if (rte_eth_dev_data == NULL)
		rte_eth_dev_data_alloc();

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(rte_eth_dev_data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_PMD_DEBUG_TRACE(
			"device %s is not driven by the primary process\n",
			name);
		return NULL;
	}

	eth_dev = eth_dev_get(i);
	RTE_ASSERT(eth_dev->data->port_id == i);

	return eth_dev;
}
int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
	eth_dev->state = RTE_ETH_DEV_UNUSED;

	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_DESTROY, NULL);

	return 0;
}
int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}
int
rte_eth_dev_socket_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}
void * __rte_experimental
rte_eth_dev_get_sec_ctx(uint8_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}
uint16_t
rte_eth_dev_count(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	return count;
}
int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (name == NULL) {
		RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[i].data',
	 * because it might be overwritten by VDEV PMD */
	tmp = rte_eth_dev_data[port_id].name;
	strcpy(name, tmp);
	return 0;
}
int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint16_t i;

	if (name == NULL) {
		RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
		return -EINVAL;
	}

	RTE_ETH_FOREACH_DEV(i) {
		if (!strncmp(name,
			rte_eth_dev_data[i].name, strlen(name))) {
			*port_id = i;
			return 0;
		}
	}
	return -ENODEV;
}
static int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}
/* attach the new device, then store port_id of the device */
int
rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
{
	int ret = -1;
	int current = rte_eth_dev_count();
	char *name = NULL;
	char *args = NULL;

	if ((devargs == NULL) || (port_id == NULL)) {
		ret = -EINVAL;
		goto err;
	}

	/* parse devargs, then retrieve device name and args */
	if (rte_eal_parse_devargs_str(devargs, &name, &args))
		goto err;

	ret = rte_eal_dev_attach(name, args);
	if (ret < 0)
		goto err;

	/* no point looking at the port count if no port exists */
	if (!rte_eth_dev_count()) {
		RTE_LOG(ERR, EAL, "No port found for device (%s)\n", name);
		ret = -1;
		goto err;
	}

	/* if nothing happened, there is a bug here, since some driver told us
	 * it did attach a device, but did not create a port.
	 */
	if (current == rte_eth_dev_count()) {
		ret = -1;
		goto err;
	}

	*port_id = eth_dev_last_created_port;
	ret = 0;

err:
	free(name);
	free(args);
	return ret;
}
/* detach the device, then store the name of the device */
int
rte_eth_dev_detach(uint16_t port_id, char *name)
{
	uint32_t dev_flags;
	int ret = -1;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (name == NULL) {
		ret = -EINVAL;
		goto err;
	}

	dev_flags = rte_eth_devices[port_id].data->dev_flags;
	if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
		RTE_LOG(ERR, EAL, "Port %" PRIu16 " is bonded, cannot detach\n",
			port_id);
		ret = -ENOTSUP;
		goto err;
	}

	snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
		 "%s", rte_eth_devices[port_id].data->name);

	ret = rte_eal_dev_detach(rte_eth_devices[port_id].device);
	if (ret < 0)
		goto err;

	rte_eth_dev_release_port(&rte_eth_devices[port_id]);
	return 0;

err:
	return ret;
}
static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	void **rxq;
	unsigned i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);
		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (rxq == NULL)
			return -(ENOMEM);
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(rxq + old_nb_queues, 0,
				sizeof(rxq[0]) * new_qs);
		}

		dev->data->rx_queues = rxq;

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);

		rte_free(dev->data->rx_queues);
		dev->data->rx_queues = NULL;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}
int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu16
			" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
							     rx_queue_id));
}
int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu16
			" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}
int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu16
			" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev,
							     tx_queue_id));
}
int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu16
			" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}
static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	void **txq;
	unsigned i;

	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
				sizeof(dev->data->tx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);
		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (txq == NULL)
			return -ENOMEM;
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(txq + old_nb_queues, 0,
				sizeof(txq[0]) * new_qs);
		}

		dev->data->tx_queues = txq;

	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);

		rte_free(dev->data->tx_queues);
		dev->data->tx_queues = NULL;
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}
uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case ETH_SPEED_NUM_10M:
		return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
	case ETH_SPEED_NUM_100M:
		return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
	case ETH_SPEED_NUM_1G:
		return ETH_LINK_SPEED_1G;
	case ETH_SPEED_NUM_2_5G:
		return ETH_LINK_SPEED_2_5G;
	case ETH_SPEED_NUM_5G:
		return ETH_LINK_SPEED_5G;
	case ETH_SPEED_NUM_10G:
		return ETH_LINK_SPEED_10G;
	case ETH_SPEED_NUM_20G:
		return ETH_LINK_SPEED_20G;
	case ETH_SPEED_NUM_25G:
		return ETH_LINK_SPEED_25G;
	case ETH_SPEED_NUM_40G:
		return ETH_LINK_SPEED_40G;
	case ETH_SPEED_NUM_50G:
		return ETH_LINK_SPEED_50G;
	case ETH_SPEED_NUM_56G:
		return ETH_LINK_SPEED_56G;
	case ETH_SPEED_NUM_100G:
		return ETH_LINK_SPEED_100G;
	default:
		return 0;
	}
}
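
/*
 * Usage sketch (illustrative only): building a fixed-speed link_speeds mask
 * for struct rte_eth_conf from a numeric speed. ETH_LINK_FULL_DUPLEX (1) is
 * the "duplex" argument for full duplex.
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.link_speeds = ETH_LINK_SPEED_FIXED |
 *		rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX);
 */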
/**
 * A conversion function from the legacy rxmode bit-field API to the
 * offloads API.
 */
static void
rte_eth_convert_rx_offload_bitfield(const struct rte_eth_rxmode *rxmode,
				    uint64_t *rx_offloads)
{
	uint64_t offloads = 0;

	if (rxmode->header_split == 1)
		offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
	if (rxmode->hw_ip_checksum == 1)
		offloads |= DEV_RX_OFFLOAD_CHECKSUM;
	if (rxmode->hw_vlan_filter == 1)
		offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
	if (rxmode->hw_vlan_strip == 1)
		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
	if (rxmode->hw_vlan_extend == 1)
		offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
	if (rxmode->jumbo_frame == 1)
		offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	if (rxmode->hw_strip_crc == 1)
		offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
	if (rxmode->enable_scatter == 1)
		offloads |= DEV_RX_OFFLOAD_SCATTER;
	if (rxmode->enable_lro == 1)
		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
	if (rxmode->hw_timestamp == 1)
		offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
	if (rxmode->security == 1)
		offloads |= DEV_RX_OFFLOAD_SECURITY;

	*rx_offloads = offloads;
}
/**
 * A conversion function from the offloads API back to the legacy rxmode
 * bit-field API.
 */
static void
rte_eth_convert_rx_offloads(const uint64_t rx_offloads,
			    struct rte_eth_rxmode *rxmode)
{
	if (rx_offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
		rxmode->header_split = 1;
	else
		rxmode->header_split = 0;
	if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
		rxmode->hw_ip_checksum = 1;
	else
		rxmode->hw_ip_checksum = 0;
	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
		rxmode->hw_vlan_filter = 1;
	else
		rxmode->hw_vlan_filter = 0;
	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
		rxmode->hw_vlan_strip = 1;
	else
		rxmode->hw_vlan_strip = 0;
	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
		rxmode->hw_vlan_extend = 1;
	else
		rxmode->hw_vlan_extend = 0;
	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
		rxmode->jumbo_frame = 1;
	else
		rxmode->jumbo_frame = 0;
	if (rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)
		rxmode->hw_strip_crc = 1;
	else
		rxmode->hw_strip_crc = 0;
	if (rx_offloads & DEV_RX_OFFLOAD_SCATTER)
		rxmode->enable_scatter = 1;
	else
		rxmode->enable_scatter = 0;
	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
		rxmode->enable_lro = 1;
	else
		rxmode->enable_lro = 0;
	if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
		rxmode->hw_timestamp = 1;
	else
		rxmode->hw_timestamp = 0;
	if (rx_offloads & DEV_RX_OFFLOAD_SECURITY)
		rxmode->security = 1;
	else
		rxmode->security = 0;
}
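
/*
 * Illustrative note: the two helpers above let an application written
 * against the legacy rxmode bit-field API run over a PMD that only
 * understands the offloads flags, and vice versa. For example, a legacy
 * configuration such as
 *
 *	rxmode.hw_ip_checksum = 1;
 *	rxmode.jumbo_frame = 1;
 *
 * converts to offloads == (DEV_RX_OFFLOAD_CHECKSUM |
 * DEV_RX_OFFLOAD_JUMBO_FRAME).
 */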
const char * __rte_experimental
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
		if (offload == rte_rx_offload_names[i].offload) {
			name = rte_rx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char * __rte_experimental
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
		if (offload == rte_tx_offload_names[i].offload) {
			name = rte_tx_offload_names[i].name;
			break;
		}
	}

	return name;
}
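
/*
 * Usage sketch (illustrative only): printing the names of all RX offload
 * capabilities of a port, assuming dev_info was filled by
 * rte_eth_dev_info_get().
 *
 *	uint64_t caps = dev_info.rx_offload_capa;
 *
 *	while (caps) {
 *		uint64_t bit = caps & ~(caps - 1); // lowest set bit
 *		printf("  %s\n", rte_eth_dev_rx_offload_name(bit));
 *		caps &= ~bit;
 *	}
 */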
int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf local_conf = *dev_conf;
	int diag;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_PMD_DEBUG_TRACE(
			"Number of RX queues requested (%u) is greater than max supported(%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		return -EINVAL;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_PMD_DEBUG_TRACE(
			"Number of TX queues requested (%u) is greater than max supported(%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	/*
	 * Convert between the offloads API to enable PMDs to support
	 * only one of them.
	 */
	if (dev_conf->rxmode.ignore_offload_bitfield == 0) {
		rte_eth_convert_rx_offload_bitfield(
			&dev_conf->rxmode, &local_conf.rxmode.offloads);
	} else {
		rte_eth_convert_rx_offloads(dev_conf->rxmode.offloads,
					    &local_conf.rxmode);
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_rx_q == 0 && nb_tx_q == 0) {
		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d both rx and tx queue cannot be 0\n", port_id);
		return -EINVAL;
	}

	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
				port_id, nb_rx_q, dev_info.max_rx_queues);
		return -EINVAL;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
				port_id, nb_tx_q, dev_info.max_tx_queues);
		return -EINVAL;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
				    dev->device->driver->name);
		return -EINVAL;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n",
				    dev->device->driver->name);
		return -EINVAL;
	}

	/*
	 * If jumbo frames are enabled, check that the maximum RX packet
	 * length is supported by the configured device.
	 */
	if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (dev_conf->rxmode.max_rx_pkt_len >
		    dev_info.max_rx_pktlen) {
			RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" > max valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)dev_info.max_rx_pktlen);
			return -EINVAL;
		} else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
			RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" < min valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)ETHER_MIN_LEN);
			return -EINVAL;
		}
	} else {
		if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
		    dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
			/* Use default value */
			dev->data->dev_conf.rxmode.max_rx_pkt_len =
							ETHER_MAX_LEN;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
				port_id, diag);
		return diag;
	}

	diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
				port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		return diag;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
				port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		return eth_err(port_id, diag);
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_profile_rx_init(port_id, dev);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d __rte_eth_profile_rx_init = %d\n",
				port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		return eth_err(port_id, diag);
	}

	return 0;
}
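
/*
 * Usage sketch (illustrative only, error handling elided): minimal port
 * bring-up around rte_eth_dev_configure(). The mempool "mbuf_pool" is
 * assumed to have been created elsewhere with rte_pktmbuf_pool_create().
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.rxmode.ignore_offload_bitfield = 1;
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 512,
 *			rte_eth_dev_socket_id(port_id), NULL, mbuf_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, 512,
 *			rte_eth_dev_socket_id(port_id), NULL);
 *	rte_eth_dev_start(port_id);
 */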
void
_rte_eth_dev_reset(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
			"port %d must be stopped to allow reset\n",
			dev->data->port_id);
		return;
	}

	rte_eth_dev_rx_queue_config(dev, 0);
	rte_eth_dev_tx_queue_config(dev, 0);

	memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}
static void
rte_eth_dev_config_restore(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	dev = &rte_eth_devices[port_id];

	rte_eth_dev_info_get(port_id, &dev_info);

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info.max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & 1ULL)
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}

	/* replay promiscuous configuration */
	if (rte_eth_promiscuous_get(port_id) == 1)
		rte_eth_promiscuous_enable(port_id);
	else if (rte_eth_promiscuous_get(port_id) == 0)
		rte_eth_promiscuous_disable(port_id);

	/* replay all multicast configuration */
	if (rte_eth_allmulticast_get(port_id) == 1)
		rte_eth_allmulticast_enable(port_id);
	else if (rte_eth_allmulticast_get(port_id) == 0)
		rte_eth_allmulticast_disable(port_id);
}
int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int diag;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
			" already started\n",
			port_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	rte_eth_dev_config_restore(port_id);

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
		(*dev->dev_ops->link_update)(dev, 0);
	}
	return 0;
}
void
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
			" already stopped\n",
			port_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
}
int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
}
int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}
void
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_close)(dev);

	dev->data->nb_rx_queues = 0;
	rte_free(dev->data->rx_queues);
	dev->data->rx_queues = NULL;
	dev->data->nb_tx_queues = 0;
	rte_free(dev->data->tx_queues);
	dev->data->tx_queues = NULL;
}
int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);

	rte_eth_dev_stop(port_id);
	ret = dev->dev_ops->dev_reset(dev);

	return eth_err(port_id, ret);
}
int __rte_experimental
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);

	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	return ret;
}
int
rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint32_t mbp_buf_size;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf local_conf;
	void **rxq;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

	/*
	 * Check the size of the mbuf data buffer.
	 * This value must be provided in the private data of the memory pool.
	 * First check that the memory pool has a valid private data.
	 */
	rte_eth_dev_info_get(port_id, &dev_info);
	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
				mp->name, (int) mp->private_data_size,
				(int) sizeof(struct rte_pktmbuf_pool_private));
		return -ENOSPC;
	}
	mbp_buf_size = rte_pktmbuf_data_room_size(mp);

	if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
		RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
			"(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
			"=%d)\n",
			mp->name,
			(int)mbp_buf_size,
			(int)(RTE_PKTMBUF_HEADROOM +
			      dev_info.min_rx_bufsize),
			(int)RTE_PKTMBUF_HEADROOM,
			(int)dev_info.min_rx_bufsize);
		return -EINVAL;
	}

	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
	    nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
	    nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
		RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
			"should be: <= %hu, >= %hu, and a multiple of %hu\n",
			nb_rx_desc,
			dev_info.rx_desc_lim.nb_max,
			dev_info.rx_desc_lim.nb_min,
			dev_info.rx_desc_lim.nb_align);
		return -EINVAL;
	}

	rxq = dev->data->rx_queues;
	if (rxq[rx_queue_id]) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
					-ENOTSUP);
		(*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
		rxq[rx_queue_id] = NULL;
	}

	if (rx_conf == NULL)
		rx_conf = &dev_info.default_rxconf;

	local_conf = *rx_conf;
	if (dev->data->dev_conf.rxmode.ignore_offload_bitfield == 0) {
		/**
		 * Reflect port offloads to queue offloads in order for
		 * offloads to not be discarded.
		 */
		rte_eth_convert_rx_offload_bitfield(&dev->data->dev_conf.rxmode,
						    &local_conf.offloads);
	}

	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
					      socket_id, &local_conf, mp);
	if (!ret) {
		if (!dev->data->min_rx_buf_size ||
		    dev->data->min_rx_buf_size > mbp_buf_size)
			dev->data->min_rx_buf_size = mbp_buf_size;
	}

	return eth_err(port_id, ret);
}
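
/*
 * Usage sketch (illustrative only): a mempool that satisfies the buffer-size
 * check above. RTE_MBUF_DEFAULT_BUF_SIZE already includes
 * RTE_PKTMBUF_HEADROOM, so the data room left after the headroom must still
 * be >= dev_info.min_rx_bufsize.
 *
 *	struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool",
 *		8191, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *
 *	rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL, mp);
 */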
/**
 * A conversion function from the legacy txq_flags API to the offloads API.
 */
static void
rte_eth_convert_txq_flags(const uint32_t txq_flags, uint64_t *tx_offloads)
{
	uint64_t offloads = 0;

	if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
		offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
	if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
		offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
		offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
		offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
		offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
	if ((txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT) &&
	    (txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP))
		offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	*tx_offloads = offloads;
}
/**
 * A conversion function from the offloads API back to the legacy txq_flags.
 */
static void
rte_eth_convert_txq_offloads(const uint64_t tx_offloads, uint32_t *txq_flags)
{
	uint32_t flags = 0;

	if (!(tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
		flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
	if (!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
		flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
	if (!(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
		flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
	if (!(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
		flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
	if (!(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
		flags |= ETH_TXQ_FLAGS_NOXSUMTCP;
	if (tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		flags |= (ETH_TXQ_FLAGS_NOREFCOUNT | ETH_TXQ_FLAGS_NOMULTMEMP);

	*txq_flags = flags;
}
int
rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
		       uint16_t nb_tx_desc, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf local_conf;
	void **txq;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);

	rte_eth_dev_info_get(port_id, &dev_info);

	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
		RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
			"should be: <= %hu, >= %hu, and a multiple of %hu\n",
			nb_tx_desc,
			dev_info.tx_desc_lim.nb_max,
			dev_info.tx_desc_lim.nb_min,
			dev_info.tx_desc_lim.nb_align);
		return -EINVAL;
	}

	txq = dev->data->tx_queues;
	if (txq[tx_queue_id]) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
					-ENOTSUP);
		(*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
		txq[tx_queue_id] = NULL;
	}

	if (tx_conf == NULL)
		tx_conf = &dev_info.default_txconf;

	/*
	 * Convert between the offloads API to enable PMDs to support
	 * only one of them.
	 */
	local_conf = *tx_conf;
	if (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) {
		rte_eth_convert_txq_offloads(tx_conf->offloads,
					     &local_conf.txq_flags);
		/* Keep the ignore flag. */
		local_conf.txq_flags |= ETH_TXQ_FLAGS_IGNORE;
	} else {
		rte_eth_convert_txq_flags(tx_conf->txq_flags,
					  &local_conf.offloads);
	}

	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
}
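
/*
 * Usage sketch (illustrative only): requesting the offloads path explicitly.
 * Setting ETH_TXQ_FLAGS_IGNORE in txq_flags tells the conversion above to
 * take tx_conf->offloads as authoritative instead of the legacy flags.
 *
 *	struct rte_eth_txconf txconf = dev_info.default_txconf;
 *
 *	txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
 *	txconf.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 *	rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), &txconf);
 */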
void
rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata __rte_unused)
{
	unsigned i;

	for (i = 0; i < unsent; i++)
		rte_pktmbuf_free(pkts[i]);
}
void
rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata)
{
	uint64_t *count = userdata;
	unsigned i;

	for (i = 0; i < unsent; i++)
		rte_pktmbuf_free(pkts[i]);

	*count += unsent;
}
int
rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
		buffer_tx_error_fn cbfn, void *userdata)
{
	buffer->error_callback = cbfn;
	buffer->error_userdata = userdata;
	return 0;
}
int
rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
{
	int ret = 0;

	if (buffer == NULL)
		return -EINVAL;

	buffer->size = size;
	if (buffer->error_callback == NULL) {
		ret = rte_eth_tx_buffer_set_err_callback(
			buffer, rte_eth_tx_buffer_drop_callback, NULL);
	}

	return ret;
}
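
/*
 * Usage sketch (illustrative only): buffered TX with a counting error
 * callback. RTE_ETH_TX_BUFFER_SIZE() gives the byte size needed for a
 * buffer holding "size" mbuf pointers.
 *
 *	uint64_t dropped = 0;
 *	struct rte_eth_dev_tx_buffer *buf =
 *		rte_zmalloc("tx_buf", RTE_ETH_TX_BUFFER_SIZE(32), 0);
 *
 *	rte_eth_tx_buffer_init(buf, 32);
 *	rte_eth_tx_buffer_set_err_callback(buf,
 *		rte_eth_tx_buffer_count_callback, &dropped);
 *	rte_eth_tx_buffer(port_id, 0, buf, pkt);   (queue one mbuf)
 *	rte_eth_tx_buffer_flush(port_id, 0, buf);  (push out what is left)
 */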
int
rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

	/* Validate Input Data. Bail if not valid or not supported. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);

	/* Call driver to free pending mbufs. */
	ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
					       free_cnt);
	return eth_err(port_id, ret);
}
void
rte_eth_promiscuous_enable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
	(*dev->dev_ops->promiscuous_enable)(dev);
	dev->data->promiscuous = 1;
}
void
rte_eth_promiscuous_disable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
	dev->data->promiscuous = 0;
	(*dev->dev_ops->promiscuous_disable)(dev);
}
int
rte_eth_promiscuous_get(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	return dev->data->promiscuous;
}
void
rte_eth_allmulticast_enable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
	(*dev->dev_ops->allmulticast_enable)(dev);
	dev->data->all_multicast = 1;
}
void
rte_eth_allmulticast_disable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
	dev->data->all_multicast = 0;
	(*dev->dev_ops->allmulticast_disable)(dev);
}
int
rte_eth_allmulticast_get(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	return dev->data->all_multicast;
}
static inline void
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				    struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return;
}
void
rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc != 0)
		rte_eth_dev_atomic_read_link_status(dev, eth_link);
	else {
		RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 1);
		*eth_link = dev->data->dev_link;
	}
}
void
rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc != 0)
		rte_eth_dev_atomic_read_link_status(dev, eth_link);
	else {
		RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 0);
		*eth_link = dev->data->dev_link;
	}
}
int
rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
	return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
}
int
rte_eth_stats_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
	(*dev->dev_ops->stats_reset)(dev);
	dev->data->rx_mbuf_alloc_failed = 0;

	return 0;
}
static inline int
get_xstats_basic_count(struct rte_eth_dev *dev)
{
	uint16_t nb_rxqs, nb_txqs;
	int count;

	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	count = RTE_NB_STATS;
	count += nb_rxqs * RTE_NB_RXQ_STATS;
	count += nb_txqs * RTE_NB_TXQ_STATS;

	return count;
}
static int
get_xstats_count(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int count;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	dev = &rte_eth_devices[port_id];
	if (dev->dev_ops->xstats_get_names_by_id != NULL) {
		count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
				NULL, 0);
		if (count < 0)
			return eth_err(port_id, count);
	}
	if (dev->dev_ops->xstats_get_names != NULL) {
		count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
		if (count < 0)
			return eth_err(port_id, count);
	} else
		count = 0;

	count += get_xstats_basic_count(dev);

	return count;
}
int
rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
		uint64_t *id)
{
	int cnt_xstats, idx_xstat;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (!id) {
		RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n");
		return -ENOMEM;
	}

	if (!xstat_name) {
		RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n");
		return -ENOMEM;
	}

	/* Get count */
	cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
	if (cnt_xstats < 0) {
		RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n");
		return -ENODEV;
	}

	/* Get id-name lookup table */
	struct rte_eth_xstat_name xstats_names[cnt_xstats];

	if (cnt_xstats != rte_eth_xstats_get_names_by_id(
			port_id, xstats_names, cnt_xstats, NULL)) {
		RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n");
		return -1;
	}

	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
		if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
			*id = idx_xstat;
			return 0;
		}
	}

	return -EINVAL;
}
/* retrieve basic stats names */
static int
rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names)
{
	int cnt_used_entries = 0;
	uint32_t idx, id_queue;
	uint16_t num_q;

	for (idx = 0; idx < RTE_NB_STATS; idx++) {
		snprintf(xstats_names[cnt_used_entries].name,
			sizeof(xstats_names[0].name),
			"%s", rte_stats_strings[idx].name);
		cnt_used_entries++;
	}
	num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (id_queue = 0; id_queue < num_q; id_queue++) {
		for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
			snprintf(xstats_names[cnt_used_entries].name,
				sizeof(xstats_names[0].name),
				"rx_q%u_%s",
				id_queue, rte_rxq_stats_strings[idx].name);
			cnt_used_entries++;
		}
	}
	num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (id_queue = 0; id_queue < num_q; id_queue++) {
		for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
			snprintf(xstats_names[cnt_used_entries].name,
				sizeof(xstats_names[0].name),
				"tx_q%u_%s",
				id_queue, rte_txq_stats_strings[idx].name);
			cnt_used_entries++;
		}
	}
	return cnt_used_entries;
}
/* retrieve ethdev extended statistics names */
int
rte_eth_xstats_get_names_by_id(uint16_t port_id,
	struct rte_eth_xstat_name *xstats_names, unsigned int size,
	uint64_t *ids)
{
	struct rte_eth_xstat_name *xstats_names_copy;
	unsigned int no_basic_stat_requested = 1;
	unsigned int no_ext_stat_requested = 1;
	unsigned int expected_entries;
	unsigned int basic_count;
	struct rte_eth_dev *dev;
	unsigned int i;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	basic_count = get_xstats_basic_count(dev);
	ret = get_xstats_count(port_id);
	if (ret < 0)
		return ret;
	expected_entries = (unsigned int)ret;

	/* Return max number of stats if no ids given */
	if (!ids) {
		if (!xstats_names)
			return expected_entries;
		else if (xstats_names && size < expected_entries)
			return expected_entries;
	}

	if (ids && !xstats_names)
		return -EINVAL;

	if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
		uint64_t ids_copy[size];

		for (i = 0; i < size; i++) {
			if (ids[i] < basic_count) {
				no_basic_stat_requested = 0;
				break;
			}

			/*
			 * Convert ids to xstats ids that PMD knows.
			 * ids known by user are basic + extended stats.
			 */
			ids_copy[i] = ids[i] - basic_count;
		}

		if (no_basic_stat_requested)
			return (*dev->dev_ops->xstats_get_names_by_id)(dev,
					xstats_names, ids_copy, size);
	}

	/* Retrieve all stats */
	if (!ids) {
		int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
				expected_entries);
		if (num_stats < 0 || num_stats > (int)expected_entries)
			return num_stats;
		else
			return expected_entries;
	}

	xstats_names_copy = calloc(expected_entries,
		sizeof(struct rte_eth_xstat_name));

	if (!xstats_names_copy) {
		RTE_PMD_DEBUG_TRACE("ERROR: can't allocate memory");
		return -ENOMEM;
	}

	if (ids) {
		for (i = 0; i < size; i++) {
			if (ids[i] > basic_count) {
				no_ext_stat_requested = 0;
				break;
			}
		}
	}

	/* Fill xstats_names_copy structure */
	if (ids && no_ext_stat_requested) {
		rte_eth_basic_stats_get_names(dev, xstats_names_copy);
	} else {
		ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
				expected_entries);
		if (ret < 0) {
			free(xstats_names_copy);
			return ret;
		}
	}

	/* Filter stats */
	for (i = 0; i < size; i++) {
		if (ids[i] >= expected_entries) {
			RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
			free(xstats_names_copy);
			return -1;
		}
		xstats_names[i] = xstats_names_copy[ids[i]];
	}

	free(xstats_names_copy);
	return size;
}
int
rte_eth_xstats_get_names(uint16_t port_id,
	struct rte_eth_xstat_name *xstats_names,
	unsigned int size)
{
	struct rte_eth_dev *dev;
	int cnt_used_entries;
	int cnt_expected_entries;
	int cnt_driver_entries;

	cnt_expected_entries = get_xstats_count(port_id);
	if (xstats_names == NULL || cnt_expected_entries < 0 ||
	    (int)size < cnt_expected_entries)
		return cnt_expected_entries;

	/* port_id checked in get_xstats_count() */
	dev = &rte_eth_devices[port_id];

	cnt_used_entries = rte_eth_basic_stats_get_names(
		dev, xstats_names);

	if (dev->dev_ops->xstats_get_names != NULL) {
		/* If there are any driver-specific xstats, append them
		 * to end of list.
		 */
		cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
			dev,
			xstats_names + cnt_used_entries,
			size - cnt_used_entries);
		if (cnt_driver_entries < 0)
			return eth_err(port_id, cnt_driver_entries);
		cnt_used_entries += cnt_driver_entries;
	}

	return cnt_used_entries;
}
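
/*
 * Usage sketch (illustrative only): the usual two-call pattern. A first call
 * with a NULL array returns the required count; the second call fills the
 * names and values.
 *
 *	int i, n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *	struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *
 *	rte_eth_xstats_get_names(port_id, names, n);
 *	rte_eth_xstats_get(port_id, xs, n);
 *	for (i = 0; i < n; i++)
 *		printf("%s = %" PRIu64 "\n",
 *			names[xs[i].id].name, xs[i].value);
 */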
static int
rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
{
	struct rte_eth_dev *dev;
	struct rte_eth_stats eth_stats;
	unsigned int count = 0, i, q;
	uint64_t val, *stats_ptr;
	uint16_t nb_rxqs, nb_txqs;
	int ret;

	ret = rte_eth_stats_get(port_id, &eth_stats);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];

	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	/* global stats */
	for (i = 0; i < RTE_NB_STATS; i++) {
		stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_stats_strings[i].offset);
		val = *stats_ptr;
		xstats[count++].value = val;
	}

	/* per-rxq stats */
	for (q = 0; q < nb_rxqs; q++) {
		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_rxq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}

	/* per-txq stats */
	for (q = 0; q < nb_txqs; q++) {
		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_txq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}
	return count;
}
/* retrieve ethdev extended statistics */
int
rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
			 uint64_t *values, unsigned int size)
{
	unsigned int no_basic_stat_requested = 1;
	unsigned int no_ext_stat_requested = 1;
	unsigned int num_xstats_filled;
	unsigned int basic_count;
	uint16_t expected_entries;
	struct rte_eth_dev *dev;
	unsigned int i;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ret = get_xstats_count(port_id);
	if (ret < 0)
		return ret;
	expected_entries = (uint16_t)ret;
	struct rte_eth_xstat xstats[expected_entries];
	dev = &rte_eth_devices[port_id];
	basic_count = get_xstats_basic_count(dev);

	/* Return max number of stats if no ids given */
	if (!ids) {
		if (!values)
			return expected_entries;
		else if (values && size < expected_entries)
			return expected_entries;
	}

	if (ids && !values)
		return -EINVAL;

	if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
		unsigned int basic_count = get_xstats_basic_count(dev);
		uint64_t ids_copy[size];

		for (i = 0; i < size; i++) {
			if (ids[i] < basic_count) {
				no_basic_stat_requested = 0;
				break;
			}

			/*
			 * Convert ids to xstats ids that PMD knows.
			 * ids known by user are basic + extended stats.
			 */
			ids_copy[i] = ids[i] - basic_count;
		}

		if (no_basic_stat_requested)
			return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
					values, size);
	}

	if (ids) {
		for (i = 0; i < size; i++) {
			if (ids[i] > basic_count) {
				no_ext_stat_requested = 0;
				break;
			}
		}
	}

	/* Fill the xstats structure */
	if (ids && no_ext_stat_requested)
		ret = rte_eth_basic_stats_get(port_id, xstats);
	else
		ret = rte_eth_xstats_get(port_id, xstats, expected_entries);

	if (ret < 0)
		return ret;
	num_xstats_filled = (unsigned int)ret;

	/* Return all stats */
	if (!ids) {
		for (i = 0; i < num_xstats_filled; i++)
			values[i] = xstats[i].value;
		return expected_entries;
	}

	/* Filter stats */
	for (i = 0; i < size; i++) {
		if (ids[i] >= expected_entries) {
			RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
			return -1;
		}
		values[i] = xstats[ids[i]].value;
	}
	return size;
}
int
rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
	unsigned int n)
{
	struct rte_eth_dev *dev;
	unsigned int count = 0, i;
	signed int xcount = 0;
	uint16_t nb_rxqs, nb_txqs;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	/* Return generic statistics */
	count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
		(nb_txqs * RTE_NB_TXQ_STATS);

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL) {
		/* Retrieve the xstats from the driver at the end of the
		 * xstats struct.
		 */
		xcount = (*dev->dev_ops->xstats_get)(dev,
				     xstats ? xstats + count : NULL,
				     (n > count) ? n - count : 0);

		if (xcount < 0)
			return eth_err(port_id, xcount);
	}

	if (n < count + xcount || xstats == NULL)
		return count + xcount;

	/* now fill the xstats structure */
	ret = rte_eth_basic_stats_get(port_id, xstats);
	if (ret < 0)
		return ret;
	count = ret;

	for (i = 0; i < count; i++)
		xstats[i].id = i;
	/* add an offset to driver-specific stats */
	for ( ; i < count + xcount; i++)
		xstats[i].id += count;

	return count + xcount;
}
/* reset ethdev extended statistics */
void
rte_eth_xstats_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_reset != NULL) {
		(*dev->dev_ops->xstats_reset)(dev);
		return;
	}

	/* fallback to default */
	rte_eth_stats_reset(port_id);
}
static int
set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
		uint8_t is_rx)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
	return (*dev->dev_ops->queue_stats_mapping_set)
			(dev, queue_id, stat_idx, is_rx);
}
int
rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
		uint8_t stat_idx)
{
	return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
						stat_idx, STAT_QMAP_TX));
}
int
rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
		uint8_t stat_idx)
{
	return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
						stat_idx, STAT_QMAP_RX));
}
int
rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
							fw_version, fw_size));
}
void
rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
{
	struct rte_eth_dev *dev;
	const struct rte_eth_desc_lim lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
	};

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
	dev_info->rx_desc_lim = lim;
	dev_info->tx_desc_lim = lim;

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
	dev_info->driver_name = dev->device->driver->name;
	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
}
int
rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
				 uint32_t *ptypes, int num)
{
	int i, j;
	struct rte_eth_dev *dev;
	const uint32_t *all_ptypes;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);

	if (!all_ptypes)
		return 0;

	for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
		if (all_ptypes[i] & ptype_mask) {
			if (j < num)
				ptypes[j] = all_ptypes[i];
			j++;
		}

	return j;
}
void
rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];
	ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
}
int
rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	*mtu = dev->data->mtu;
	return 0;
}
int
rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
{
	int ret;
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);

	ret = (*dev->dev_ops->mtu_set)(dev, mtu);
	if (!ret)
		dev->data->mtu = mtu;

	return eth_err(port_id, ret);
}
int
rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	if (!(dev->data->dev_conf.rxmode.offloads &
	      DEV_RX_OFFLOAD_VLAN_FILTER)) {
		RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
		return -ENOSYS;
	}

	if (vlan_id > 4095) {
		RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
				port_id, (unsigned) vlan_id);
		return -EINVAL;
	}
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);

	ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
	if (ret == 0) {
		struct rte_vlan_filter_conf *vfc;
		int vidx;
		int vbit;

		vfc = &dev->data->vlan_filter_conf;
		vidx = vlan_id / 64;
		vbit = vlan_id % 64;

		if (on)
			vfc->ids[vidx] |= UINT64_C(1) << vbit;
		else
			vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
	}

	return eth_err(port_id, ret);
}
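
/*
 * Usage sketch (illustrative only): VLAN filtering must be enabled as an RX
 * offload at configure time before per-VLAN entries can be added here.
 *
 *	conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *	...
 *	rte_eth_dev_vlan_filter(port_id, 100, 1);  (accept VLAN ID 100)
 */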
int
rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
				    int on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
	(*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);

	return 0;
}
int
rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
				enum rte_vlan_type vlan_type,
				uint16_t tpid)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);

	return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
							       tpid));
}
int
rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
{
	struct rte_eth_dev *dev;
	int ret = 0;
	int mask = 0;
	int cur, org = 0;
	uint64_t orig_offloads;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	/* save original values in case of failure */
	orig_offloads = dev->data->dev_conf.rxmode.offloads;

	/* check which options were changed by the application */
	cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.offloads &
		 DEV_RX_OFFLOAD_VLAN_STRIP);
	if (cur != org) {
		if (cur)
			dev->data->dev_conf.rxmode.offloads |=
				DEV_RX_OFFLOAD_VLAN_STRIP;
		else
			dev->data->dev_conf.rxmode.offloads &=
				~DEV_RX_OFFLOAD_VLAN_STRIP;
		mask |= ETH_VLAN_STRIP_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.offloads &
		 DEV_RX_OFFLOAD_VLAN_FILTER);
	if (cur != org) {
		if (cur)
			dev->data->dev_conf.rxmode.offloads |=
				DEV_RX_OFFLOAD_VLAN_FILTER;
		else
			dev->data->dev_conf.rxmode.offloads &=
				~DEV_RX_OFFLOAD_VLAN_FILTER;
		mask |= ETH_VLAN_FILTER_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.offloads &
		 DEV_RX_OFFLOAD_VLAN_EXTEND);
	if (cur != org) {
		if (cur)
			dev->data->dev_conf.rxmode.offloads |=
				DEV_RX_OFFLOAD_VLAN_EXTEND;
		else
			dev->data->dev_conf.rxmode.offloads &=
				~DEV_RX_OFFLOAD_VLAN_EXTEND;
		mask |= ETH_VLAN_EXTEND_MASK;
	}

	/* no change */
	if (mask == 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);

	/*
	 * Convert to the offload bitfield API just in case the underlying PMD
	 * still supports it.
	 */
	rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
				    &dev->data->dev_conf.rxmode);
	ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
	if (ret) {
		/* hit an error, restore the original values */
		dev->data->dev_conf.rxmode.offloads = orig_offloads;
		rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
					    &dev->data->dev_conf.rxmode);
	}

	return eth_err(port_id, ret);
}

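/*
 * Usage sketch (illustrative only; the ETHDEV_DOC_EXAMPLES guard is
 * hypothetical). Offload flags are read-modify-written so that bits the
 * application does not touch keep their configured value:
 */
#ifdef ETHDEV_DOC_EXAMPLES
static int
example_enable_vlan_strip(uint16_t port_id)
{
	int mask;

	mask = rte_eth_dev_get_vlan_offload(port_id);
	if (mask < 0)
		return mask;
	/* Only the bits that differ from the current configuration are
	 * forwarded to the PMD by rte_eth_dev_set_vlan_offload().
	 */
	return rte_eth_dev_set_vlan_offload(port_id,
					    mask | ETH_VLAN_STRIP_OFFLOAD);
}
#endif /* ETHDEV_DOC_EXAMPLES */
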
int
rte_eth_dev_get_vlan_offload(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret = 0;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_VLAN_STRIP)
		ret |= ETH_VLAN_STRIP_OFFLOAD;

	if (dev->data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_VLAN_FILTER)
		ret |= ETH_VLAN_FILTER_OFFLOAD;

	if (dev->data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_VLAN_EXTEND)
		ret |= ETH_VLAN_EXTEND_OFFLOAD;

	return ret;
}

int
rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);

	return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
}

int
rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
	memset(fc_conf, 0, sizeof(*fc_conf));
	return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
}

int
rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
		RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
}

int
rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
				   struct rte_eth_pfc_conf *pfc_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
		RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	/* High-water and low-water validation is device specific */
	if (*dev->dev_ops->priority_flow_ctrl_set)
		return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
					(dev, pfc_conf));
	return -ENOTSUP;
}

static int
rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	uint16_t i, num;

	if (!reta_conf)
		return -EINVAL;

	/* The update is valid if at least one group mask bit is set. */
	num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
	for (i = 0; i < num; i++) {
		if (reta_conf[i].mask)
			return 0;
	}

	return -EINVAL;
}

static int
rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size,
			 uint16_t max_rxq)
{
	uint16_t i, idx, shift;

	if (!reta_conf)
		return -EINVAL;

	if (max_rxq == 0) {
		RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if ((reta_conf[idx].mask & (1ULL << shift)) &&
			(reta_conf[idx].reta[shift] >= max_rxq)) {
			RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
				"the maximum rxq index: %u\n", idx, shift,
				reta_conf[idx].reta[shift], max_rxq);
			return -EINVAL;
		}
	}

	return 0;
}

int
rte_eth_dev_rss_reta_update(uint16_t port_id,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	/* Check mask bits */
	ret = rte_eth_check_reta_mask(reta_conf, reta_size);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];

	/* Check entry value */
	ret = rte_eth_check_reta_entry(reta_conf, reta_size,
				dev->data->nb_rx_queues);
	if (ret < 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
							     reta_size));
}

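/*
 * Usage sketch (illustrative only; the ETHDEV_DOC_EXAMPLES guard is
 * hypothetical). The redirection table is addressed in groups of
 * RTE_RETA_GROUP_SIZE entries; each group carries a mask selecting which
 * of its entries to update. A round-robin spread over nb_rxq queues:
 */
#ifdef ETHDEV_DOC_EXAMPLES
static int
example_spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_rxq)
{
	/* Assumes reta_size <= 512, i.e. at most 8 groups of 64 entries. */
	struct rte_eth_rss_reta_entry64 reta_conf[8];
	uint16_t i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++) {
		uint16_t idx = i / RTE_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_RETA_GROUP_SIZE;

		reta_conf[idx].mask |= UINT64_C(1) << shift;
		reta_conf[idx].reta[shift] = i % nb_rxq;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}
#endif /* ETHDEV_DOC_EXAMPLES */
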
int
rte_eth_dev_rss_reta_query(uint16_t port_id,
			   struct rte_eth_rss_reta_entry64 *reta_conf,
			   uint16_t reta_size)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	/* Check mask bits */
	ret = rte_eth_check_reta_mask(reta_conf, reta_size);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
							    reta_size));
}

int
rte_eth_dev_rss_hash_update(uint16_t port_id,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
								 rss_conf));
}

int
rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
			      struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
								   rss_conf));
}

int
rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
				struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (udp_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
		return -EINVAL;
	}

	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
								udp_tunnel));
}

int
rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
				   struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (udp_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
		return -EINVAL;
	}

	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
								udp_tunnel));
}

int
rte_eth_led_on(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
}

int
rte_eth_led_off(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
}

/*
 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
 * an empty spot.
 */
static int
get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	unsigned i;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	rte_eth_dev_info_get(port_id, &dev_info);

	for (i = 0; i < dev_info.max_mac_addrs; i++)
		if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
			return i;

	return -1;
}

static const struct ether_addr null_mac_addr;

int
rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
			uint32_t pool)
{
	struct rte_eth_dev *dev;
	int index;
	uint64_t pool_mask;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);

	if (is_zero_ether_addr(addr)) {
		RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
			port_id);
		return -EINVAL;
	}
	if (pool >= ETH_64_POOLS) {
		RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
		return -EINVAL;
	}

	index = get_mac_addr_index(port_id, addr);
	if (index < 0) {
		index = get_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
				port_id);
			return -ENOSPC;
		}
	} else {
		pool_mask = dev->data->mac_pool_sel[index];

		/* If both the MAC address and pool are already set, do nothing */
		if (pool_mask & (1ULL << pool))
			return 0;
	}

	/* Update NIC */
	ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);

	if (ret == 0) {
		/* Update address in NIC data structure */
		ether_addr_copy(addr, &dev->data->mac_addrs[index]);

		/* Update pool bitmap in NIC data structure */
		dev->data->mac_pool_sel[index] |= (1ULL << pool);
	}

	return eth_err(port_id, ret);
}

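/*
 * Usage sketch (illustrative only; the ETHDEV_DOC_EXAMPLES guard is
 * hypothetical and the address below is a made-up, locally administered
 * example). Adding a secondary unicast address to the default pool:
 */
#ifdef ETHDEV_DOC_EXAMPLES
static int
example_add_secondary_mac(uint16_t port_id)
{
	struct ether_addr mac = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
	};

	/* Pool 0 is the default pool when VMDq is not in use. */
	return rte_eth_dev_mac_addr_add(port_id, &mac, 0);
}
#endif /* ETHDEV_DOC_EXAMPLES */
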
int
rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
{
	struct rte_eth_dev *dev;
	int index;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);

	index = get_mac_addr_index(port_id, addr);
	if (index == 0) {
		RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
		return -EADDRINUSE;
	} else if (index < 0)
		return 0;  /* Do nothing if address wasn't found */

	/* Update NIC */
	(*dev->dev_ops->mac_addr_remove)(dev, index);

	/* Update address in NIC data structure */
	ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);

	/* reset pool bitmap */
	dev->data->mac_pool_sel[index] = 0;

	return 0;
}

int
rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (!is_valid_assigned_ether_addr(addr))
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);

	/* Update default address in NIC data structure */
	ether_addr_copy(addr, &dev->data->mac_addrs[0]);

	(*dev->dev_ops->mac_addr_set)(dev, addr);

	return 0;
}

/*
 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
 * an empty spot.
 */
static int
get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	unsigned i;

	rte_eth_dev_info_get(port_id, &dev_info);
	if (!dev->data->hash_mac_addrs)
		return -1;

	for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
		if (memcmp(addr, &dev->data->hash_mac_addrs[i],
			ETHER_ADDR_LEN) == 0)
			return i;

	return -1;
}

int
rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
				uint8_t on)
{
	int index;
	int ret;
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (is_zero_ether_addr(addr)) {
		RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
			port_id);
		return -EINVAL;
	}

	index = get_hash_mac_addr_index(port_id, addr);
	/* Check if it's already there, and do nothing */
	if ((index >= 0) && on)
		return 0;

	if (index < 0) {
		if (!on) {
			RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
				"set in UTA\n", port_id);
			return -EINVAL;
		}

		index = get_hash_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
					port_id);
			return -ENOSPC;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
	ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
	if (ret == 0) {
		/* Update address in NIC data structure */
		if (on)
			ether_addr_copy(addr,
					&dev->data->hash_mac_addrs[index]);
		else
			ether_addr_copy(&null_mac_addr,
					&dev->data->hash_mac_addrs[index]);
	}

	return eth_err(port_id, ret);
}

int
rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
								       on));
}

int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
					uint16_t tx_rate)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_link link;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	rte_eth_dev_info_get(port_id, &dev_info);
	link = dev->data->dev_link;

	if (queue_idx >= dev_info.max_tx_queues) {
		RTE_PMD_DEBUG_TRACE("set queue rate limit: port %d: "
				"invalid queue id=%d\n", port_id, queue_idx);
		return -EINVAL;
	}

	if (tx_rate > link.link_speed) {
		RTE_PMD_DEBUG_TRACE("set queue rate limit: invalid tx_rate=%d, "
				"bigger than link speed=%d\n",
				tx_rate, link.link_speed);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
							queue_idx, tx_rate));
}

int
rte_eth_mirror_rule_set(uint16_t port_id,
			struct rte_eth_mirror_conf *mirror_conf,
			uint8_t rule_id, uint8_t on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (mirror_conf->rule_type == 0) {
		RTE_PMD_DEBUG_TRACE("mirror rule type cannot be 0.\n");
		return -EINVAL;
	}

	if (mirror_conf->dst_pool >= ETH_64_POOLS) {
		RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
				ETH_64_POOLS - 1);
		return -EINVAL;
	}

	if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
	     ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
	    (mirror_conf->pool_mask == 0)) {
		RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask cannot be 0.\n");
		return -EINVAL;
	}

	if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
	    mirror_conf->vlan.vlan_mask == 0) {
		RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask cannot be 0.\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);

	return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
						mirror_conf, rule_id, on));
}

int
rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);

	return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
								   rule_id));
}

/* Initialise the per-port callback lists before any device registers. */
RTE_INIT(eth_dev_init_cb_lists)
{
	int i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
		TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
}

int
rte_eth_dev_callback_register(uint16_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *user_cb;
	uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
	uint16_t last_port;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
		RTE_LOG(ERR, EAL, "Invalid port_id=%d\n", port_id);
		return -EINVAL;
	}

	if (port_id == RTE_ETH_ALL) {
		next_port = 0;
		last_port = RTE_MAX_ETHPORTS - 1;
	} else {
		next_port = last_port = port_id;
	}

	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	do {
		dev = &rte_eth_devices[next_port];

		TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
			if (user_cb->cb_fn == cb_fn &&
				user_cb->cb_arg == cb_arg &&
				user_cb->event == event) {
				break;
			}
		}

		/* create a new callback. */
		if (user_cb == NULL) {
			user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_eth_dev_callback), 0);
			if (user_cb != NULL) {
				user_cb->cb_fn = cb_fn;
				user_cb->cb_arg = cb_arg;
				user_cb->event = event;
				TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
						  user_cb, next);
			} else {
				/* Allocation failed: roll back any callbacks
				 * registered on earlier ports of this loop.
				 */
				rte_spinlock_unlock(&rte_eth_dev_cb_lock);
				rte_eth_dev_callback_unregister(port_id, event,
								cb_fn, cb_arg);
				return -ENOMEM;
			}
		}
	} while (++next_port <= last_port);

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	return 0;
}

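/*
 * Usage sketch (illustrative only; the ETHDEV_DOC_EXAMPLES guard is
 * hypothetical). Registering a link-status-change callback on every port
 * at once via the RTE_ETH_ALL wildcard:
 */
#ifdef ETHDEV_DOC_EXAMPLES
static int
example_lsc_cb(uint16_t port_id, enum rte_eth_event_type event,
	       void *cb_arg, void *ret_param)
{
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);
	printf("port %u: link state event %d\n", port_id, (int)event);
	return 0;
}

static int
example_watch_all_ports(void)
{
	return rte_eth_dev_callback_register(RTE_ETH_ALL,
			RTE_ETH_EVENT_INTR_LSC, example_lsc_cb, NULL);
}
#endif /* ETHDEV_DOC_EXAMPLES */
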
int
rte_eth_dev_callback_unregister(uint16_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *cb, *next;
	uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
	uint16_t last_port;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
		RTE_LOG(ERR, EAL, "Invalid port_id=%d\n", port_id);
		return -EINVAL;
	}

	if (port_id == RTE_ETH_ALL) {
		next_port = 0;
		last_port = RTE_MAX_ETHPORTS - 1;
	} else {
		next_port = last_port = port_id;
	}

	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	do {
		dev = &rte_eth_devices[next_port];
		ret = 0;
		for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
		     cb = next) {

			next = TAILQ_NEXT(cb, next);

			if (cb->cb_fn != cb_fn || cb->event != event ||
			    (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
				continue;

			/*
			 * if this callback is not executing right now,
			 * then remove it.
			 */
			if (cb->active == 0) {
				TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
				rte_free(cb);
			} else {
				ret = -EAGAIN;
			}
		}
	} while (++next_port <= last_port);

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	return ret;
}

int
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event, void *ret_param)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;
	int rc = 0;

	rte_spinlock_lock(&rte_eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		/* Work on a private copy so the lock can be dropped while
		 * the user callback runs; "active" protects the entry from
		 * concurrent removal in the meantime.
		 */
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		if (ret_param != NULL)
			dev_cb.ret_param = ret_param;

		rte_spinlock_unlock(&rte_eth_dev_cb_lock);
		rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
				dev_cb.cb_arg, dev_cb.ret_param);
		rte_spinlock_lock(&rte_eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	return rc;
}

int
rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
{
	uint32_t vec;
	struct rte_eth_dev *dev;
	struct rte_intr_handle *intr_handle;
	uint16_t qid;
	int rc;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	if (!dev->intr_handle) {
		RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
		return -ENOTSUP;
	}

	intr_handle = dev->intr_handle;
	if (!intr_handle->intr_vec) {
		RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
		return -EPERM;
	}

	for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
		vec = intr_handle->intr_vec[qid];
		rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
		if (rc && rc != -EEXIST) {
			RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
					" op %d epfd %d vec %u\n",
					port_id, qid, op, epfd, vec);
		}
	}

	return 0;
}

const struct rte_memzone *
rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
			 uint16_t queue_id, size_t size, unsigned align,
			 int socket_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
		 dev->device->driver->name, ring_name,
		 dev->data->port_id, queue_id);

	/* Reuse an existing memzone (e.g. after a restart) if present. */
	mz = rte_memzone_lookup(z_name);
	if (mz)
		return mz;

	return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align);
}

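/*
 * PMD-side sketch (illustrative only; the ETHDEV_DOC_EXAMPLES guard is
 * hypothetical, and both the ring name and the 16-byte per-descriptor size
 * are placeholders). A driver would reserve descriptor-ring memory for a
 * queue like this:
 */
#ifdef ETHDEV_DOC_EXAMPLES
static const struct rte_memzone *
example_alloc_ring(struct rte_eth_dev *dev, uint16_t queue_id,
		   uint16_t nb_desc, int socket_id)
{
	return rte_eth_dma_zone_reserve(dev, "example_ring", queue_id,
					(size_t)nb_desc * 16,
					RTE_CACHE_LINE_SIZE, socket_id);
}
#endif /* ETHDEV_DOC_EXAMPLES */
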
int
rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
			  int epfd, int op, void *data)
{
	uint32_t vec;
	struct rte_eth_dev *dev;
	struct rte_intr_handle *intr_handle;
	int rc;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (!dev->intr_handle) {
		RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
		return -ENOTSUP;
	}

	intr_handle = dev->intr_handle;
	if (!intr_handle->intr_vec) {
		RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
		return -EPERM;
	}

	vec = intr_handle->intr_vec[queue_id];
	rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
	if (rc && rc != -EEXIST) {
		RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
				" op %d epfd %d vec %u\n",
				port_id, queue_id, op, epfd, vec);
		return rc;
	}

	return 0;
}

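/*
 * Usage sketch (illustrative only; the ETHDEV_DOC_EXAMPLES guard is
 * hypothetical). A polling thread that wants to sleep on an RX queue
 * maps the queue's interrupt into its per-thread epoll set and then
 * unmasks the interrupt:
 */
#ifdef ETHDEV_DOC_EXAMPLES
static int
example_arm_rx_intr(uint16_t port_id, uint16_t queue_id)
{
	int ret;

	ret = rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
					RTE_EPOLL_PER_THREAD,
					RTE_INTR_EVENT_ADD, NULL);
	if (ret != 0)
		return ret;
	/* Unmask the interrupt in the NIC before going to sleep. */
	return rte_eth_dev_rx_intr_enable(port_id, queue_id);
}
#endif /* ETHDEV_DOC_EXAMPLES */
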
int
rte_eth_dev_rx_intr_enable(uint16_t port_id,
			   uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
								queue_id));
}

int
rte_eth_dev_rx_intr_disable(uint16_t port_id,
			    uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
								queue_id));
}

int
rte_eth_dev_filter_supported(uint16_t port_id,
			     enum rte_filter_type filter_type)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
	return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
				RTE_ETH_FILTER_NOP, NULL);
}

int
rte_eth_dev_filter_ctrl_v22(uint16_t port_id,
			    enum rte_filter_type filter_type,
			    enum rte_filter_op filter_op, void *arg);

int
rte_eth_dev_filter_ctrl_v22(uint16_t port_id,
			    enum rte_filter_type filter_type,
			    enum rte_filter_op filter_op, void *arg)
{
	/* Local copies of the pre-18.02 ABI layouts, used to translate
	 * between the old 32-bit mask fields and the current structures.
	 */
	struct rte_eth_fdir_info_v22 {
		enum rte_fdir_mode mode;
		struct rte_eth_fdir_masks mask;
		struct rte_eth_fdir_flex_conf flex_conf;
		uint32_t guarant_spc;
		uint32_t best_spc;
		uint32_t flow_types_mask[1];
		uint32_t max_flexpayload;
		uint32_t flex_payload_unit;
		uint32_t max_flex_payload_segment_num;
		uint16_t flex_payload_limit;
		uint32_t flex_bitmask_unit;
		uint32_t max_flex_bitmask_num;
	};

	struct rte_eth_hash_global_conf_v22 {
		enum rte_eth_hash_function hash_func;
		uint32_t sym_hash_enable_mask[1];
		uint32_t valid_bit_mask[1];
	};

	struct rte_eth_hash_filter_info_v22 {
		enum rte_eth_hash_filter_info_type info_type;
		union {
			uint8_t enable;
			struct rte_eth_hash_global_conf_v22 global_conf;
			struct rte_eth_input_set_conf input_set_conf;
		} info;
	};

	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
	if (filter_op == RTE_ETH_FILTER_INFO) {
		int retval;
		struct rte_eth_fdir_info_v22 *fdir_info_v22;
		struct rte_eth_fdir_info fdir_info;

		fdir_info_v22 = (struct rte_eth_fdir_info_v22 *)arg;

		retval = (*dev->dev_ops->filter_ctrl)(dev, filter_type,
			  filter_op, (void *)&fdir_info);
		fdir_info_v22->mode = fdir_info.mode;
		fdir_info_v22->mask = fdir_info.mask;
		fdir_info_v22->flex_conf = fdir_info.flex_conf;
		fdir_info_v22->guarant_spc = fdir_info.guarant_spc;
		fdir_info_v22->best_spc = fdir_info.best_spc;
		fdir_info_v22->flow_types_mask[0] =
			(uint32_t)fdir_info.flow_types_mask[0];
		fdir_info_v22->max_flexpayload = fdir_info.max_flexpayload;
		fdir_info_v22->flex_payload_unit = fdir_info.flex_payload_unit;
		fdir_info_v22->max_flex_payload_segment_num =
			fdir_info.max_flex_payload_segment_num;
		fdir_info_v22->flex_payload_limit =
			fdir_info.flex_payload_limit;
		fdir_info_v22->flex_bitmask_unit = fdir_info.flex_bitmask_unit;
		fdir_info_v22->max_flex_bitmask_num =
			fdir_info.max_flex_bitmask_num;
		return retval;
	} else if (filter_op == RTE_ETH_FILTER_GET) {
		int retval;
		struct rte_eth_hash_filter_info f_info;
		struct rte_eth_hash_filter_info_v22 *f_info_v22 =
			(struct rte_eth_hash_filter_info_v22 *)arg;

		f_info.info_type = f_info_v22->info_type;
		retval = (*dev->dev_ops->filter_ctrl)(dev, filter_type,
			  filter_op, (void *)&f_info);

		switch (f_info_v22->info_type) {
		case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
			f_info_v22->info.enable = f_info.info.enable;
			break;
		case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
			f_info_v22->info.global_conf.hash_func =
				f_info.info.global_conf.hash_func;
			f_info_v22->info.global_conf.sym_hash_enable_mask[0] =
				(uint32_t)
				f_info.info.global_conf.sym_hash_enable_mask[0];
			f_info_v22->info.global_conf.valid_bit_mask[0] =
				(uint32_t)
				f_info.info.global_conf.valid_bit_mask[0];
			break;
		case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
			f_info_v22->info.input_set_conf =
				f_info.info.input_set_conf;
			break;
		default:
			break;
		}
		return retval;
	} else if (filter_op == RTE_ETH_FILTER_SET) {
		struct rte_eth_hash_filter_info f_info;
		struct rte_eth_hash_filter_info_v22 *f_v22 =
			(struct rte_eth_hash_filter_info_v22 *)arg;

		f_info.info_type = f_v22->info_type;
		switch (f_v22->info_type) {
		case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
			f_info.info.enable = f_v22->info.enable;
			break;
		case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
			f_info.info.global_conf.hash_func =
				f_v22->info.global_conf.hash_func;
			f_info.info.global_conf.sym_hash_enable_mask[0] =
				(uint32_t)
				f_v22->info.global_conf.sym_hash_enable_mask[0];
			f_info.info.global_conf.valid_bit_mask[0] =
				(uint32_t)
				f_v22->info.global_conf.valid_bit_mask[0];
			break;
		case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
			f_info.info.input_set_conf =
				f_v22->info.input_set_conf;
			break;
		default:
			break;
		}
		return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op,
						    (void *)&f_info);
	} else
		return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op,
						    arg);
}
VERSION_SYMBOL(rte_eth_dev_filter_ctrl, _v22, 2.2);

int
rte_eth_dev_filter_ctrl_v1802(uint16_t port_id,
			      enum rte_filter_type filter_type,
			      enum rte_filter_op filter_op, void *arg);

int
rte_eth_dev_filter_ctrl_v1802(uint16_t port_id,
			      enum rte_filter_type filter_type,
			      enum rte_filter_op filter_op, void *arg)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
							     filter_op, arg));
}
BIND_DEFAULT_SYMBOL(rte_eth_dev_filter_ctrl, _v1802, 18.02);
MAP_STATIC_SYMBOL(int rte_eth_dev_filter_ctrl(uint16_t port_id,
		  enum rte_filter_type filter_type,
		  enum rte_filter_op filter_op, void *arg),
		  rte_eth_dev_filter_ctrl_v1802);

void *
rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}
	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.rx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&rte_eth_rx_cb_lock);
	/* Add the callbacks in fifo order. */
	struct rte_eth_rxtx_callback *tail =
		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];

	if (!tail) {
		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;

	} else {
		while (tail->next)
			tail = tail->next;
		tail->next = cb;
	}
	rte_spinlock_unlock(&rte_eth_rx_cb_lock);

	return cb;
}

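/*
 * Usage sketch (illustrative only; the ETHDEV_DOC_EXAMPLES guard is
 * hypothetical). A post-RX callback that counts received packets; it would
 * be installed with rte_eth_add_rx_callback(port_id, queue_id,
 * example_count_cb, &some_uint64_counter):
 */
#ifdef ETHDEV_DOC_EXAMPLES
static uint16_t
example_count_cb(uint16_t port_id, uint16_t queue_id,
		 struct rte_mbuf **pkts, uint16_t nb_pkts,
		 uint16_t max_pkts, void *user_param)
{
	uint64_t *counter = user_param;

	RTE_SET_USED(port_id);
	RTE_SET_USED(queue_id);
	RTE_SET_USED(pkts);
	RTE_SET_USED(max_pkts);
	*counter += nb_pkts;
	return nb_pkts; /* pass every packet through unchanged */
}
#endif /* ETHDEV_DOC_EXAMPLES */
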
void *
rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.rx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&rte_eth_rx_cb_lock);
	/* Add the callback at the first position. */
	cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
	rte_smp_wmb();
	rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
	rte_spinlock_unlock(&rte_eth_rx_cb_lock);

	return cb;
}

void *
rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
		rte_tx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.tx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&rte_eth_tx_cb_lock);
	/* Add the callbacks in fifo order. */
	struct rte_eth_rxtx_callback *tail =
		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];

	if (!tail) {
		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;

	} else {
		while (tail->next)
			tail = tail->next;
		tail->next = cb;
	}
	rte_spinlock_unlock(&rte_eth_tx_cb_lock);

	return cb;
}

int
rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
		struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return -ENOTSUP;
#endif
	/* Check input parameters. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	if (user_cb == NULL ||
			queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
		return -EINVAL;

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_eth_rxtx_callback *cb;
	struct rte_eth_rxtx_callback **prev_cb;
	int ret = -EINVAL;

	rte_spinlock_lock(&rte_eth_rx_cb_lock);
	prev_cb = &dev->post_rx_burst_cbs[queue_id];
	for (; *prev_cb != NULL; prev_cb = &cb->next) {
		cb = *prev_cb;
		if (cb == user_cb) {
			/* Remove the user cb from the callback list. */
			*prev_cb = cb->next;
			ret = 0;
			break;
		}
	}
	rte_spinlock_unlock(&rte_eth_rx_cb_lock);

	return ret;
}

int
rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
		struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return -ENOTSUP;
#endif
	/* Check input parameters. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	if (user_cb == NULL ||
			queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
		return -EINVAL;

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret = -EINVAL;
	struct rte_eth_rxtx_callback *cb;
	struct rte_eth_rxtx_callback **prev_cb;

	rte_spinlock_lock(&rte_eth_tx_cb_lock);
	prev_cb = &dev->pre_tx_burst_cbs[queue_id];
	for (; *prev_cb != NULL; prev_cb = &cb->next) {
		cb = *prev_cb;
		if (cb == user_cb) {
			/* Remove the user cb from the callback list. */
			*prev_cb = cb->next;
			ret = 0;
			break;
		}
	}
	rte_spinlock_unlock(&rte_eth_tx_cb_lock);

	return ret;
}

int
rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (qinfo == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);

	memset(qinfo, 0, sizeof(*qinfo));
	dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
	return 0;
}

int
rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (qinfo == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);

	memset(qinfo, 0, sizeof(*qinfo));
	dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
	return 0;
}

int
rte_eth_dev_set_mc_addr_list(uint16_t port_id,
			     struct ether_addr *mc_addr_set,
			     uint32_t nb_mc_addr)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
	return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
						mc_addr_set, nb_mc_addr));
}

int
rte_eth_timesync_enable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
}

int
rte_eth_timesync_disable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
}

int
rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
				   uint32_t flags)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
				(dev, timestamp, flags));
}

int
rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
				   struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
				(dev, timestamp));
}

int
rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
								      delta));
}

int
rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
								timestamp));
}

int
rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
								timestamp));
}

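/*
 * Usage sketch (illustrative only; the ETHDEV_DOC_EXAMPLES guard is
 * hypothetical). Typical IEEE 1588 servo-loop steps: enable timestamping,
 * nudge the NIC clock by a measured offset (1000 ns is a made-up value
 * for the example), and read the adjusted time back:
 */
#ifdef ETHDEV_DOC_EXAMPLES
static int
example_adjust_nic_clock(uint16_t port_id)
{
	struct timespec now;
	int ret;

	ret = rte_eth_timesync_enable(port_id);
	if (ret != 0)
		return ret;
	ret = rte_eth_timesync_adjust_time(port_id, 1000);
	if (ret != 0)
		return ret;
	return rte_eth_timesync_read_time(port_id, &now);
}
#endif /* ETHDEV_DOC_EXAMPLES */
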
int
rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
}

int
rte_eth_dev_get_eeprom_length(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
}

int
rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
}

int
rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
}

int
rte_eth_dev_get_dcb_info(uint16_t port_id,
			 struct rte_eth_dcb_info *dcb_info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
}

int
rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
				    struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (l2_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
		return -EINVAL;
	}

	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
				-ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
								l2_tunnel));
}

int
rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
				  struct rte_eth_l2_tunnel_conf *l2_tunnel,
				  uint32_t mask,
				  uint8_t en)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (l2_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
		return -EINVAL;
	}

	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
		return -EINVAL;
	}

	if (mask == 0) {
		RTE_PMD_DEBUG_TRACE("Mask should have a value.\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
				-ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
							l2_tunnel, mask, en));
}

static void
rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
			   const struct rte_eth_desc_lim *desc_lim)
{
	/* Round up to the required alignment, then clamp into the
	 * [nb_min, nb_max] range advertised by the PMD.
	 */
	if (desc_lim->nb_align != 0)
		*nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);

	if (desc_lim->nb_max != 0)
		*nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);

	*nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
}

int
rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
				 uint16_t *nb_rx_desc,
				 uint16_t *nb_tx_desc)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);

	rte_eth_dev_info_get(port_id, &dev_info);

	if (nb_rx_desc != NULL)
		rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);

	if (nb_tx_desc != NULL)
		rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);

	return 0;
}

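/*
 * Usage sketch (illustrative only; the ETHDEV_DOC_EXAMPLES guard is
 * hypothetical). Ring sizes requested by an application are clamped to
 * the PMD's limits before the queues are set up:
 */
#ifdef ETHDEV_DOC_EXAMPLES
static int
example_clamp_ring_sizes(uint16_t port_id)
{
	uint16_t nb_rxd = 1024;
	uint16_t nb_txd = 1024;
	int ret;

	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
	if (ret != 0)
		return ret;
	/* nb_rxd/nb_txd now satisfy the PMD's min/max/alignment limits
	 * and can be passed to rte_eth_rx_queue_setup() and
	 * rte_eth_tx_queue_setup().
	 */
	return 0;
}
#endif /* ETHDEV_DOC_EXAMPLES */
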
int
rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (pool == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->pool_ops_supported == NULL)
		return 1; /* all pools are supported */

	return (*dev->dev_ops->pool_ops_supported)(dev, pool);
}