/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  version: DPDK.L.1.2.3-3
 */
#include <sys/types.h>
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>

#include "rte_ether.h"
#include "rte_ethdev.h"
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
#define PMD_DEBUG_TRACE(fmt, args...) do { \
        RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
} while (0)
#else
#define PMD_DEBUG_TRACE(fmt, args...)
#endif
/* define two macros for quick checking for restricting functions to primary
 * instance only. First macro is for functions returning an int - and therefore
 * an error code, second macro is for functions returning void.
 */
#define PROC_PRIMARY_OR_ERR() do { \
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
                PMD_DEBUG_TRACE("Cannot run %s in secondary processes\n", \
                                __func__); \
                return (-E_RTE_SECONDARY); \
        } \
} while (0)

#define PROC_PRIMARY_OR_RET() do { \
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
                PMD_DEBUG_TRACE("Cannot run %s in secondary processes\n", \
                                __func__); \
                return; \
        } \
} while (0)
/* Macros to check for invalid function pointers in dev_ops structure */
#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
        if ((func) == NULL) { \
                PMD_DEBUG_TRACE("Function not supported\n"); \
                return (retval); \
        } \
} while (0)

#define FUNC_PTR_OR_RET(func) do { \
        if ((func) == NULL) { \
                PMD_DEBUG_TRACE("Function not supported\n"); \
                return; \
        } \
} while (0)
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static struct rte_eth_dev_data *rte_eth_dev_data = NULL;
static uint8_t nb_ports = 0;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user application,
 * the pointer to the parameters for the callback, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        enum rte_eth_event_type event;          /**< Interrupt event type */
};
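
/*
 * Illustrative only (not part of the original file): a minimal sketch of a
 * user callback matching the (port_id, event, cb_arg) rte_eth_dev_cb_fn
 * signature assumed for this library version. An application would register
 * it with rte_eth_dev_callback_register(), defined later in this file; the
 * handler name is hypothetical.
 *
 * @code
 * static void
 * example_lsc_handler(uint8_t port_id, enum rte_eth_event_type event,
 *                     void *cb_arg)
 * {
 *         (void)cb_arg;
 *         if (event == RTE_ETH_EVENT_INTR_LSC)
 *                 printf("port %u: link state changed\n", port_id);
 * }
 *
 * int ret = rte_eth_dev_callback_register(0, RTE_ETH_EVENT_INTR_LSC,
 *                                         example_lsc_handler, NULL);
 * @endcode
 */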
static void
rte_eth_dev_data_alloc(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
                                rte_socket_id(), flags);
        } else
                mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
        if (mz == NULL)
                rte_panic("Cannot allocate memzone for ethernet port data\n");

        rte_eth_dev_data = mz->addr;
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                memset(rte_eth_dev_data, 0,
                       RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
}
static inline struct rte_eth_dev *
rte_eth_dev_allocate(void)
{
        struct rte_eth_dev *eth_dev;

        if (nb_ports == RTE_MAX_ETHPORTS)
                return NULL;

        if (rte_eth_dev_data == NULL)
                rte_eth_dev_data_alloc();

        eth_dev = &rte_eth_devices[nb_ports];
        eth_dev->data = &rte_eth_dev_data[nb_ports];
        eth_dev->data->port_id = nb_ports++;
        return eth_dev;
}
static int
rte_eth_dev_init(struct rte_pci_driver *pci_drv,
                 struct rte_pci_device *pci_dev)
{
        struct eth_driver *eth_drv;
        struct rte_eth_dev *eth_dev;
        int diag;

        eth_drv = (struct eth_driver *)pci_drv;

        eth_dev = rte_eth_dev_allocate();
        if (eth_dev == NULL)
                return (-ENOMEM);

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
                                                         eth_drv->dev_private_size,
                                                         CACHE_LINE_SIZE);
                if (eth_dev->data->dev_private == NULL)
                        return (-ENOMEM);
        }
        eth_dev->pci_dev = pci_dev;
        eth_dev->driver = eth_drv;
        eth_dev->data->rx_mbuf_alloc_failed = 0;

        /* init user callbacks */
        TAILQ_INIT(&(eth_dev->callbacks));

        /*
         * Set the default maximum frame size.
         */
        eth_dev->data->max_frame_size = ETHER_MAX_LEN;

        /* Invoke PMD device initialization function */
        diag = (*eth_drv->eth_dev_init)(eth_drv, eth_dev);
        if (diag == 0)
                return (0);

        PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%x device_id=0x%x)"
                        " failed\n", pci_drv->name,
                        (unsigned) pci_dev->id.vendor_id,
                        (unsigned) pci_dev->id.device_id);
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);
        nb_ports--;
        return diag;
}
/**
 * Register an Ethernet [Poll Mode] driver.
 *
 * Function invoked by the initialization function of an Ethernet driver
 * to simultaneously register itself as a PCI driver and as an Ethernet
 * Poll Mode Driver.
 * Invokes the rte_eal_pci_register() function to register the *pci_drv*
 * structure embedded in the *eth_drv* structure, after having stored the
 * address of the rte_eth_dev_init() function in the *devinit* field of
 * the *pci_drv* structure.
 * During the PCI probing phase, the rte_eth_dev_init() function is
 * invoked for each PCI [Ethernet device] matching the embedded PCI
 * identifiers provided by the driver.
 */
void
rte_eth_driver_register(struct eth_driver *eth_drv)
{
        eth_drv->pci_drv.devinit = rte_eth_dev_init;
        rte_eal_pci_register(&eth_drv->pci_drv);
}
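
/*
 * Illustrative only (not part of the original file): a hedged sketch of how
 * a PMD typically uses rte_eth_driver_register() from its own init entry
 * point. The rte_example_pmd driver, the pci_id_example_map table, the
 * eth_example_dev_init callback and struct example_adapter are all
 * hypothetical names introduced for this example.
 *
 * @code
 * static struct eth_driver rte_example_pmd = {
 *         .pci_drv = {
 *                 .name = "rte_example_pmd",
 *                 .id_table = pci_id_example_map,
 *         },
 *         .eth_dev_init = eth_example_dev_init,
 *         .dev_private_size = sizeof(struct example_adapter),
 * };
 *
 * int
 * rte_example_pmd_init(void)
 * {
 *         rte_eth_driver_register(&rte_example_pmd);
 *         return 0;
 * }
 * @endcode
 */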
uint8_t
rte_eth_dev_count(void)
{
        return (nb_ports);
}

int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        int diag;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR();

        if (port_id >= nb_ports || port_id >= RTE_MAX_ETHPORTS) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-EINVAL);
        }
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return (-EBUSY);
        }

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
        if (nb_rx_q > dev_info.max_rx_queues) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
                                port_id, nb_rx_q, dev_info.max_rx_queues);
                return (-EINVAL);
        }
        if (nb_rx_q == 0) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
                return (-EINVAL);
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
                                port_id, nb_tx_q, dev_info.max_tx_queues);
                return (-EINVAL);
        }
        if (nb_tx_q == 0) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
                return (-EINVAL);
        }

        /* Copy the dev_conf parameter into the dev structure */
        memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (dev_conf->rxmode.jumbo_frame == 1) {
                if (dev_conf->rxmode.max_rx_pkt_len >
                    dev_info.max_rx_pktlen) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                        " > max valid value %u\n",
                                        port_id,
                                        (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                        (unsigned)dev_info.max_rx_pktlen);
                        return (-EINVAL);
                }
        } else
                /* Use default value */
                dev->data->dev_conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN;

        /* For vmdq+dcb mode check our configuration before we go further */
        if (dev_conf->rxmode.mq_mode == ETH_VMDQ_DCB) {
                const struct rte_eth_vmdq_dcb_conf *conf;

                if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
                                        "!= %d\n",
                                        port_id, ETH_VMDQ_DCB_NUM_QUEUES);
                        return (-EINVAL);
                }
                conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
                if (! (conf->nb_queue_pools == ETH_16_POOLS ||
                       conf->nb_queue_pools == ETH_32_POOLS)) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
                                        "nb_queue_pools != %d or nb_queue_pools "
                                        "!= %d\n",
                                        port_id, ETH_16_POOLS, ETH_32_POOLS);
                        return (-EINVAL);
                }
        }

        diag = (*dev->dev_ops->dev_configure)(dev, nb_rx_q, nb_tx_q);
        if (diag != 0) {
                rte_free(dev->data->rx_queues);
                rte_free(dev->data->tx_queues);
        }
        return diag;
}
static void
rte_eth_dev_config_restore(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct ether_addr addr;
        uint32_t i;

        dev = &rte_eth_devices[port_id];

        rte_eth_dev_info_get(port_id, &dev_info);

        /* replay MAC address configuration */
        for (i = 0; i < dev_info.max_mac_addrs; i++) {
                addr = dev->data->mac_addrs[i];

                /* skip zero address */
                if (is_zero_ether_addr(&addr))
                        continue;

                /* add address to the hardware */
                if (*dev->dev_ops->mac_addr_add)
                        (*dev->dev_ops->mac_addr_add)(dev, &addr, i, 0);
                else {
                        PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
                                        port_id);
                        /* exit the loop but do not return an error */
                        break;
                }
        }

        /* replay promiscuous configuration */
        if (rte_eth_promiscuous_get(port_id) == 1)
                rte_eth_promiscuous_enable(port_id);
        else if (rte_eth_promiscuous_get(port_id) == 0)
                rte_eth_promiscuous_disable(port_id);

        /* replay allmulticast configuration */
        if (rte_eth_allmulticast_get(port_id) == 1)
                rte_eth_allmulticast_enable(port_id);
        else if (rte_eth_allmulticast_get(port_id) == 0)
                rte_eth_allmulticast_disable(port_id);
}
int
rte_eth_dev_start(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        int diag;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR();

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-EINVAL);
        }
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
        diag = (*dev->dev_ops->dev_start)(dev);
        if (diag == 0)
                dev->data->dev_started = 1;
        else
                return diag;

        rte_eth_dev_config_restore(port_id);

        return 0;
}
void
rte_eth_dev_stop(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_RET();

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_stop)(dev);
}
void
rte_eth_dev_close(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_RET();

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_close)(dev);
}
int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
                       uint16_t nb_rx_desc, unsigned int socket_id,
                       const struct rte_eth_rxconf *rx_conf,
                       struct rte_mempool *mp)
{
        struct rte_eth_dev *dev;
        struct rte_pktmbuf_pool_private *mbp_priv;
        struct rte_eth_dev_info dev_info;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR();

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-EINVAL);
        }
        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return (-EINVAL);
        }

        if (dev->data->dev_started) {
                PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return (-EBUSY);
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

        /*
         * Check the size of the mbuf data buffer.
         * This value must be provided in the private data of the memory pool.
         * First check that the memory pool has valid private data.
         */
        (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
        if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
                PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
                                mp->name, (int) mp->private_data_size,
                                (int) sizeof(struct rte_pktmbuf_pool_private));
                return (-ENOSPC);
        }
        mbp_priv = (struct rte_pktmbuf_pool_private *)
                ((char *)mp + sizeof(struct rte_mempool));
        if ((uint32_t) (mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) <
            dev_info.min_rx_bufsize) {
                PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
                                "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
                                "=%d)\n",
                                mp->name,
                                (int)mbp_priv->mbuf_data_room_size,
                                (int)(RTE_PKTMBUF_HEADROOM +
                                      dev_info.min_rx_bufsize),
                                (int)RTE_PKTMBUF_HEADROOM,
                                (int)dev_info.min_rx_bufsize);
                return (-EINVAL);
        }

        return (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
                                               socket_id, rx_conf, mp);
}
int
rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
                       uint16_t nb_tx_desc, unsigned int socket_id,
                       const struct rte_eth_txconf *tx_conf)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR();

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-EINVAL);
        }
        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return (-EINVAL);
        }

        if (dev->data->dev_started) {
                PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return (-EBUSY);
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
        return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
                                               socket_id, tx_conf);
}
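
/*
 * Illustrative only (not part of the original file): a minimal single-queue
 * bring-up sketch using the configure/setup/start functions above. "mb_pool"
 * is assumed to be a packet mbuf pool created beforehand, and the
 * configuration structures are zeroed purely for brevity; a real application
 * sets driver-appropriate ring thresholds and RX/TX mode fields.
 *
 * @code
 * static int
 * example_port_init(uint8_t port_id, struct rte_mempool *mb_pool)
 * {
 *         struct rte_eth_conf port_conf;
 *         struct rte_eth_rxconf rx_conf;
 *         struct rte_eth_txconf tx_conf;
 *         int ret;
 *
 *         memset(&port_conf, 0, sizeof(port_conf));
 *         memset(&rx_conf, 0, sizeof(rx_conf));
 *         memset(&tx_conf, 0, sizeof(tx_conf));
 *
 *         ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *         if (ret != 0)
 *                 return ret;
 *         ret = rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(),
 *                                      &rx_conf, mb_pool);
 *         if (ret != 0)
 *                 return ret;
 *         ret = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *                                      &tx_conf);
 *         if (ret != 0)
 *                 return ret;
 *         return rte_eth_dev_start(port_id);
 * }
 * @endcode
 */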
void
rte_eth_promiscuous_enable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
        (*dev->dev_ops->promiscuous_enable)(dev);
        dev->data->promiscuous = 1;
}

void
rte_eth_promiscuous_disable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
        dev->data->promiscuous = 0;
        (*dev->dev_ops->promiscuous_disable)(dev);
}

int
rte_eth_promiscuous_get(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-1);
        }

        dev = &rte_eth_devices[port_id];
        return dev->data->promiscuous;
}
void
rte_eth_allmulticast_enable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
        (*dev->dev_ops->allmulticast_enable)(dev);
        dev->data->all_multicast = 1;
}

void
rte_eth_allmulticast_disable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
        dev->data->all_multicast = 0;
        (*dev->dev_ops->allmulticast_disable)(dev);
}

int
rte_eth_allmulticast_get(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-1);
        }

        dev = &rte_eth_devices[port_id];
        return dev->data->all_multicast;
}
static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                    struct rte_eth_link *link)
{
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &(dev->data->dev_link);

        /* Snapshot the 64-bit link word atomically: the cmpset succeeds (and
         * copies *src into *dst) only if *dst still holds the value read an
         * instant earlier, so it acts here as an atomic 64-bit read. */
        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                *(uint64_t *)src) == 0)
                return -1;

        return 0;
}
void
rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }
        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_RET(*dev->dev_ops->link_update);

        if (dev->data->dev_conf.intr_conf.lsc != 0)
                rte_eth_dev_atomic_read_link_status(dev, eth_link);
        else {
                (*dev->dev_ops->link_update)(dev, 1);
                *eth_link = dev->data->dev_link;
        }
}

void
rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }
        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_RET(*dev->dev_ops->link_update);

        if (dev->data->dev_conf.intr_conf.lsc != 0)
                rte_eth_dev_atomic_read_link_status(dev, eth_link);
        else {
                (*dev->dev_ops->link_update)(dev, 0);
                *eth_link = dev->data->dev_link;
        }
}
void
rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->stats_get);
        (*dev->dev_ops->stats_get)(dev, stats);
        stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
}

void
rte_eth_stats_reset(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
        (*dev->dev_ops->stats_reset)(dev);
}
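
/*
 * Illustrative only (not part of the original file): a short sketch of how
 * an application might read and then reset the port counters exposed above.
 * The field names (ipackets, opackets, rx_nombuf) follow struct
 * rte_eth_stats as assumed for this library version.
 *
 * @code
 * struct rte_eth_stats stats;
 *
 * rte_eth_stats_get(port_id, &stats);
 * printf("port %u: RX=%" PRIu64 " TX=%" PRIu64 " rx_nombuf=%" PRIu64 "\n",
 *        port_id, stats.ipackets, stats.opackets, stats.rx_nombuf);
 * rte_eth_stats_reset(port_id);
 * @endcode
 */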
void
rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
        (*dev->dev_ops->dev_infos_get)(dev, dev_info);
        dev_info->pci_dev = dev->pci_dev;
        dev_info->driver_name = dev->driver->pci_drv.name;
}

void
rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }
        dev = &rte_eth_devices[port_id];
        ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
}
int
rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-EINVAL);
        }
        dev = &rte_eth_devices[port_id];
        if (! (dev->data->dev_conf.rxmode.hw_vlan_filter)) {
                PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
                return (-ENOSYS);
        }
        if (vlan_id > 4095) {
                PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
                                port_id, (unsigned) vlan_id);
                return (-EINVAL);
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
        (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
        return (0);
}
int
rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
                                      struct rte_fdir_filter *fdir_filter,
                                      uint8_t queue)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
                PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
                                port_id, dev->data->dev_conf.fdir_conf.mode);
                return (-ENOSYS);
        }

        if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
             || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
            && (fdir_filter->port_src || fdir_filter->port_dst)) {
                PMD_DEBUG_TRACE("Ports are meaningless for the SCTP and "
                                "NONE l4types; source & destination ports "
                                "must be null\n");
                return (-EINVAL);
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_signature_filter, -ENOTSUP);
        if (*dev->dev_ops->fdir_add_signature_filter)
                return (*dev->dev_ops->fdir_add_signature_filter)(dev,
                                                                  fdir_filter,
                                                                  queue);

        PMD_DEBUG_TRACE("port %d: FDIR feature not supported\n", port_id);
        return (-ENOTSUP);
}
int
rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
                                         struct rte_fdir_filter *fdir_filter,
                                         uint8_t queue)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
                PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
                                port_id, dev->data->dev_conf.fdir_conf.mode);
                return (-ENOSYS);
        }

        if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
             || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
            && (fdir_filter->port_src || fdir_filter->port_dst)) {
                PMD_DEBUG_TRACE("Ports are meaningless for the SCTP and "
                                "NONE l4types; source & destination ports "
                                "must be null\n");
                return (-EINVAL);
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_signature_filter, -ENOTSUP);
        if (*dev->dev_ops->fdir_update_signature_filter)
                return (*dev->dev_ops->fdir_update_signature_filter)(dev,
                                                                     fdir_filter,
                                                                     queue);

        PMD_DEBUG_TRACE("port %d: FDIR feature not supported\n", port_id);
        return (-ENOTSUP);
}
int
rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
                                         struct rte_fdir_filter *fdir_filter)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
                PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
                                port_id, dev->data->dev_conf.fdir_conf.mode);
                return (-ENOSYS);
        }

        if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
             || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
            && (fdir_filter->port_src || fdir_filter->port_dst)) {
                PMD_DEBUG_TRACE("Ports are meaningless for the SCTP and "
                                "NONE l4types; source & destination ports "
                                "must be null\n");
                return (-EINVAL);
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_signature_filter, -ENOTSUP);
        if (*dev->dev_ops->fdir_remove_signature_filter)
                return (*dev->dev_ops->fdir_remove_signature_filter)(dev,
                                                                     fdir_filter);

        PMD_DEBUG_TRACE("port %d: FDIR feature not supported\n", port_id);
        return (-ENOTSUP);
}
int
rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];
        if (! (dev->data->dev_conf.fdir_conf.mode)) {
                PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
                return (-ENOSYS);
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_infos_get, -ENOTSUP);
        if (*dev->dev_ops->fdir_infos_get) {
                (*dev->dev_ops->fdir_infos_get)(dev, fdir);
                return (0);
        }

        PMD_DEBUG_TRACE("port %d: FDIR feature not supported\n", port_id);
        return (-ENOTSUP);
}
int
rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
                                    struct rte_fdir_filter *fdir_filter,
                                    uint16_t soft_id, uint8_t queue,
                                    uint8_t drop)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
                PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
                                port_id, dev->data->dev_conf.fdir_conf.mode);
                return (-ENOSYS);
        }

        if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
             || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
            && (fdir_filter->port_src || fdir_filter->port_dst)) {
                PMD_DEBUG_TRACE("Ports are meaningless for the SCTP and "
                                "NONE l4types; source & destination ports "
                                "must be null\n");
                return (-EINVAL);
        }

        /* For now IPv6 is not supported with perfect filter */
        if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
                return (-ENOTSUP);

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_perfect_filter, -ENOTSUP);
        if (*dev->dev_ops->fdir_add_perfect_filter)
                return (*dev->dev_ops->fdir_add_perfect_filter)(dev,
                                                                fdir_filter,
                                                                soft_id, queue,
                                                                drop);

        PMD_DEBUG_TRACE("port %d: FDIR feature not supported\n", port_id);
        return (-ENOTSUP);
}
int
rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
                                       struct rte_fdir_filter *fdir_filter,
                                       uint16_t soft_id, uint8_t queue,
                                       uint8_t drop)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
                PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
                                port_id, dev->data->dev_conf.fdir_conf.mode);
                return (-ENOSYS);
        }

        if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
             || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
            && (fdir_filter->port_src || fdir_filter->port_dst)) {
                PMD_DEBUG_TRACE("Ports are meaningless for the SCTP and "
                                "NONE l4types; source & destination ports "
                                "must be null\n");
                return (-EINVAL);
        }

        /* For now IPv6 is not supported with perfect filter */
        if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
                return (-ENOTSUP);

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_perfect_filter, -ENOTSUP);
        if (*dev->dev_ops->fdir_update_perfect_filter)
                return (*dev->dev_ops->fdir_update_perfect_filter)(dev,
                                                                   fdir_filter,
                                                                   soft_id,
                                                                   queue,
                                                                   drop);

        PMD_DEBUG_TRACE("port %d: FDIR feature not supported\n", port_id);
        return (-ENOTSUP);
}
int
rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
                                       struct rte_fdir_filter *fdir_filter,
                                       uint16_t soft_id)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
                PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
                                port_id, dev->data->dev_conf.fdir_conf.mode);
                return (-ENOSYS);
        }

        if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
             || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
            && (fdir_filter->port_src || fdir_filter->port_dst)) {
                PMD_DEBUG_TRACE("Ports are meaningless for the SCTP and "
                                "NONE l4types; source & destination ports "
                                "must be null\n");
                return (-EINVAL);
        }

        /* For now IPv6 is not supported with perfect filter */
        if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
                return (-ENOTSUP);

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_perfect_filter, -ENOTSUP);
        if (*dev->dev_ops->fdir_remove_perfect_filter)
                return (*dev->dev_ops->fdir_remove_perfect_filter)(dev,
                                                                   fdir_filter,
                                                                   soft_id);

        PMD_DEBUG_TRACE("port %d: FDIR feature not supported\n", port_id);
        return (-ENOTSUP);
}
int
rte_eth_dev_fdir_set_masks(uint8_t port_id, struct rte_fdir_masks *fdir_mask)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];
        if (! (dev->data->dev_conf.fdir_conf.mode)) {
                PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
                return (-ENOSYS);
        }

        /* IPv6 masks are not supported */
        if (fdir_mask->src_ipv6_mask)
                return (-ENOTSUP);

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_set_masks, -ENOTSUP);
        if (*dev->dev_ops->fdir_set_masks)
                return (*dev->dev_ops->fdir_set_masks)(dev, fdir_mask);

        PMD_DEBUG_TRACE("port %d: FDIR feature not supported\n",
                        port_id);
        return (-ENOTSUP);
}
int
rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
                PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
                return (-EINVAL);
        }

        dev = &rte_eth_devices[port_id];

        /* High water, low water validation are device specific */
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
        if (*dev->dev_ops->flow_ctrl_set)
                return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);

        return (-ENOTSUP);
}
int
rte_eth_led_on(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
        return ((*dev->dev_ops->dev_led_on)(dev));
}

int
rte_eth_led_off(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
        return ((*dev->dev_ops->dev_led_off)(dev));
}
/*
 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
 * an empty slot.
 */
static inline int
get_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
{
        struct rte_eth_dev_info dev_info;
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        unsigned i;

        rte_eth_dev_info_get(port_id, &dev_info);

        for (i = 0; i < dev_info.max_mac_addrs; i++)
                if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
                        return i;

        return -1;
}

static struct ether_addr null_mac_addr = {{0, 0, 0, 0, 0, 0}};
int
rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
                         uint32_t pool)
{
        struct rte_eth_dev *dev;
        int index;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
        if (is_zero_ether_addr(addr)) {
                PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n", port_id);
                return (-EINVAL);
        }

        /* Check if it's already there, and do nothing */
        index = get_mac_addr_index(port_id, addr);
        if (index >= 0)
                return 0;

        index = get_mac_addr_index(port_id, &null_mac_addr);
        if (index < 0) {
                PMD_DEBUG_TRACE("port %d: MAC address array full\n", port_id);
                return (-ENOSPC);
        }

        /* Update NIC */
        (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);

        /* Update address in NIC data structure */
        ether_addr_copy(addr, &dev->data->mac_addrs[index]);

        return 0;
}
int
rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
{
        struct rte_eth_dev *dev;
        int index;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
        index = get_mac_addr_index(port_id, addr);
        if (index == 0) {
                PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
                return (-EADDRINUSE);
        } else if (index < 0)
                return 0;  /* Do nothing if address wasn't found */

        /* Update NIC */
        (*dev->dev_ops->mac_addr_remove)(dev, index);

        /* Update address in NIC data structure */
        ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);

        return 0;
}
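
/*
 * Illustrative only (not part of the original file): adding and later
 * removing a secondary unicast address with the helpers above. Pool 0 is
 * assumed to select the default (non-VMDq) pool, and the address value is
 * arbitrary.
 *
 * @code
 * struct ether_addr extra = {{0x00, 0x1B, 0x21, 0x00, 0x00, 0x01}};
 * int ret;
 *
 * ret = rte_eth_dev_mac_addr_add(port_id, &extra, 0);
 * if (ret == 0)
 *         ret = rte_eth_dev_mac_addr_remove(port_id, &extra);
 * @endcode
 */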
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
uint16_t
rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
                 struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return 0;
        }
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, -ENOTSUP);
        if (queue_id >= dev->data->nb_rx_queues) {
                PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
                return 0;
        }
        return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
                                    rx_pkts, nb_pkts);
}

uint16_t
rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
                 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return 0;
        }
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, -ENOTSUP);
        if (queue_id >= dev->data->nb_tx_queues) {
                PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
                return 0;
        }
        return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
                                    tx_pkts, nb_pkts);
}
#endif
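
/*
 * Illustrative only (not part of the original file): the classic burst
 * receive/transmit loop that rte_eth_rx_burst()/rte_eth_tx_burst() support.
 * A burst of up to 32 mbufs is read from queue 0 of one port and written
 * straight back out; mbufs the TX path could not accept are freed so they
 * are not leaked.
 *
 * @code
 * #define BURST_SIZE 32
 * struct rte_mbuf *pkts[BURST_SIZE];
 * uint16_t nb_rx, nb_tx, i;
 *
 * for (;;) {
 *         nb_rx = rte_eth_rx_burst(port_id, 0, pkts, BURST_SIZE);
 *         if (nb_rx == 0)
 *                 continue;
 *         nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_rx);
 *         for (i = nb_tx; i < nb_rx; i++)
 *                 rte_pktmbuf_free(pkts[i]);
 * }
 * @endcode
 */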
int
rte_eth_dev_callback_register(uint8_t port_id,
                              enum rte_eth_event_type event,
                              rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
        int ret = 0;
        struct rte_eth_dev *dev;
        struct rte_eth_dev_callback *user_cb = NULL;

        if (!cb_fn)
                return (-EINVAL);
        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-EINVAL);
        }
        dev = &rte_eth_devices[port_id];
        rte_spinlock_lock(&rte_eth_dev_cb_lock);
        TAILQ_FOREACH(user_cb, &(dev->callbacks), next) {
                if (user_cb->cb_fn == cb_fn &&
                    user_cb->cb_arg == cb_arg &&
                    user_cb->event == event) {
                        /* callback already registered: nothing to do */
                        goto out;
                }
        }
        user_cb = rte_malloc("INTR_USER_CALLBACK",
                             sizeof(struct rte_eth_dev_callback), 0);
        if (user_cb != NULL) {
                user_cb->cb_fn = cb_fn;
                user_cb->cb_arg = cb_arg;
                user_cb->event = event;
                TAILQ_INSERT_TAIL(&(dev->callbacks), user_cb, next);
        } else
                ret = (-ENOMEM);

out:
        rte_spinlock_unlock(&rte_eth_dev_cb_lock);
        return ret;
}
int
rte_eth_dev_callback_unregister(uint8_t port_id,
                                enum rte_eth_event_type event,
                                rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
        int ret = (-ENOENT);
        struct rte_eth_dev *dev;
        struct rte_eth_dev_callback *cb_lst = NULL;

        if (!cb_fn)
                return (-EINVAL);
        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-EINVAL);
        }
        dev = &rte_eth_devices[port_id];
        rte_spinlock_lock(&rte_eth_dev_cb_lock);
        TAILQ_FOREACH(cb_lst, &(dev->callbacks), next) {
                if (cb_lst->cb_fn != cb_fn || cb_lst->event != event)
                        continue;
                if (cb_lst->cb_arg == (void *)-1 ||
                    cb_lst->cb_arg == cb_arg) {
                        TAILQ_REMOVE(&(dev->callbacks), cb_lst, next);
                        rte_free(cb_lst);
                        ret = 0;
                        /* the entry was freed: stop walking the list */
                        break;
                }
        }
        rte_spinlock_unlock(&rte_eth_dev_cb_lock);
        return ret;
}
void
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
                              enum rte_eth_event_type event)
{
        struct rte_eth_dev_callback *cb_lst = NULL;
        struct rte_eth_dev_callback dev_cb;

        rte_spinlock_lock(&rte_eth_dev_cb_lock);
        TAILQ_FOREACH(cb_lst, &(dev->callbacks), next) {
                if (cb_lst->cb_fn == NULL || cb_lst->event != event)
                        continue;
                /* Copy the callback descriptor so the lock can be released
                 * while the user function runs. */
                dev_cb = *cb_lst;
                rte_spinlock_unlock(&rte_eth_dev_cb_lock);
                dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
                             dev_cb.cb_arg);
                rte_spinlock_lock(&rte_eth_dev_cb_lock);
        }
        rte_spinlock_unlock(&rte_eth_dev_cb_lock);
}
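
/*
 * Illustrative only (not part of the original file): a sketch of how a PMD's
 * interrupt handler might hand a link-state-change event to the dispatcher
 * above. The handler name and its argument plumbing are hypothetical; the
 * (intr_handle, param) shape follows the EAL interrupt callback convention
 * assumed for this library version.
 *
 * @code
 * static void
 * example_pmd_intr_handler(struct rte_intr_handle *handle, void *param)
 * {
 *         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
 *
 *         (void)handle;
 *         // after reading and clearing the hardware interrupt cause ...
 *         _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
 * }
 * @endcode
 */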