ethdev: clean port id retrieval when attaching
[dpdk.git] / lib/librte_ether/rte_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_ether.h"
#include "rte_ethdev.h"

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
#define PMD_DEBUG_TRACE(fmt, args...) do {                        \
		RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
	} while (0)
#else
#define PMD_DEBUG_TRACE(fmt, args...)
#endif

/* Macros to restrict functions to the primary process only */
#define PROC_PRIMARY_OR_ERR_RET(retval) do { \
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
		PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
		return (retval); \
	} \
} while (0)

#define PROC_PRIMARY_OR_RET() do { \
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
		PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
		return; \
	} \
} while (0)

/* Macros to check for invalid function pointers in dev_ops structure */
#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
	if ((func) == NULL) { \
		PMD_DEBUG_TRACE("Function not supported\n"); \
		return (retval); \
	} \
} while (0)

#define FUNC_PTR_OR_RET(func) do { \
	if ((func) == NULL) { \
		PMD_DEBUG_TRACE("Function not supported\n"); \
		return; \
	} \
} while (0)

/* Macros to check for valid port */
#define VALID_PORTID_OR_ERR_RET(port_id, retval) do {           \
	if (!rte_eth_dev_is_valid_port(port_id)) {              \
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
		return retval;                                  \
	}                                                       \
} while (0)

#define VALID_PORTID_OR_RET(port_id) do {                       \
	if (!rte_eth_dev_is_valid_port(port_id)) {              \
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
		return;                                         \
	}                                                       \
} while (0)

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static struct rte_eth_dev_data *rte_eth_dev_data;
static uint8_t nb_ports;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
		sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
		sizeof(rte_txq_stats_strings[0]))

/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * the pointer to the callback's parameters, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

enum {
	DEV_DETACHED = 0,
	DEV_ATTACHED
};

static void
rte_eth_dev_data_alloc(void)
{
	const unsigned flags = 0;
	const struct rte_memzone *mz;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
				RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
				rte_socket_id(), flags);
	} else
		mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
	if (mz == NULL)
		rte_panic("Cannot allocate memzone for ethernet port data\n");

	rte_eth_dev_data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(rte_eth_dev_data, 0,
				RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

static uint8_t
rte_eth_dev_find_free_port(void)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].attached == DEV_DETACHED)
			return i;
	}
	return RTE_MAX_ETHPORTS;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name, enum rte_eth_dev_type type)
{
	uint8_t port_id;
	struct rte_eth_dev *eth_dev;

	port_id = rte_eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
		return NULL;
	}

	if (rte_eth_dev_data == NULL)
		rte_eth_dev_data_alloc();

	if (rte_eth_dev_allocated(name) != NULL) {
		PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
				name);
		return NULL;
	}

	eth_dev = &rte_eth_devices[port_id];
	eth_dev->data = &rte_eth_dev_data[port_id];
	snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
	eth_dev->data->port_id = port_id;
	eth_dev->attached = DEV_ATTACHED;
	eth_dev->dev_type = type;
	nb_ports++;
	return eth_dev;
}
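
/*
 * Illustrative sketch (not part of this file): a PMD probe routine
 * would typically allocate its port as below; "pmd_foo" and the device
 * type are placeholders, not a real driver:
 *
 *	struct rte_eth_dev *eth_dev;
 *
 *	eth_dev = rte_eth_dev_allocate("pmd_foo", RTE_ETH_DEV_VIRTUAL);
 *	if (eth_dev == NULL)
 *		return -ENOMEM;
 */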

static int
rte_eth_dev_create_unique_device_name(char *name, size_t size,
		struct rte_pci_device *pci_dev)
{
	int ret;

	if ((name == NULL) || (pci_dev == NULL))
		return -EINVAL;

	ret = snprintf(name, size, "%d:%d.%d",
			pci_dev->addr.bus, pci_dev->addr.devid,
			pci_dev->addr.function);
	if (ret < 0)
		return ret;
	return 0;
}
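
/*
 * Illustrative note: for a device at PCI address 0000:02:00.0, the
 * function above produces the name "2:0.0"; the PCI domain is not
 * encoded in the generated name.
 */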

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	eth_dev->attached = DEV_DETACHED;
	nb_ports--;
	return 0;
}

static int
rte_eth_dev_init(struct rte_pci_driver *pci_drv,
		 struct rte_pci_device *pci_dev)
{
	struct eth_driver *eth_drv;
	struct rte_eth_dev *eth_dev;
	char ethdev_name[RTE_ETH_NAME_MAX_LEN];

	int diag;

	eth_drv = (struct eth_driver *)pci_drv;

	/* Create unique Ethernet device name using PCI address */
	rte_eth_dev_create_unique_device_name(ethdev_name,
			sizeof(ethdev_name), pci_dev);

	eth_dev = rte_eth_dev_allocate(ethdev_name, RTE_ETH_DEV_PCI);
	if (eth_dev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
				  eth_drv->dev_private_size,
				  RTE_CACHE_LINE_SIZE);
		if (eth_dev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private port data\n");
	}
	eth_dev->pci_dev = pci_dev;
	eth_dev->driver = eth_drv;
	eth_dev->data->rx_mbuf_alloc_failed = 0;

	/* init user callbacks */
	TAILQ_INIT(&(eth_dev->link_intr_cbs));

	/*
	 * Set the default MTU.
	 */
	eth_dev->data->mtu = ETHER_MTU;

	/* Invoke PMD device initialization function */
	diag = (*eth_drv->eth_dev_init)(eth_dev);
	if (diag == 0)
		return 0;

	PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%x device_id=0x%x) failed\n",
			pci_drv->name,
			(unsigned) pci_dev->id.vendor_id,
			(unsigned) pci_dev->id.device_id);
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	rte_eth_dev_release_port(eth_dev);
	return diag;
}

static int
rte_eth_dev_uninit(struct rte_pci_device *pci_dev)
{
	const struct eth_driver *eth_drv;
	struct rte_eth_dev *eth_dev;
	char ethdev_name[RTE_ETH_NAME_MAX_LEN];
	int ret;

	if (pci_dev == NULL)
		return -EINVAL;

	/* Create unique Ethernet device name using PCI address */
	rte_eth_dev_create_unique_device_name(ethdev_name,
			sizeof(ethdev_name), pci_dev);

	eth_dev = rte_eth_dev_allocated(ethdev_name);
	if (eth_dev == NULL)
		return -ENODEV;

	eth_drv = (const struct eth_driver *)pci_dev->driver;

	/* Invoke PMD device uninit function */
	if (*eth_drv->eth_dev_uninit) {
		ret = (*eth_drv->eth_dev_uninit)(eth_dev);
		if (ret)
			return ret;
	}

	/* free ether device */
	rte_eth_dev_release_port(eth_dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);

	eth_dev->pci_dev = NULL;
	eth_dev->driver = NULL;
	eth_dev->data = NULL;

	return 0;
}

/**
 * Register an Ethernet [Poll Mode] driver.
 *
 * Function invoked by the initialization function of an Ethernet driver
 * to simultaneously register itself as a PCI driver and as an Ethernet
 * Poll Mode Driver.
 * Invokes the rte_eal_pci_register() function to register the *pci_drv*
 * structure embedded in the *eth_drv* structure, after having stored the
 * address of the rte_eth_dev_init() function in the *devinit* field of
 * the *pci_drv* structure.
 * During the PCI probing phase, the rte_eth_dev_init() function is
 * invoked for each PCI [Ethernet device] matching the embedded PCI
 * identifiers provided by the driver.
 */
void
rte_eth_driver_register(struct eth_driver *eth_drv)
{
	eth_drv->pci_drv.devinit = rte_eth_dev_init;
	eth_drv->pci_drv.devuninit = rte_eth_dev_uninit;
	rte_eal_pci_register(&eth_drv->pci_drv);
}
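
/*
 * Illustrative registration sketch for a hypothetical PMD (all names
 * below are placeholders, not a real driver):
 *
 *	static struct eth_driver rte_foo_pmd = {
 *		.pci_drv = {
 *			.name = "rte_foo_pmd",
 *			.id_table = pci_id_foo_map,
 *			.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
 *		},
 *		.eth_dev_init = eth_foo_dev_init,
 *		.dev_private_size = sizeof(struct foo_adapter),
 *	};
 *
 *	rte_eth_driver_register(&rte_foo_pmd);
 */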

int
rte_eth_dev_is_valid_port(uint8_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    rte_eth_devices[port_id].attached != DEV_ATTACHED)
		return 0;
	else
		return 1;
}

int
rte_eth_dev_socket_id(uint8_t port_id)
{
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;
	return rte_eth_devices[port_id].pci_dev->numa_node;
}

uint8_t
rte_eth_dev_count(void)
{
	return nb_ports;
}

static enum rte_eth_dev_type
rte_eth_dev_get_device_type(uint8_t port_id)
{
	if (!rte_eth_dev_is_valid_port(port_id))
		return RTE_ETH_DEV_UNKNOWN;
	return rte_eth_devices[port_id].dev_type;
}

static int
rte_eth_dev_get_addr_by_port(uint8_t port_id, struct rte_pci_addr *addr)
{
	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (addr == NULL) {
		PMD_DEBUG_TRACE("Null pointer is specified\n");
		return -EINVAL;
	}

	*addr = rte_eth_devices[port_id].pci_dev->addr;
	return 0;
}

static int
rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
{
	char *tmp;

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (name == NULL) {
		PMD_DEBUG_TRACE("Null pointer is specified\n");
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[port_id].data' here,
	 * because it might be overwritten by a VDEV PMD */
	tmp = rte_eth_dev_data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

static int
rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
{
	int i;

	if (name == NULL) {
		PMD_DEBUG_TRACE("Null pointer is specified\n");
		return -EINVAL;
	}

	*port_id = RTE_MAX_ETHPORTS;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (!strncmp(name,
			rte_eth_dev_data[i].name, strlen(name))) {
			*port_id = i;
			return 0;
		}
	}
	return -ENODEV;
}

static int
rte_eth_dev_get_port_by_addr(const struct rte_pci_addr *addr, uint8_t *port_id)
{
	int i;
	struct rte_pci_device *pci_dev = NULL;

	if (addr == NULL) {
		PMD_DEBUG_TRACE("Null pointer is specified\n");
		return -EINVAL;
	}

	*port_id = RTE_MAX_ETHPORTS;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		pci_dev = rte_eth_devices[i].pci_dev;

		if (pci_dev &&
			!rte_eal_compare_pci_addr(&pci_dev->addr, addr)) {
			*port_id = i;
			return 0;
		}
	}
	return -ENODEV;
}

static int
rte_eth_dev_is_detachable(uint8_t port_id)
{
	uint32_t drv_flags;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return -EINVAL;
	}

	if (rte_eth_devices[port_id].dev_type == RTE_ETH_DEV_PCI) {
		switch (rte_eth_devices[port_id].pci_dev->kdrv) {
		case RTE_KDRV_IGB_UIO:
		case RTE_KDRV_UIO_GENERIC:
		case RTE_KDRV_NIC_UIO:
			break;
		case RTE_KDRV_VFIO:
		default:
			return -ENOTSUP;
		}
	}

	drv_flags = rte_eth_devices[port_id].driver->pci_drv.drv_flags;
	return !(drv_flags & RTE_PCI_DRV_DETACHABLE);
}

/* attach the new physical device, then store port_id of the device */
static int
rte_eth_dev_attach_pdev(struct rte_pci_addr *addr, uint8_t *port_id)
{
	if ((addr == NULL) || (port_id == NULL))
		goto err;

	/* re-construct pci_device_list */
	if (rte_eal_pci_scan())
		goto err;
	/* Invoke the probe function of a driver that can handle the new device. */
	if (rte_eal_pci_probe_one(addr))
		goto err;

	if (rte_eth_dev_get_port_by_addr(addr, port_id))
		goto err;

	return 0;
err:
	RTE_LOG(ERR, EAL, "Driver cannot attach the device\n");
	return -1;
}

/* detach the physical device, then store the pci_addr of the device */
static int
rte_eth_dev_detach_pdev(uint8_t port_id, struct rte_pci_addr *addr)
{
	struct rte_pci_addr freed_addr;
	struct rte_pci_addr vp;

	if (addr == NULL)
		goto err;

	/* check whether the driver supports the detach feature, or not */
	if (rte_eth_dev_is_detachable(port_id))
		goto err;

	/* get pci address by port id */
	if (rte_eth_dev_get_addr_by_port(port_id, &freed_addr))
		goto err;

	/* A zeroed pci addr means the port comes from a virtual device */
	vp.domain = vp.bus = vp.devid = vp.function = 0;
	if (rte_eal_compare_pci_addr(&vp, &freed_addr) == 0)
		goto err;

	/* invoke the devuninit func of the pci driver,
	 * and also remove the device from pci_device_list */
	if (rte_eal_pci_detach(&freed_addr))
		goto err;

	*addr = freed_addr;
	return 0;
err:
	RTE_LOG(ERR, EAL, "Driver cannot detach the device\n");
	return -1;
}

/* attach the new virtual device, then store port_id of the device */
static int
rte_eth_dev_attach_vdev(const char *vdevargs, uint8_t *port_id)
{
	char *name = NULL, *args = NULL;
	int ret = -1;

	if ((vdevargs == NULL) || (port_id == NULL))
		goto end;

	/* parse vdevargs, then retrieve the device name and args */
	if (rte_eal_parse_devargs_str(vdevargs, &name, &args))
		goto end;

	/* walk through the dev_driver_list to find the driver of the
	 * device, then invoke the probe function of the driver.
	 * The port_id allocated during initialization is retrieved
	 * afterwards by device name.
	 */
	if (rte_eal_vdev_init(name, args))
		goto end;

	if (rte_eth_dev_get_port_by_name(name, port_id))
		goto end;

	ret = 0;
end:
	if (name)
		free(name);
	if (args)
		free(args);

	if (ret < 0)
		RTE_LOG(ERR, EAL, "Driver cannot attach the device\n");
	return ret;
}

/* detach the virtual device, then store the name of the device */
static int
rte_eth_dev_detach_vdev(uint8_t port_id, char *vdevname)
{
	char name[RTE_ETH_NAME_MAX_LEN];

	if (vdevname == NULL)
		goto err;

	/* check whether the driver supports the detach feature, or not */
	if (rte_eth_dev_is_detachable(port_id))
		goto err;

	/* get the device name by port id */
	if (rte_eth_dev_get_name_by_port(port_id, name))
		goto err;
	/* walk through the dev_driver_list to find the driver of the
	 * device, then invoke the uninit function of the driver */
	if (rte_eal_vdev_uninit(name))
		goto err;

	strncpy(vdevname, name, sizeof(name));
	return 0;
err:
	RTE_LOG(ERR, EAL, "Driver cannot detach the device\n");
	return -1;
}

/* attach the new device, then store port_id of the device */
int
rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
{
	struct rte_pci_addr addr;

	if ((devargs == NULL) || (port_id == NULL))
		return -EINVAL;

	if (eal_parse_pci_DomBDF(devargs, &addr) == 0)
		return rte_eth_dev_attach_pdev(&addr, port_id);
	else
		return rte_eth_dev_attach_vdev(devargs, port_id);
}
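
/*
 * Usage sketch (illustrative): devargs is either a PCI address or a
 * virtual device string; the pcap vdev below is just an example:
 *
 *	uint8_t port_id;
 *
 *	if (rte_eth_dev_attach("0000:02:00.0", &port_id) == 0)
 *		printf("physical device attached as port %u\n", port_id);
 *	if (rte_eth_dev_attach("eth_pcap0,iface=eth0", &port_id) == 0)
 *		printf("virtual device attached as port %u\n", port_id);
 */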

/* detach the device, then store the name of the device */
int
rte_eth_dev_detach(uint8_t port_id, char *name)
{
	struct rte_pci_addr addr;
	int ret;

	if (name == NULL)
		return -EINVAL;

	if (rte_eth_dev_get_device_type(port_id) == RTE_ETH_DEV_PCI) {
		ret = rte_eth_dev_get_addr_by_port(port_id, &addr);
		if (ret < 0)
			return ret;

		ret = rte_eth_dev_detach_pdev(port_id, &addr);
		if (ret == 0)
			snprintf(name, RTE_ETH_NAME_MAX_LEN,
				"%04x:%02x:%02x.%d",
				addr.domain, addr.bus,
				addr.devid, addr.function);

		return ret;
	} else
		return rte_eth_dev_detach_vdev(port_id, name);
}
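
/*
 * Usage sketch (illustrative): the caller provides a buffer that
 * receives the PCI address or vdev name of the detached port:
 *
 *	char name[RTE_ETH_NAME_MAX_LEN];
 *
 *	if (rte_eth_dev_detach(port_id, name) == 0)
 *		printf("detached device %s\n", name);
 */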

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	void **rxq;
	unsigned i;

	if (dev->data->rx_queues == NULL) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -ENOMEM;
		}
	} else { /* re-configure */
		FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);
		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (rxq == NULL)
			return -ENOMEM;
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(rxq + old_nb_queues, 0,
				sizeof(rxq[0]) * new_qs);
		}

		dev->data->rx_queues = rxq;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}

int
rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup */
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
}

int
rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup */
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
}

int
rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup */
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
}

int
rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup */
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	void **txq;
	unsigned i;

	if (dev->data->tx_queues == NULL) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
						   sizeof(dev->data->tx_queues[0]) * nb_queues,
						   RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -ENOMEM;
		}
	} else { /* re-configure */
		FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);
		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
				  RTE_CACHE_LINE_SIZE);
		if (txq == NULL)
			return -ENOMEM;
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(txq + old_nb_queues, 0,
			       sizeof(txq[0]) * new_qs);
		}

		dev->data->tx_queues = txq;
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}

int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup */
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		PMD_DEBUG_TRACE(
			"Number of RX queues requested (%u) is greater than max supported (%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		return -EINVAL;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		PMD_DEBUG_TRACE(
			"Number of TX queues requested (%u) is greater than max supported (%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
	if (nb_rx_q > dev_info.max_rx_queues) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
				port_id, nb_rx_q, dev_info.max_rx_queues);
		return -EINVAL;
	}
	if (nb_rx_q == 0) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
		return -EINVAL;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
				port_id, nb_tx_q, dev_info.max_tx_queues);
		return -EINVAL;
	}
	if (nb_tx_q == 0) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
		return -EINVAL;
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * If link state interrupt is enabled, check that the
	 * device supports it.
	 */
	if (dev_conf->intr_conf.lsc == 1) {
		const struct rte_pci_driver *pci_drv = &dev->driver->pci_drv;

		if (!(pci_drv->drv_flags & RTE_PCI_DRV_INTR_LSC)) {
			PMD_DEBUG_TRACE("driver %s does not support lsc\n",
					pci_drv->name);
			return -EINVAL;
		}
	}

	/*
	 * If jumbo frames are enabled, check that the maximum RX packet
	 * length is supported by the configured device.
	 */
	if (dev_conf->rxmode.jumbo_frame == 1) {
		if (dev_conf->rxmode.max_rx_pkt_len >
		    dev_info.max_rx_pktlen) {
			PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" > max valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)dev_info.max_rx_pktlen);
			return -EINVAL;
		} else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
			PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" < min valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)ETHER_MIN_LEN);
			return -EINVAL;
		}
	} else {
		if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
			dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
			/* Use default value */
			dev->data->dev_conf.rxmode.max_rx_pkt_len =
							ETHER_MAX_LEN;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
				port_id, diag);
		return diag;
	}

	diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
				port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		return diag;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
				port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		return diag;
	}

	return 0;
}
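
/*
 * Usage sketch (illustrative): configure one RX and one TX queue with
 * a zeroed default configuration ("port_conf" is an assumption, not
 * defined in this file):
 *
 *	static const struct rte_eth_conf port_conf;
 *
 *	if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot configure port\n");
 */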

static void
rte_eth_dev_config_restore(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct ether_addr addr;
	uint16_t i;
	uint32_t pool = 0;

	dev = &rte_eth_devices[port_id];

	rte_eth_dev_info_get(port_id, &dev_info);

	if (RTE_ETH_DEV_SRIOV(dev).active)
		pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

	/* replay MAC address configuration */
	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = dev->data->mac_addrs[i];

		/* skip zero address */
		if (is_zero_ether_addr(&addr))
			continue;

		/* add address to the hardware */
		if (*dev->dev_ops->mac_addr_add &&
			(dev->data->mac_pool_sel[i] & (1ULL << pool)))
			(*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
		else {
			PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
					port_id);
			/* exit the loop but do not return an error */
			break;
		}
	}

	/* replay promiscuous configuration */
	if (rte_eth_promiscuous_get(port_id) == 1)
		rte_eth_promiscuous_enable(port_id);
	else if (rte_eth_promiscuous_get(port_id) == 0)
		rte_eth_promiscuous_disable(port_id);

	/* replay all multicast configuration */
	if (rte_eth_allmulticast_get(port_id) == 1)
		rte_eth_allmulticast_enable(port_id);
	else if (rte_eth_allmulticast_get(port_id) == 0)
		rte_eth_allmulticast_disable(port_id);
}

int
rte_eth_dev_start(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	int diag;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup */
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
			" already started\n",
			port_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	rte_eth_dev_config_restore(port_id);

	if (dev->data->dev_conf.intr_conf.lsc != 0) {
		FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
		(*dev->dev_ops->link_update)(dev, 0);
	}
	return 0;
}
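
/*
 * Typical bring-up order (illustrative sketch; "port_conf" and
 * "mbuf_pool" are assumptions, not defined in this file):
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 128,
 *			rte_eth_dev_socket_id(port_id), NULL, mbuf_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, 512,
 *			rte_eth_dev_socket_id(port_id), NULL);
 *	rte_eth_dev_start(port_id);
 *
 * Starting an already started port is not an error; the call above
 * simply returns 0.
 */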

void
rte_eth_dev_stop(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup */
	PROC_PRIMARY_OR_RET();

	VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
			" already stopped\n",
			port_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
}

int
rte_eth_dev_set_link_up(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup */
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
	return (*dev->dev_ops->dev_set_link_up)(dev);
}

int
rte_eth_dev_set_link_down(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup */
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
	return (*dev->dev_ops->dev_set_link_down)(dev);
}

void
rte_eth_dev_close(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup */
	PROC_PRIMARY_OR_RET();

	VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_close)(dev);

	rte_free(dev->data->rx_queues);
	dev->data->rx_queues = NULL;
	rte_free(dev->data->tx_queues);
	dev->data->tx_queues = NULL;
}

int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint32_t mbp_buf_size;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup */
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

	/*
	 * Check the size of the mbuf data buffer.
	 * This value must be provided in the private data of the memory pool.
	 * First check that the memory pool has valid private data.
	 */
	rte_eth_dev_info_get(port_id, &dev_info);
	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
				mp->name, (int) mp->private_data_size,
				(int) sizeof(struct rte_pktmbuf_pool_private));
		return -ENOSPC;
	}
	mbp_buf_size = rte_pktmbuf_data_room_size(mp);

	if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
		PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
				"(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
				"=%d)\n",
				mp->name,
				(int)mbp_buf_size,
				(int)(RTE_PKTMBUF_HEADROOM +
				      dev_info.min_rx_bufsize),
				(int)RTE_PKTMBUF_HEADROOM,
				(int)dev_info.min_rx_bufsize);
		return -EINVAL;
	}

	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
			nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
			nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
		PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
			"should be: <= %hu, >= %hu, and a multiple of %hu\n",
			nb_rx_desc,
			dev_info.rx_desc_lim.nb_max,
			dev_info.rx_desc_lim.nb_min,
			dev_info.rx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (rx_conf == NULL)
		rx_conf = &dev_info.default_rxconf;

	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
					      socket_id, rx_conf, mp);
	if (!ret) {
		if (!dev->data->min_rx_buf_size ||
		    dev->data->min_rx_buf_size > mbp_buf_size)
			dev->data->min_rx_buf_size = mbp_buf_size;
	}

	return ret;
}
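
/*
 * Usage sketch (illustrative): create a pool and set up RX queue 0
 * with 128 descriptors and the driver's default RX configuration
 * (pool sizing values here are arbitrary):
 *
 *	struct rte_mempool *mbuf_pool = rte_pktmbuf_pool_create("mbufs",
 *			8192, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
 *			rte_socket_id());
 *
 *	if (mbuf_pool == NULL ||
 *	    rte_eth_rx_queue_setup(port_id, 0, 128,
 *			rte_eth_dev_socket_id(port_id), NULL, mbuf_pool) < 0)
 *		rte_exit(EXIT_FAILURE, "rx queue setup failed\n");
 */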

int
rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
		       uint16_t nb_tx_desc, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup */
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);

	rte_eth_dev_info_get(port_id, &dev_info);

	if (tx_conf == NULL)
		tx_conf = &dev_info.default_txconf;

	return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
					       socket_id, tx_conf);
}
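
/*
 * Usage sketch (illustrative): passing NULL for tx_conf selects the
 * default TX configuration the driver reports via dev_infos_get:
 *
 *	if (rte_eth_tx_queue_setup(port_id, 0, 512,
 *			rte_eth_dev_socket_id(port_id), NULL) < 0)
 *		rte_exit(EXIT_FAILURE, "tx queue setup failed\n");
 */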

void
rte_eth_promiscuous_enable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
	(*dev->dev_ops->promiscuous_enable)(dev);
	dev->data->promiscuous = 1;
}

void
rte_eth_promiscuous_disable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
	dev->data->promiscuous = 0;
	(*dev->dev_ops->promiscuous_disable)(dev);
}

int
rte_eth_promiscuous_get(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	return dev->data->promiscuous;
}

void
rte_eth_allmulticast_enable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
	(*dev->dev_ops->allmulticast_enable)(dev);
	dev->data->all_multicast = 1;
}

void
rte_eth_allmulticast_disable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
	dev->data->all_multicast = 0;
	(*dev->dev_ops->allmulticast_disable)(dev);
}

int
rte_eth_allmulticast_get(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	return dev->data->all_multicast;
}

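/*
 * The helper below copies the 8-byte link structure through
 * rte_atomic64_cmpset() so that a reader never observes a half-updated
 * link status while a PMD rewrites dev->data->dev_link from interrupt
 * context; if the compare-and-set fails, -1 is returned rather than a
 * torn value being stored.
 */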
static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

void
rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc != 0)
		rte_eth_dev_atomic_read_link_status(dev, eth_link);
	else {
		FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 1);
		*eth_link = dev->data->dev_link;
	}
}

void
rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc != 0)
		rte_eth_dev_atomic_read_link_status(dev, eth_link);
	else {
		FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 0);
		*eth_link = dev->data->dev_link;
	}
}

int
rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
{
	struct rte_eth_dev *dev;

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	memset(stats, 0, sizeof(*stats));

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	(*dev->dev_ops->stats_get)(dev, stats);
	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
	return 0;
}
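
/*
 * Usage sketch (illustrative):
 *
 *	struct rte_eth_stats stats;
 *
 *	if (rte_eth_stats_get(port_id, &stats) == 0)
 *		printf("rx=%" PRIu64 " tx=%" PRIu64 " rx_errors=%" PRIu64 "\n",
 *		       stats.ipackets, stats.opackets, stats.ierrors);
 */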

void
rte_eth_stats_reset(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
	(*dev->dev_ops->stats_reset)(dev);
}
1478
1479 /* retrieve ethdev extended statistics */
1480 int
1481 rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
1482         unsigned n)
1483 {
1484         struct rte_eth_stats eth_stats;
1485         struct rte_eth_dev *dev;
1486         unsigned count = 0, i, q;
1487         signed xcount = 0;
1488         uint64_t val, *stats_ptr;
1489
1490         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1491
1492         dev = &rte_eth_devices[port_id];
1493
1494         /* Return generic statistics */
1495         count = RTE_NB_STATS;
1496
1497         /* implemented by the driver */
1498         if (dev->dev_ops->xstats_get != NULL) {
1499                 /* Retrieve the xstats from the driver at the end of the
1500                  * xstats struct.
1501                  */
1502                 xcount = (*dev->dev_ops->xstats_get)(dev, &xstats[count],
1503                          (n > count) ? n - count : 0);
1504
1505                 if (xcount < 0)
1506                         return xcount;
1507         } else {
1508                 count += dev->data->nb_rx_queues * RTE_NB_RXQ_STATS;
1509                 count += dev->data->nb_tx_queues * RTE_NB_TXQ_STATS;
1510         }
1511
1512         if (n < count + xcount)
1513                 return count + xcount;
1514
1515         /* now fill the xstats structure */
1516         count = 0;
1517         rte_eth_stats_get(port_id, &eth_stats);
1518
1519         /* global stats */
1520         for (i = 0; i < RTE_NB_STATS; i++) {
1521                 stats_ptr = RTE_PTR_ADD(&eth_stats,
1522                                         rte_stats_strings[i].offset);
1523                 val = *stats_ptr;
1524                 snprintf(xstats[count].name, sizeof(xstats[count].name),
1525                         "%s", rte_stats_strings[i].name);
1526                 xstats[count++].value = val;
1527         }
1528
1529         /* if xstats_get() is implemented by the PMD, the Q stats are done */
1530         if (dev->dev_ops->xstats_get != NULL)
1531                 return count + xcount;
1532
1533         /* per-rxq stats */
1534         for (q = 0; q < dev->data->nb_rx_queues; q++) {
1535                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
1536                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1537                                         rte_rxq_stats_strings[i].offset +
1538                                         q * sizeof(uint64_t));
1539                         val = *stats_ptr;
1540                         snprintf(xstats[count].name, sizeof(xstats[count].name),
1541                                 "rx_q%u_%s", q,
1542                                 rte_rxq_stats_strings[i].name);
1543                         xstats[count++].value = val;
1544                 }
1545         }
1546
1547         /* per-txq stats */
1548         for (q = 0; q < dev->data->nb_tx_queues; q++) {
1549                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
1550                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1551                                         rte_txq_stats_strings[i].offset +
1552                                         q * sizeof(uint64_t));
1553                         val = *stats_ptr;
1554                         snprintf(xstats[count].name, sizeof(xstats[count].name),
1555                                 "tx_q%u_%s", q,
1556                                 rte_txq_stats_strings[i].name);
1557                         xstats[count++].value = val;
1558                 }
1559         }
1560
1561         return count + xcount;
1562 }
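
/*
 * Usage sketch (illustrative only, not part of this file): callers
 * commonly invoke rte_eth_xstats_get() twice -- first with n == 0 to
 * learn the required table size (returned whenever the buffer is too
 * small), then again with a buffer of that size.
 *
 *	int nb = rte_eth_xstats_get(port_id, NULL, 0);
 *	if (nb > 0) {
 *		struct rte_eth_xstats *tbl = malloc(nb * sizeof(*tbl));
 *
 *		if (tbl != NULL && rte_eth_xstats_get(port_id, tbl, nb) == nb) {
 *			int i;
 *
 *			for (i = 0; i < nb; i++)
 *				printf("%s: %" PRIu64 "\n",
 *				       tbl[i].name, tbl[i].value);
 *		}
 *		free(tbl);
 *	}
 */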
1563
1564 /* reset ethdev extended statistics */
1565 void
1566 rte_eth_xstats_reset(uint8_t port_id)
1567 {
1568         struct rte_eth_dev *dev;
1569
1570         VALID_PORTID_OR_RET(port_id);
1571         dev = &rte_eth_devices[port_id];
1572
1573         /* implemented by the driver */
1574         if (dev->dev_ops->xstats_reset != NULL) {
1575                 (*dev->dev_ops->xstats_reset)(dev);
1576                 return;
1577         }
1578
1579         /* fallback to default */
1580         rte_eth_stats_reset(port_id);
1581 }
1582
1583 static int
1584 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1585                 uint8_t is_rx)
1586 {
1587         struct rte_eth_dev *dev;
1588
1589         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1590
1591         dev = &rte_eth_devices[port_id];
1592
1593         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1594         return (*dev->dev_ops->queue_stats_mapping_set)
1595                         (dev, queue_id, stat_idx, is_rx);
1596 }
1597
1598
1599 int
1600 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1601                 uint8_t stat_idx)
1602 {
1603         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1604                         STAT_QMAP_TX);
1605 }
1606
1607
1608 int
1609 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1610                 uint8_t stat_idx)
1611 {
1612         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1613                         STAT_QMAP_RX);
1614 }
1615
1616
1617 void
1618 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1619 {
1620         struct rte_eth_dev *dev;
1621         const struct rte_eth_desc_lim lim = {
1622                 .nb_max = UINT16_MAX,
1623                 .nb_min = 0,
1624                 .nb_align = 1,
1625         };
1626
1627         VALID_PORTID_OR_RET(port_id);
1628         dev = &rte_eth_devices[port_id];
1629
1630         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
1631         dev_info->rx_desc_lim = lim;
1632         dev_info->tx_desc_lim = lim;
1633
1634         FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1635         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1636         dev_info->pci_dev = dev->pci_dev;
1637         if (dev->driver)
1638                 dev_info->driver_name = dev->driver->pci_drv.name;
1639 }
1640
1641 void
1642 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1643 {
1644         struct rte_eth_dev *dev;
1645
1646         VALID_PORTID_OR_RET(port_id);
1647         dev = &rte_eth_devices[port_id];
1648         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1649 }
1650
1651
1652 int
1653 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1654 {
1655         struct rte_eth_dev *dev;
1656
1657         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1658
1659         dev = &rte_eth_devices[port_id];
1660         *mtu = dev->data->mtu;
1661         return 0;
1662 }
1663
1664 int
1665 rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
1666 {
1667         int ret;
1668         struct rte_eth_dev *dev;
1669
1670         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1671         dev = &rte_eth_devices[port_id];
1672         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
1673
1674         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
1675         if (!ret)
1676                 dev->data->mtu = mtu;
1677
1678         return ret;
1679 }
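
/*
 * Usage sketch (illustrative, 1500 is just an example value): query
 * before updating; dev->data->mtu is only refreshed above when the
 * driver accepts the new value.
 *
 *	uint16_t mtu;
 *
 *	if (rte_eth_dev_get_mtu(port_id, &mtu) == 0 && mtu != 1500 &&
 *	    rte_eth_dev_set_mtu(port_id, 1500) != 0)
 *		printf("port %u: MTU change rejected\n", port_id);
 */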
1680
1681 int
1682 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1683 {
1684         struct rte_eth_dev *dev;
1685
1686         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1687         dev = &rte_eth_devices[port_id];
1688         if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1689                 PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1690                 return -ENOSYS;
1691         }
1692
1693         if (vlan_id > 4095) {
1694                 PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1695                                 port_id, (unsigned) vlan_id);
1696                 return -EINVAL;
1697         }
1698         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1699
1700         return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1701 }
1702
1703 int
1704 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1705 {
1706         struct rte_eth_dev *dev;
1707
1708         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1709         dev = &rte_eth_devices[port_id];
1710         if (rx_queue_id >= dev->data->nb_rx_queues) {
1711                 PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
1712                 return -EINVAL;
1713         }
1714
1715         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1716         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1717
1718         return 0;
1719 }
1720
1721 int
1722 rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
1723 {
1724         struct rte_eth_dev *dev;
1725
1726         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1727         dev = &rte_eth_devices[port_id];
1728         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1729         (*dev->dev_ops->vlan_tpid_set)(dev, tpid);
1730
1731         return 0;
1732 }
1733
1734 int
1735 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
1736 {
1737         struct rte_eth_dev *dev;
1738         int ret = 0;
1739         int mask = 0;
1740         int cur, org = 0;
1741
1742         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1743         dev = &rte_eth_devices[port_id];
1744
1745         /* check which options were changed by the application */
1746         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
1747         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
1748         if (cur != org) {
1749                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
1750                 mask |= ETH_VLAN_STRIP_MASK;
1751         }
1752
1753         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
1754         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
1755         if (cur != org) {
1756                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
1757                 mask |= ETH_VLAN_FILTER_MASK;
1758         }
1759
1760         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
1761         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
1762         if (cur != org) {
1763                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
1764                 mask |= ETH_VLAN_EXTEND_MASK;
1765         }
1766
1767         /* no change */
1768         if (mask == 0)
1769                 return ret;
1770
1771         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
1772         (*dev->dev_ops->vlan_offload_set)(dev, mask);
1773
1774         return ret;
1775 }
1776
1777 int
1778 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1779 {
1780         struct rte_eth_dev *dev;
1781         int ret = 0;
1782
1783         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1784         dev = &rte_eth_devices[port_id];
1785
1786         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1787                 ret |= ETH_VLAN_STRIP_OFFLOAD;
1788
1789         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1790                 ret |= ETH_VLAN_FILTER_OFFLOAD;
1791
1792         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1793                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
1794
1795         return ret;
1796 }
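
/*
 * Usage sketch (illustrative): rte_eth_dev_set_vlan_offload() takes the
 * complete desired state, so callers read-modify-write the mask, e.g.
 * enabling stripping while disabling filtering:
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	mask |= ETH_VLAN_STRIP_OFFLOAD;
 *	mask &= ~ETH_VLAN_FILTER_OFFLOAD;
 *	(void)rte_eth_dev_set_vlan_offload(port_id, mask);
 */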
1797
1798 int
1799 rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
1800 {
1801         struct rte_eth_dev *dev;
1802
1803         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1804         dev = &rte_eth_devices[port_id];
1805         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
1806         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
1807
1808         return 0;
1809 }
1810
1811 int
1812 rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1813 {
1814         struct rte_eth_dev *dev;
1815
1816         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1817         dev = &rte_eth_devices[port_id];
1818         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
1819         memset(fc_conf, 0, sizeof(*fc_conf));
1820         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
1821 }
1822
1823 int
1824 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1825 {
1826         struct rte_eth_dev *dev;
1827
1828         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1829         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
1830                 PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
1831                 return -EINVAL;
1832         }
1833
1834         dev = &rte_eth_devices[port_id];
1835         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
1836         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
1837 }
1838
1839 int
1840 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
1841 {
1842         struct rte_eth_dev *dev;
1843
1844         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1845         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
1846                 PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
1847                 return -EINVAL;
1848         }
1849
1850         dev = &rte_eth_devices[port_id];
1851         /* High and low water validation is device-specific */
1852         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->priority_flow_ctrl_set, -ENOTSUP);
1853         return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
1855 }
1856
1857 static int
1858 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
1859                         uint16_t reta_size)
1860 {
1861         uint16_t i, num;
1862
1863         if (!reta_conf)
1864                 return -EINVAL;
1865
1866         if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
1867                 PMD_DEBUG_TRACE("Invalid reta size, should be %u aligned\n",
1868                                                         RTE_RETA_GROUP_SIZE);
1869                 return -EINVAL;
1870         }
1871
1872         num = reta_size / RTE_RETA_GROUP_SIZE;
1873         for (i = 0; i < num; i++) {
1874                 if (reta_conf[i].mask)
1875                         return 0;
1876         }
1877
1878         return -EINVAL;
1879 }
1880
1881 static int
1882 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
1883                          uint16_t reta_size,
1884                          uint8_t max_rxq)
1885 {
1886         uint16_t i, idx, shift;
1887
1888         if (!reta_conf)
1889                 return -EINVAL;
1890
1891         if (max_rxq == 0) {
1892                 PMD_DEBUG_TRACE("No receive queue is available\n");
1893                 return -EINVAL;
1894         }
1895
1896         for (i = 0; i < reta_size; i++) {
1897                 idx = i / RTE_RETA_GROUP_SIZE;
1898                 shift = i % RTE_RETA_GROUP_SIZE;
1899                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
1900                         (reta_conf[idx].reta[shift] >= max_rxq)) {
1901                         PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
1902                                 "the maximum rxq index: %u\n", idx, shift,
1903                                 reta_conf[idx].reta[shift], max_rxq);
1904                         return -EINVAL;
1905                 }
1906         }
1907
1908         return 0;
1909 }
1910
1911 int
1912 rte_eth_dev_rss_reta_update(uint8_t port_id,
1913                             struct rte_eth_rss_reta_entry64 *reta_conf,
1914                             uint16_t reta_size)
1915 {
1916         struct rte_eth_dev *dev;
1917         int ret;
1918
1919         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1920         /* Check mask bits */
1921         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1922         if (ret < 0)
1923                 return ret;
1924
1925         dev = &rte_eth_devices[port_id];
1926
1927         /* Check entry value */
1928         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
1929                                 dev->data->nb_rx_queues);
1930         if (ret < 0)
1931                 return ret;
1932
1933         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
1934         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
1935 }
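
/*
 * Usage sketch (illustrative; reta_size is assumed to come from
 * rte_eth_dev_info.reta_size): spread the redirection table evenly over
 * nb_queues RX queues. Each rte_eth_rss_reta_entry64 covers
 * RTE_RETA_GROUP_SIZE entries; its mask selects which ones to update.
 *
 *	struct rte_eth_rss_reta_entry64 conf[reta_size / RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(conf, 0, sizeof(conf));
 *	for (i = 0; i < reta_size; i++) {
 *		conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *			1ULL << (i % RTE_RETA_GROUP_SIZE);
 *		conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *			i % nb_queues;
 *	}
 *	(void)rte_eth_dev_rss_reta_update(port_id, conf, reta_size);
 */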
1936
1937 int
1938 rte_eth_dev_rss_reta_query(uint8_t port_id,
1939                            struct rte_eth_rss_reta_entry64 *reta_conf,
1940                            uint16_t reta_size)
1941 {
1942         struct rte_eth_dev *dev;
1943         int ret;
1944
1945         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1949
1950         /* Check mask bits */
1951         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1952         if (ret < 0)
1953                 return ret;
1954
1955         dev = &rte_eth_devices[port_id];
1956         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
1957         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
1958 }
1959
1960 int
1961 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
1962 {
1963         struct rte_eth_dev *dev;
1964         uint64_t rss_hash_protos;
1965
1966         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1967         rss_hash_protos = rss_conf->rss_hf;
1968         if ((rss_hash_protos != 0) &&
1969             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
1970                 PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%"PRIx64"\n",
1971                                 rss_hash_protos);
1972                 return -EINVAL;
1973         }
1974         dev = &rte_eth_devices[port_id];
1975         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
1976         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
1977 }
1978
1979 int
1980 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
1981                               struct rte_eth_rss_conf *rss_conf)
1982 {
1983         struct rte_eth_dev *dev;
1984
1985         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1986         dev = &rte_eth_devices[port_id];
1987         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
1988         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
1989 }
1990
1991 int
1992 rte_eth_dev_udp_tunnel_add(uint8_t port_id,
1993                            struct rte_eth_udp_tunnel *udp_tunnel)
1994 {
1995         struct rte_eth_dev *dev;
1996
1997         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1998         if (udp_tunnel == NULL) {
1999                 PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2000                 return -EINVAL;
2001         }
2002
2003         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2004                 PMD_DEBUG_TRACE("Invalid tunnel type\n");
2005                 return -EINVAL;
2006         }
2007
2008         dev = &rte_eth_devices[port_id];
2009         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_add, -ENOTSUP);
2010         return (*dev->dev_ops->udp_tunnel_add)(dev, udp_tunnel);
2011 }
2012
2013 int
2014 rte_eth_dev_udp_tunnel_delete(uint8_t port_id,
2015                               struct rte_eth_udp_tunnel *udp_tunnel)
2016 {
2017         struct rte_eth_dev *dev;
2018
2019         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2020         dev = &rte_eth_devices[port_id];
2021
2022         if (udp_tunnel == NULL) {
2023                 PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2024                 return -EINVAL;
2025         }
2026
2027         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2028                 PMD_DEBUG_TRACE("Invalid tunnel type\n");
2029                 return -EINVAL;
2030         }
2031
2032         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_del, -ENOTSUP);
2033         return (*dev->dev_ops->udp_tunnel_del)(dev, udp_tunnel);
2034 }
2035
2036 int
2037 rte_eth_led_on(uint8_t port_id)
2038 {
2039         struct rte_eth_dev *dev;
2040
2041         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2042         dev = &rte_eth_devices[port_id];
2043         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2044         return (*dev->dev_ops->dev_led_on)(dev);
2045 }
2046
2047 int
2048 rte_eth_led_off(uint8_t port_id)
2049 {
2050         struct rte_eth_dev *dev;
2051
2052         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2053         dev = &rte_eth_devices[port_id];
2054         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2055         return (*dev->dev_ops->dev_led_off)(dev);
2056 }
2057
2058 /*
2059  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2060  * an empty spot.
2061  */
2062 static int
2063 get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2064 {
2065         struct rte_eth_dev_info dev_info;
2066         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2067         unsigned i;
2068
2069         rte_eth_dev_info_get(port_id, &dev_info);
2070
2071         for (i = 0; i < dev_info.max_mac_addrs; i++)
2072                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2073                         return i;
2074
2075         return -1;
2076 }
2077
2078 static const struct ether_addr null_mac_addr;
2079
2080 int
2081 rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
2082                         uint32_t pool)
2083 {
2084         struct rte_eth_dev *dev;
2085         int index;
2086         uint64_t pool_mask;
2087
2088         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2089         dev = &rte_eth_devices[port_id];
2090         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2091
2092         if (is_zero_ether_addr(addr)) {
2093                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2094                         port_id);
2095                 return -EINVAL;
2096         }
2097         if (pool >= ETH_64_POOLS) {
2098                 PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2099                 return -EINVAL;
2100         }
2101
2102         index = get_mac_addr_index(port_id, addr);
2103         if (index < 0) {
2104                 index = get_mac_addr_index(port_id, &null_mac_addr);
2105                 if (index < 0) {
2106                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2107                                 port_id);
2108                         return -ENOSPC;
2109                 }
2110         } else {
2111                 pool_mask = dev->data->mac_pool_sel[index];
2112
2113                 /* If the MAC address is already in this pool, do nothing */
2114                 if (pool_mask & (1ULL << pool))
2115                         return 0;
2116         }
2117
2118         /* Update NIC */
2119         (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2120
2121         /* Update address in NIC data structure */
2122         ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2123
2124         /* Update pool bitmap in NIC data structure */
2125         dev->data->mac_pool_sel[index] |= (1ULL << pool);
2126
2127         return 0;
2128 }
2129
2130 int
2131 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
2132 {
2133         struct rte_eth_dev *dev;
2134         int index;
2135
2136         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2137         dev = &rte_eth_devices[port_id];
2138         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2139
2140         index = get_mac_addr_index(port_id, addr);
2141         if (index == 0) {
2142                 PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2143                 return -EADDRINUSE;
2144         } else if (index < 0)
2145                 return 0;  /* Do nothing if address wasn't found */
2146
2147         /* Update NIC */
2148         (*dev->dev_ops->mac_addr_remove)(dev, index);
2149
2150         /* Update address in NIC data structure */
2151         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2152
2153         /* reset pool bitmap */
2154         dev->data->mac_pool_sel[index] = 0;
2155
2156         return 0;
2157 }
2158
2159 int
2160 rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr)
2161 {
2162         struct rte_eth_dev *dev;
2163
2164         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2165
2166         if (!is_valid_assigned_ether_addr(addr))
2167                 return -EINVAL;
2168
2169         dev = &rte_eth_devices[port_id];
2170         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2171
2172         /* Update default address in NIC data structure */
2173         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
2174
2175         (*dev->dev_ops->mac_addr_set)(dev, addr);
2176
2177         return 0;
2178 }
2179
2180 int
2181 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
2182                                 uint16_t rx_mode, uint8_t on)
2183 {
2184         uint16_t num_vfs;
2185         struct rte_eth_dev *dev;
2186         struct rte_eth_dev_info dev_info;
2187
2188         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2189
2190         dev = &rte_eth_devices[port_id];
2191         rte_eth_dev_info_get(port_id, &dev_info);
2192
2193         num_vfs = dev_info.max_vfs;
2194         if (vf > num_vfs) {
2195                 PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
2196                 return -EINVAL;
2197         }
2198
2199         if (rx_mode == 0) {
2200                 PMD_DEBUG_TRACE("set VF RX mode:mode mask can not be zero\n");
2201                 return -EINVAL;
2202         }
2203         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
2204         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
2205 }
2206
2207 /*
2208  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2209  * an empty spot.
2210  */
2211 static int
2212 get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2213 {
2214         struct rte_eth_dev_info dev_info;
2215         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2216         unsigned i;
2217
2218         rte_eth_dev_info_get(port_id, &dev_info);
2219         if (!dev->data->hash_mac_addrs)
2220                 return -1;
2221
2222         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2223                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2224                         ETHER_ADDR_LEN) == 0)
2225                         return i;
2226
2227         return -1;
2228 }
2229
2230 int
2231 rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
2232                                 uint8_t on)
2233 {
2234         int index;
2235         int ret;
2236         struct rte_eth_dev *dev;
2237
2238         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2239
2240         dev = &rte_eth_devices[port_id];
2241         if (is_zero_ether_addr(addr)) {
2242                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2243                         port_id);
2244                 return -EINVAL;
2245         }
2246
2247         index = get_hash_mac_addr_index(port_id, addr);
2248         /* Check if it's already there, and do nothing */
2249         if ((index >= 0) && (on))
2250                 return 0;
2251
2252         if (index < 0) {
2253                 if (!on) {
2254                         PMD_DEBUG_TRACE("port %d: the MAC address was not "
2255                                 "set in UTA\n", port_id);
2256                         return -EINVAL;
2257                 }
2258
2259                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2260                 if (index < 0) {
2261                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2262                                         port_id);
2263                         return -ENOSPC;
2264                 }
2265         }
2266
2267         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2268         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2269         if (ret == 0) {
2270                 /* Update address in NIC data structure */
2271                 if (on)
2272                         ether_addr_copy(addr,
2273                                         &dev->data->hash_mac_addrs[index]);
2274                 else
2275                         ether_addr_copy(&null_mac_addr,
2276                                         &dev->data->hash_mac_addrs[index]);
2277         }
2278
2279         return ret;
2280 }
2281
2282 int
2283 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2284 {
2285         struct rte_eth_dev *dev;
2286
2287         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2288
2289         dev = &rte_eth_devices[port_id];
2290
2291         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2292         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2293 }
2294
2295 int
2296 rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on)
2297 {
2298         uint16_t num_vfs;
2299         struct rte_eth_dev *dev;
2300         struct rte_eth_dev_info dev_info;
2301
2302         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2303
2304         dev = &rte_eth_devices[port_id];
2305         rte_eth_dev_info_get(port_id, &dev_info);
2306
2307         num_vfs = dev_info.max_vfs;
2308         if (vf > num_vfs) {
2309                 PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2310                 return -EINVAL;
2311         }
2312
2313         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2314         return (*dev->dev_ops->set_vf_rx)(dev, vf, on);
2315 }
2316
2317 int
2318 rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on)
2319 {
2320         uint16_t num_vfs;
2321         struct rte_eth_dev *dev;
2322         struct rte_eth_dev_info dev_info;
2323
2324         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2325
2326         dev = &rte_eth_devices[port_id];
2327         rte_eth_dev_info_get(port_id, &dev_info);
2328
2329         num_vfs = dev_info.max_vfs;
2330         if (vf > num_vfs) {
2331                 PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
2332                 return -EINVAL;
2333         }
2334
2335         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2336         return (*dev->dev_ops->set_vf_tx)(dev, vf, on);
2337 }
2338
2339 int
2340 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2341                                uint64_t vf_mask, uint8_t vlan_on)
2342 {
2343         struct rte_eth_dev *dev;
2344
2345         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2346
2347         dev = &rte_eth_devices[port_id];
2348
2349         if (vlan_id > ETHER_MAX_VLAN_ID) {
2350                 PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
2351                         vlan_id);
2352                 return -EINVAL;
2353         }
2354
2355         if (vf_mask == 0) {
2356                 PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
2357                 return -EINVAL;
2358         }
2359
2360         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2361         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2362                                                    vf_mask, vlan_on);
2363 }
2364
2365 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
2366                                         uint16_t tx_rate)
2367 {
2368         struct rte_eth_dev *dev;
2369         struct rte_eth_dev_info dev_info;
2370         struct rte_eth_link link;
2371
2372         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2373
2374         dev = &rte_eth_devices[port_id];
2375         rte_eth_dev_info_get(port_id, &dev_info);
2376         link = dev->data->dev_link;
2377
2378         if (queue_idx > dev_info.max_tx_queues) {
2379                 PMD_DEBUG_TRACE("set queue rate limit:port %d: "
2380                                 "invalid queue id=%d\n", port_id, queue_idx);
2381                 return -EINVAL;
2382         }
2383
2384         if (tx_rate > link.link_speed) {
2385                 PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
2386                                 "bigger than link speed=%d\n",
2387                                 tx_rate, link.link_speed);
2388                 return -EINVAL;
2389         }
2390
2391         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2392         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2393 }
2394
2395 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
2396                                 uint64_t q_msk)
2397 {
2398         struct rte_eth_dev *dev;
2399         struct rte_eth_dev_info dev_info;
2400         struct rte_eth_link link;
2401
2402         if (q_msk == 0)
2403                 return 0;
2404
2405         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2406
2407         dev = &rte_eth_devices[port_id];
2408         rte_eth_dev_info_get(port_id, &dev_info);
2409         link = dev->data->dev_link;
2410
2411         if (vf > dev_info.max_vfs) {
2412                 PMD_DEBUG_TRACE("set VF rate limit:port %d: "
2413                                 "invalid vf id=%d\n", port_id, vf);
2414                 return -EINVAL;
2415         }
2416
2417         if (tx_rate > link.link_speed) {
2418                 PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
2419                                 "bigger than link speed=%d\n",
2420                                 tx_rate, link.link_speed);
2421                 return -EINVAL;
2422         }
2423
2424         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
2425         return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
2426 }
2427
2428 int
2429 rte_eth_mirror_rule_set(uint8_t port_id,
2430                         struct rte_eth_mirror_conf *mirror_conf,
2431                         uint8_t rule_id, uint8_t on)
2432 {
2433         struct rte_eth_dev *dev;
2434
2435         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2436         if (mirror_conf->rule_type == 0) {
2437                 PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2438                 return -EINVAL;
2439         }
2440
2441         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2442                 PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
2443                                 ETH_64_POOLS - 1);
2444                 return -EINVAL;
2445         }
2446
2447         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
2448              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
2449             (mirror_conf->pool_mask == 0)) {
2450                 PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
2451                 return -EINVAL;
2452         }
2453
2454         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
2455             mirror_conf->vlan.vlan_mask == 0) {
2456                 PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
2457                 return -EINVAL;
2458         }
2459
2460         dev = &rte_eth_devices[port_id];
2461         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2462
2463         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2464 }
2465
2466 int
2467 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2468 {
2469         struct rte_eth_dev *dev;
2470
2471         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2472
2473         dev = &rte_eth_devices[port_id];
2474         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2475
2476         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2477 }
2478
2479 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2480 uint16_t
2481 rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
2482                  struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2483 {
2484         struct rte_eth_dev *dev;
2485
2486         VALID_PORTID_OR_ERR_RET(port_id, 0);
2487
2488         dev = &rte_eth_devices[port_id];
2489         FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
2490         if (queue_id >= dev->data->nb_rx_queues) {
2491                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
2492                 return 0;
2493         }
2494         return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
2495                                                 rx_pkts, nb_pkts);
2496 }
2497
2498 uint16_t
2499 rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
2500                  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2501 {
2502         struct rte_eth_dev *dev;
2503
2504         VALID_PORTID_OR_ERR_RET(port_id, 0);
2505
2506         dev = &rte_eth_devices[port_id];
2507
2508         FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
2509         if (queue_id >= dev->data->nb_tx_queues) {
2510                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
2511                 return 0;
2512         }
2513         return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
2514                                                 tx_pkts, nb_pkts);
2515 }
2516
2517 uint32_t
2518 rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
2519 {
2520         struct rte_eth_dev *dev;
2521
2522         VALID_PORTID_OR_ERR_RET(port_id, 0);
2523
2524         dev = &rte_eth_devices[port_id];
2525         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, 0);
2526         return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
2527 }
2528
2529 int
2530 rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
2531 {
2532         struct rte_eth_dev *dev;
2533
2534         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2535
2536         dev = &rte_eth_devices[port_id];
2537         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
2538         return (*dev->dev_ops->rx_descriptor_done)(dev->data->rx_queues[queue_id],
2539                                                    offset);
2540 }
2541 #endif
2542
2543 int
2544 rte_eth_dev_callback_register(uint8_t port_id,
2545                         enum rte_eth_event_type event,
2546                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2547 {
2548         struct rte_eth_dev *dev;
2549         struct rte_eth_dev_callback *user_cb;
2550
2551         if (!cb_fn)
2552                 return -EINVAL;
2553
2554         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2555
2556         dev = &rte_eth_devices[port_id];
2557         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2558
2559         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
2560                 if (user_cb->cb_fn == cb_fn &&
2561                         user_cb->cb_arg == cb_arg &&
2562                         user_cb->event == event) {
2563                         break;
2564                 }
2565         }
2566
2567         /* create a new callback, only if none was found above */
2568         if (user_cb == NULL) {
2569                 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
2570                                 sizeof(struct rte_eth_dev_callback), 0);
2571                 if (user_cb != NULL) {
2572                         user_cb->cb_fn = cb_fn;
2573                         user_cb->cb_arg = cb_arg;
2574                         user_cb->event = event;
2575                         TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
2576                 }
2577         }

2578         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2579         return (user_cb == NULL) ? -ENOMEM : 0;
2580 }
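
/*
 * Usage sketch (illustrative): registering a link-state-change handler.
 * Handlers run from the interrupt thread (see
 * _rte_eth_dev_callback_process() below), so they should return quickly.
 *
 *	static void
 *	lsc_handler(uint8_t port, enum rte_eth_event_type ev, void *arg)
 *	{
 *		RTE_SET_USED(arg);
 *		printf("port %u: event %d\n", port, ev);
 *	}
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      lsc_handler, NULL);
 */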
2581
2582 int
2583 rte_eth_dev_callback_unregister(uint8_t port_id,
2584                         enum rte_eth_event_type event,
2585                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2586 {
2587         int ret;
2588         struct rte_eth_dev *dev;
2589         struct rte_eth_dev_callback *cb, *next;
2590
2591         if (!cb_fn)
2592                 return -EINVAL;
2593
2594         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2595
2596         dev = &rte_eth_devices[port_id];
2597         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2598
2599         ret = 0;
2600         for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
2601
2602                 next = TAILQ_NEXT(cb, next);
2603
2604                 if (cb->cb_fn != cb_fn || cb->event != event ||
2605                                 (cb->cb_arg != (void *)-1 &&
2606                                 cb->cb_arg != cb_arg))
2607                         continue;
2608
2609                 /*
2610                  * if this callback is not executing right now,
2611                  * then remove it.
2612                  */
2613                 if (cb->active == 0) {
2614                         TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
2615                         rte_free(cb);
2616                 } else {
2617                         ret = -EAGAIN;
2618                 }
2619         }
2620
2621         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2622         return ret;
2623 }
2624
2625 void
2626 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
2627         enum rte_eth_event_type event)
2628 {
2629         struct rte_eth_dev_callback *cb_lst;
2630         struct rte_eth_dev_callback dev_cb;
2631
2632         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2633         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
2634                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
2635                         continue;
2636                 dev_cb = *cb_lst;
2637                 cb_lst->active = 1;
2638                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2639                 dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
2640                                                 dev_cb.cb_arg);
2641                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
2642                 cb_lst->active = 0;
2643         }
2644         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2645 }
2646
2647 int
2648 rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
2649 {
2650         uint32_t vec;
2651         struct rte_eth_dev *dev;
2652         struct rte_intr_handle *intr_handle;
2653         uint16_t qid;
2654         int rc;
2655
2656         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2660
2661         dev = &rte_eth_devices[port_id];
2662         intr_handle = &dev->pci_dev->intr_handle;
2663         if (!intr_handle->intr_vec) {
2664                 PMD_DEBUG_TRACE("RX Intr vector unset\n");
2665                 return -EPERM;
2666         }
2667
2668         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
2669                 vec = intr_handle->intr_vec[qid];
2670                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2671                 if (rc && rc != -EEXIST) {
2672                         PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2673                                         " op %d epfd %d vec %u\n",
2674                                         port_id, qid, op, epfd, vec);
2675                 }
2676         }
2677
2678         return 0;
2679 }
2680
2681 int
2682 rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
2683                           int epfd, int op, void *data)
2684 {
2685         uint32_t vec;
2686         struct rte_eth_dev *dev;
2687         struct rte_intr_handle *intr_handle;
2688         int rc;
2689
2690         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2694
2695         dev = &rte_eth_devices[port_id];
2696         if (queue_id >= dev->data->nb_rx_queues) {
2697                 PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
2698                 return -EINVAL;
2699         }
2700
2701         intr_handle = &dev->pci_dev->intr_handle;
2702         if (!intr_handle->intr_vec) {
2703                 PMD_DEBUG_TRACE("RX Intr vector unset\n");
2704                 return -EPERM;
2705         }
2706
2707         vec = intr_handle->intr_vec[queue_id];
2708         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2709         if (rc && rc != -EEXIST) {
2710                 PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2711                                 " op %d epfd %d vec %u\n",
2712                                 port_id, queue_id, op, epfd, vec);
2713                 return rc;
2714         }
2715
2716         return 0;
2717 }
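
/*
 * Usage sketch (illustrative): arm one RX queue on the per-thread epoll
 * instance, enable its interrupt, and sleep until traffic arrives.
 *
 *	struct rte_epoll_event ev;
 *
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	if (rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1) > 0) {
 *		rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *		... drain the queue with rte_eth_rx_burst() ...
 *	}
 */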
2718
2719 int
2720 rte_eth_dev_rx_intr_enable(uint8_t port_id,
2721                            uint16_t queue_id)
2722 {
2723         struct rte_eth_dev *dev;
2724
2725         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2729
2730         dev = &rte_eth_devices[port_id];
2731
2732         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
2733         return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
2734 }
2735
2736 int
2737 rte_eth_dev_rx_intr_disable(uint8_t port_id,
2738                             uint16_t queue_id)
2739 {
2740         struct rte_eth_dev *dev;
2741
2742         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2746
2747         dev = &rte_eth_devices[port_id];
2748
2749         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
2750         return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
2751 }
2752
2753 #ifdef RTE_NIC_BYPASS
2754 int rte_eth_dev_bypass_init(uint8_t port_id)
2755 {
2756         struct rte_eth_dev *dev;
2757
2758         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2759
2760         dev = &rte_eth_devices[port_id];
2761         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
2762         (*dev->dev_ops->bypass_init)(dev);
2763         return 0;
2764 }
2765
2766 int
2767 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
2768 {
2769         struct rte_eth_dev *dev;
2770
2771         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2772
2773         dev = &rte_eth_devices[port_id];
2774         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2775         (*dev->dev_ops->bypass_state_show)(dev, state);
2776         return 0;
2777 }
2778
2779 int
2780 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
2781 {
2782         struct rte_eth_dev *dev;
2783
2784         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2785
2786         dev = &rte_eth_devices[port_id];
2787         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
2788         (*dev->dev_ops->bypass_state_set)(dev, new_state);
2789         return 0;
2790 }
2791
2792 int
2793 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
2794 {
2795         struct rte_eth_dev *dev;
2796
2797         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2798
2799         dev = &rte_eth_devices[port_id];
2800         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_show, -ENOTSUP);
2801         (*dev->dev_ops->bypass_event_show)(dev, event, state);
2802         return 0;
2803 }
2804
2805 int
2806 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
2807 {
2808         struct rte_eth_dev *dev;
2809
2810         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2811
2812         dev = &rte_eth_devices[port_id];
2813
2814         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
2815         (*dev->dev_ops->bypass_event_set)(dev, event, state);
2816         return 0;
2817 }
2818
2819 int
2820 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
2821 {
2822         struct rte_eth_dev *dev;
2823
2824         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2825
2826         dev = &rte_eth_devices[port_id];
2827
2828         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
2829         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
2830         return 0;
2831 }
2832
2833 int
2834 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
2835 {
2836         struct rte_eth_dev *dev;
2837
2838         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2839
2840         dev = &rte_eth_devices[port_id];
2841
2842         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
2843         (*dev->dev_ops->bypass_ver_show)(dev, ver);
2844         return 0;
2845 }
2846
2847 int
2848 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
2849 {
2850         struct rte_eth_dev *dev;
2851
2852         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2853
2854         dev = &rte_eth_devices[port_id];
2855
2856         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
2857         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
2858         return 0;
2859 }
2860
2861 int
2862 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
2863 {
2864         struct rte_eth_dev *dev;
2865
2866         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2867
2868         dev = &rte_eth_devices[port_id];
2869
2870         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
2871         (*dev->dev_ops->bypass_wd_reset)(dev);
2872         return 0;
2873 }
2874 #endif
2875
2876 int
2877 rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
2878 {
2879         struct rte_eth_dev *dev;
2880
2881         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2882
2883         dev = &rte_eth_devices[port_id];
2884         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2885         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
2886                                 RTE_ETH_FILTER_NOP, NULL);
2887 }
2888
2889 int
2890 rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
2891                        enum rte_filter_op filter_op, void *arg)
2892 {
2893         struct rte_eth_dev *dev;
2894
2895         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2896
2897         dev = &rte_eth_devices[port_id];
2898         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2899         return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
2900 }
2901
2902 void *
2903 rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
2904                 rte_rx_callback_fn fn, void *user_param)
2905 {
2906 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2907         rte_errno = ENOTSUP;
2908         return NULL;
2909 #endif
2910         /* check input parameters */
2911         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2912                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2913                 rte_errno = EINVAL;
2914                 return NULL;
2915         }
2916
2917         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2918
2919         if (cb == NULL) {
2920                 rte_errno = ENOMEM;
2921                 return NULL;
2922         }
2923
2924         cb->fn.rx = fn;
2925         cb->param = user_param;
2926
2927         /* Add the callbacks in fifo order. */
2928         struct rte_eth_rxtx_callback *tail =
2929                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
2930
2931         if (!tail) {
2932                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
2933
2934         } else {
2935                 while (tail->next)
2936                         tail = tail->next;
2937                 tail->next = cb;
2938         }
2939
2940         return cb;
2941 }
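
/*
 * Usage sketch (illustrative): a pass-through RX callback counting
 * received packets. Keep the returned handle for
 * rte_eth_remove_rx_callback(), which unlinks the callback but does not
 * free it.
 *
 *	static uint16_t
 *	count_cb(uint8_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *		 uint16_t nb_pkts, uint16_t max_pkts, void *arg)
 *	{
 *		*(uint64_t *)arg += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	static uint64_t total;
 *	void *cb = rte_eth_add_rx_callback(port_id, 0, count_cb, &total);
 */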
2942
2943 void *
2944 rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
2945                 rte_tx_callback_fn fn, void *user_param)
2946 {
2947 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2948         rte_errno = ENOTSUP;
2949         return NULL;
2950 #endif
2951         /* check input parameters */
2952         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2953                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
2954                 rte_errno = EINVAL;
2955                 return NULL;
2956         }
2957
2958         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2959
2960         if (cb == NULL) {
2961                 rte_errno = ENOMEM;
2962                 return NULL;
2963         }
2964
2965         cb->fn.tx = fn;
2966         cb->param = user_param;
2967
2968         /* Add the callbacks in fifo order. */
2969         struct rte_eth_rxtx_callback *tail =
2970                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
2971
2972         if (!tail) {
2973                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
2974
2975         } else {
2976                 while (tail->next)
2977                         tail = tail->next;
2978                 tail->next = cb;
2979         }
2980
2981         return cb;
2982 }
2983
2984 int
2985 rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
2986                 struct rte_eth_rxtx_callback *user_cb)
2987 {
2988 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2989         return -ENOTSUP;
2990 #endif
2991         /* Check input parameters. */
2992         if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
2993                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2994                 return -EINVAL;
2995         }
2996
2997         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2998         struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
2999         struct rte_eth_rxtx_callback *prev_cb;
3000
3001         /* Reset head pointer and remove user cb if first in the list. */
3002         if (cb == user_cb) {
3003                 dev->post_rx_burst_cbs[queue_id] = user_cb->next;
3004                 return 0;
3005         }
3006
3007         /* Remove the user cb from the callback list. */
3008         do {
3009                 prev_cb = cb;
3010                 cb = cb->next;
3011
3012                 if (cb == user_cb) {
3013                         prev_cb->next = user_cb->next;
3014                         return 0;
3015                 }
3016
3017         } while (cb != NULL);
3018
3019         /* Callback wasn't found. */
3020         return -EINVAL;
3021 }
3022
3023 int
3024 rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
3025                 struct rte_eth_rxtx_callback *user_cb)
3026 {
3027 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3028         return -ENOTSUP;
3029 #endif
3030         /* Check input parameters. */
3031         if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
3032                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3033                 return -EINVAL;
3034         }
3035
3036         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3037         struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
3038         struct rte_eth_rxtx_callback *prev_cb;
3039
3040         /* Reset head pointer and remove user cb if first in the list. */
3041         if (cb == user_cb) {
3042                 dev->pre_tx_burst_cbs[queue_id] = user_cb->next;
3043                 return 0;
3044         }
3045
3046         /* Remove the user cb from the callback list. */
3047         do {
3048                 prev_cb = cb;
3049                 cb = cb->next;
3050
3051                 if (cb == user_cb) {
3052                         prev_cb->next = user_cb->next;
3053                         return 0;
3054                 }
3055
3056         } while (cb != NULL);
3057
3058         /* Callback wasn't found. */
3059         return -EINVAL;
3060 }
3061
3062 int
3063 rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3064         struct rte_eth_rxq_info *qinfo)
3065 {
3066         struct rte_eth_dev *dev;
3067
3068         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3069
3070         if (qinfo == NULL)
3071                 return -EINVAL;
3072
3073         dev = &rte_eth_devices[port_id];
3074         if (queue_id >= dev->data->nb_rx_queues) {
3075                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3076                 return -EINVAL;
3077         }
3078
3079         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3080
3081         memset(qinfo, 0, sizeof(*qinfo));
3082         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3083         return 0;
3084 }
3085
3086 int
3087 rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3088         struct rte_eth_txq_info *qinfo)
3089 {
3090         struct rte_eth_dev *dev;
3091
3092         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3093
3094         if (qinfo == NULL)
3095                 return -EINVAL;
3096
3097         dev = &rte_eth_devices[port_id];
3098         if (queue_id >= dev->data->nb_tx_queues) {
3099                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3100                 return -EINVAL;
3101         }
3102
3103         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3104
3105         memset(qinfo, 0, sizeof(*qinfo));
3106         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3107         return 0;
3108 }
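
/*
 * Usage sketch (illustrative): inspecting a configured RX ring.
 *
 *	struct rte_eth_rxq_info qinfo;
 *
 *	if (rte_eth_rx_queue_info_get(port_id, 0, &qinfo) == 0)
 *		printf("rxq0: %u descriptors, mempool %s\n",
 *		       qinfo.nb_desc, qinfo.mp->name);
 */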
3109
3110 int
3111 rte_eth_dev_set_mc_addr_list(uint8_t port_id,
3112                              struct ether_addr *mc_addr_set,
3113                              uint32_t nb_mc_addr)
3114 {
3115         struct rte_eth_dev *dev;
3116
3117         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3118
3119         dev = &rte_eth_devices[port_id];
3120         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3121         return dev->dev_ops->set_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
3122 }
3123
3124 int
3125 rte_eth_timesync_enable(uint8_t port_id)
3126 {
3127         struct rte_eth_dev *dev;
3128
3129         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3130         dev = &rte_eth_devices[port_id];
3131
3132         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3133         return (*dev->dev_ops->timesync_enable)(dev);
3134 }
3135
3136 int
3137 rte_eth_timesync_disable(uint8_t port_id)
3138 {
3139         struct rte_eth_dev *dev;
3140
3141         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3142         dev = &rte_eth_devices[port_id];
3143
3144         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3145         return (*dev->dev_ops->timesync_disable)(dev);
3146 }
3147
3148 int
3149 rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp,
3150                                    uint32_t flags)
3151 {
3152         struct rte_eth_dev *dev;
3153
3154         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3155         dev = &rte_eth_devices[port_id];
3156
3157         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3158         return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
3159 }
3160
3161 int
3162 rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp)
3163 {
3164         struct rte_eth_dev *dev;
3165
3166         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3167         dev = &rte_eth_devices[port_id];
3168
3169         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3170         return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
3171 }
3172
3173 int
3174 rte_eth_dev_get_reg_length(uint8_t port_id)
3175 {
3176         struct rte_eth_dev *dev;
3177
3178         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3179
3180         dev = &rte_eth_devices[port_id];
3181         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg_length, -ENOTSUP);
3182         return (*dev->dev_ops->get_reg_length)(dev);
3183 }
3184
3185 int
3186 rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info)
3187 {
3188         struct rte_eth_dev *dev;
3189
3190         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3191
3192         dev = &rte_eth_devices[port_id];
3193         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
3194         return (*dev->dev_ops->get_reg)(dev, info);
3195 }
3196
3197 int
3198 rte_eth_dev_get_eeprom_length(uint8_t port_id)
3199 {
3200         struct rte_eth_dev *dev;
3201
3202         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3203
3204         dev = &rte_eth_devices[port_id];
3205         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
3206         return (*dev->dev_ops->get_eeprom_length)(dev);
3207 }
3208
3209 int
3210 rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
3211 {
3212         struct rte_eth_dev *dev;
3213
3214         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3215
3216         dev = &rte_eth_devices[port_id];
3217         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
3218         return (*dev->dev_ops->get_eeprom)(dev, info);
3219 }
3220
3221 int
3222 rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
3223 {
3224         struct rte_eth_dev *dev;
3225
3226         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3227
3228         dev = &rte_eth_devices[port_id];
3229         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
3230         return (*dev->dev_ops->set_eeprom)(dev, info);
3231 }
3232
3233 int
3234 rte_eth_dev_get_dcb_info(uint8_t port_id,
3235                              struct rte_eth_dcb_info *dcb_info)
3236 {
3237         struct rte_eth_dev *dev;
3238
3239         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3243
3244         dev = &rte_eth_devices[port_id];
3245         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
3246
3247         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
3248         return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
3249 }