d990ed8d5a677e3fd6091cca7998040109cc8868
[dpdk.git] / lib / librte_ether / rte_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/types.h>
35 #include <sys/queue.h>
36 #include <ctype.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include <stdarg.h>
41 #include <errno.h>
42 #include <stdint.h>
43 #include <inttypes.h>
44 #include <netinet/in.h>
45
46 #include <rte_byteorder.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_interrupts.h>
50 #include <rte_pci.h>
51 #include <rte_memory.h>
52 #include <rte_memcpy.h>
53 #include <rte_memzone.h>
54 #include <rte_launch.h>
55 #include <rte_eal.h>
56 #include <rte_per_lcore.h>
57 #include <rte_lcore.h>
58 #include <rte_atomic.h>
59 #include <rte_branch_prediction.h>
60 #include <rte_common.h>
61 #include <rte_ring.h>
62 #include <rte_mempool.h>
63 #include <rte_malloc.h>
64 #include <rte_mbuf.h>
65 #include <rte_errno.h>
66 #include <rte_spinlock.h>
67 #include <rte_string_fns.h>
68
69 #include "rte_ether.h"
70 #include "rte_ethdev.h"
71
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
/* Debug builds: log an error message prefixed with the calling function. */
#define PMD_DEBUG_TRACE(fmt, args...) do {                        \
		RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
	} while (0)
#else
/* Release builds: traces compile away to nothing. */
#define PMD_DEBUG_TRACE(fmt, args...)
#endif

/* Macros for checking for restricting functions to primary instance only */
#define PROC_PRIMARY_OR_ERR_RET(retval) do { \
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
		PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
		return (retval); \
	} \
} while (0)

/* Same as PROC_PRIMARY_OR_ERR_RET, for functions returning void. */
#define PROC_PRIMARY_OR_RET() do { \
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
		PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
		return; \
	} \
} while (0)

/* Macros to check for invalid function pointers in dev_ops structure */
#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
	if ((func) == NULL) { \
		PMD_DEBUG_TRACE("Function not supported\n"); \
		return (retval); \
	} \
} while (0)

/* Same as FUNC_PTR_OR_ERR_RET, for functions returning void. */
#define FUNC_PTR_OR_RET(func) do { \
	if ((func) == NULL) { \
		PMD_DEBUG_TRACE("Function not supported\n"); \
		return; \
	} \
} while (0)

/* Macros to check for valid port: bail out with `retval` on a bad port id. */
#define VALID_PORTID_OR_ERR_RET(port_id, retval) do {           \
	if (!rte_eth_dev_is_valid_port(port_id)) {              \
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
		return retval;                                  \
	}                                                       \
} while (0)

/* Same as VALID_PORTID_OR_ERR_RET, for functions returning void. */
#define VALID_PORTID_OR_RET(port_id) do {                       \
	if (!rte_eth_dev_is_valid_port(port_id)) {              \
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
		return;                                         \
	}                                                       \
} while (0)
124
/* Name of the memzone that backs the shared per-port data array. */
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
/* Per-process device table; each entry's `data` points into the memzone. */
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
/* Shared (primary/secondary) per-port data array, mapped lazily. */
static struct rte_eth_dev_data *rte_eth_dev_data;
/* Number of currently attached ports (incremented/decremented on
 * allocate/release). */
static uint8_t nb_ports;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and its offset in stats structure  */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE]; /* exported statistic name */
	unsigned offset;  /* byte offset of the field in struct rte_eth_stats */
};

/* Mapping of generic (non-queue) xstats names to rte_eth_stats fields. */
static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

/* Per-RX-queue xstats; offsets index the first queue's slot in the
 * q_* arrays of rte_eth_stats. */
static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
		sizeof(rte_rxq_stats_strings[0]))

/* Per-TX-queue xstats, same offset convention as the RX table. */
static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
		sizeof(rte_txq_stats_strings[0]))


/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

/* Direction selector for queue-to-statistics mapping. */
enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

/* Attachment state of an rte_eth_devices[] slot. */
enum {
	DEV_DETACHED = 0,
	DEV_ATTACHED
};
192
193 static void
194 rte_eth_dev_data_alloc(void)
195 {
196         const unsigned flags = 0;
197         const struct rte_memzone *mz;
198
199         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
200                 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
201                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
202                                 rte_socket_id(), flags);
203         } else
204                 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
205         if (mz == NULL)
206                 rte_panic("Cannot allocate memzone for ethernet port data\n");
207
208         rte_eth_dev_data = mz->addr;
209         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
210                 memset(rte_eth_dev_data, 0,
211                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
212 }
213
214 struct rte_eth_dev *
215 rte_eth_dev_allocated(const char *name)
216 {
217         unsigned i;
218
219         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
220                 if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
221                     strcmp(rte_eth_devices[i].data->name, name) == 0)
222                         return &rte_eth_devices[i];
223         }
224         return NULL;
225 }
226
227 static uint8_t
228 rte_eth_dev_find_free_port(void)
229 {
230         unsigned i;
231
232         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
233                 if (rte_eth_devices[i].attached == DEV_DETACHED)
234                         return i;
235         }
236         return RTE_MAX_ETHPORTS;
237 }
238
/**
 * Allocate an ethdev slot and its shared data entry for a new device.
 *
 * Finds a free slot in rte_eth_devices[], lazily maps the shared data
 * memzone on first use, rejects duplicate names, and marks the slot
 * attached.
 *
 * @param name  unique device name, copied into the shared data entry
 * @param type  device type (e.g. RTE_ETH_DEV_PCI)
 * @return the initialized device, or NULL when no port is free or the
 *         name is already taken
 */
struct rte_eth_dev *
rte_eth_dev_allocate(const char *name, enum rte_eth_dev_type type)
{
	uint8_t port_id;
	struct rte_eth_dev *eth_dev;

	port_id = rte_eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
		return NULL;
	}

	/* first allocation maps/creates the shared data array */
	if (rte_eth_dev_data == NULL)
		rte_eth_dev_data_alloc();

	if (rte_eth_dev_allocated(name) != NULL) {
		PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
				name);
		return NULL;
	}

	/* wire the per-process entry to its shared data slot and publish it */
	eth_dev = &rte_eth_devices[port_id];
	eth_dev->data = &rte_eth_dev_data[port_id];
	snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
	eth_dev->data->port_id = port_id;
	eth_dev->attached = DEV_ATTACHED;
	eth_dev->dev_type = type;
	nb_ports++;
	return eth_dev;
}
269
270 static int
271 rte_eth_dev_create_unique_device_name(char *name, size_t size,
272                 struct rte_pci_device *pci_dev)
273 {
274         int ret;
275
276         if ((name == NULL) || (pci_dev == NULL))
277                 return -EINVAL;
278
279         ret = snprintf(name, size, "%d:%d.%d",
280                         pci_dev->addr.bus, pci_dev->addr.devid,
281                         pci_dev->addr.function);
282         if (ret < 0)
283                 return ret;
284         return 0;
285 }
286
/**
 * Release an ethdev slot back to the free pool.
 *
 * Marks the slot detached and decrements the global port count; the
 * slot may then be reused by rte_eth_dev_allocate().  Does not free
 * the driver-private data — callers do that themselves.
 *
 * @return 0 on success, -EINVAL if eth_dev is NULL
 */
int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	eth_dev->attached = DEV_DETACHED;
	nb_ports--;
	return 0;
}
297
/*
 * PCI probe callback: create and initialize an ethdev for a newly
 * probed PCI device.
 *
 * Allocates a port slot named after the PCI address, allocates the
 * driver-private area (primary process only), fills in the device
 * structure and finally invokes the PMD's eth_dev_init hook.  If the
 * hook fails, everything allocated here is rolled back.
 *
 * Returns 0 on success, -ENOMEM when no port slot is available, or the
 * error returned by the PMD init hook.
 */
static int
rte_eth_dev_init(struct rte_pci_driver *pci_drv,
		 struct rte_pci_device *pci_dev)
{
	struct eth_driver    *eth_drv;
	struct rte_eth_dev *eth_dev;
	char ethdev_name[RTE_ETH_NAME_MAX_LEN];

	int diag;

	/* the PCI driver is the first member of eth_driver, so this
	 * downcast recovers the enclosing ethdev driver */
	eth_drv = (struct eth_driver *)pci_drv;

	/* Create unique Ethernet device name using PCI address */
	/* NOTE(review): return value ignored; a truncated name would be
	 * used silently — confirm RTE_ETH_NAME_MAX_LEN always fits
	 * "bus:devid.function" */
	rte_eth_dev_create_unique_device_name(ethdev_name,
			sizeof(ethdev_name), pci_dev);

	eth_dev = rte_eth_dev_allocate(ethdev_name, RTE_ETH_DEV_PCI);
	if (eth_dev == NULL)
		return -ENOMEM;

	/* only the primary process allocates private data; secondaries
	 * attach to what the primary created */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
				  eth_drv->dev_private_size,
				  RTE_CACHE_LINE_SIZE);
		if (eth_dev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private port data\n");
	}
	eth_dev->pci_dev = pci_dev;
	eth_dev->driver = eth_drv;
	eth_dev->data->rx_mbuf_alloc_failed = 0;

	/* init user callbacks */
	TAILQ_INIT(&(eth_dev->link_intr_cbs));

	/*
	 * Set the default MTU.
	 */
	eth_dev->data->mtu = ETHER_MTU;

	/* Invoke PMD device initialization function */
	diag = (*eth_drv->eth_dev_init)(eth_dev);
	if (diag == 0)
		return 0;

	/* PMD init failed: undo the allocations made above */
	PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%u device_id=0x%x) failed\n",
			pci_drv->name,
			(unsigned) pci_dev->id.vendor_id,
			(unsigned) pci_dev->id.device_id);
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	rte_eth_dev_release_port(eth_dev);
	return diag;
}
351
/*
 * PCI detach callback: tear down the ethdev bound to a PCI device.
 *
 * Resolves the port by its canonical PCI-address name, runs the PMD's
 * optional eth_dev_uninit hook, releases the port slot, frees the
 * driver-private data (primary process only) and clears the device
 * structure pointers.
 *
 * Returns 0 on success, -EINVAL/-ENODEV on bad input, or the error
 * from the PMD uninit hook.
 */
static int
rte_eth_dev_uninit(struct rte_pci_device *pci_dev)
{
	const struct eth_driver *eth_drv;
	struct rte_eth_dev *eth_dev;
	char ethdev_name[RTE_ETH_NAME_MAX_LEN];
	int ret;

	if (pci_dev == NULL)
		return -EINVAL;

	/* Create unique Ethernet device name using PCI address */
	rte_eth_dev_create_unique_device_name(ethdev_name,
			sizeof(ethdev_name), pci_dev);

	eth_dev = rte_eth_dev_allocated(ethdev_name);
	if (eth_dev == NULL)
		return -ENODEV;

	eth_drv = (const struct eth_driver *)pci_dev->driver;

	/* Invoke PMD device uninit function (optional hook) */
	if (*eth_drv->eth_dev_uninit) {
		ret = (*eth_drv->eth_dev_uninit)(eth_dev);
		if (ret)
			return ret;
	}

	/* free ether device */
	rte_eth_dev_release_port(eth_dev);

	/* private data was allocated by the primary process in
	 * rte_eth_dev_init(), so only the primary frees it */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);

	eth_dev->pci_dev = NULL;
	eth_dev->driver = NULL;
	eth_dev->data = NULL;

	return 0;
}
392
/**
 * Register an Ethernet [Poll Mode] driver.
 *
 * Function invoked by the initialization function of an Ethernet driver
 * to simultaneously register itself as a PCI driver and as an Ethernet
 * Poll Mode Driver.
 * Invokes the rte_eal_pci_register() function to register the *pci_drv*
 * structure embedded in the *eth_drv* structure, after having stored the
 * address of the rte_eth_dev_init() function in the *devinit* field of
 * the *pci_drv* structure.
 * During the PCI probing phase, the rte_eth_dev_init() function is
 * invoked for each PCI [Ethernet device] matching the embedded PCI
 * identifiers provided by the driver.
 */
void
rte_eth_driver_register(struct eth_driver *eth_drv)
{
	/* hook the generic ethdev init/uninit callbacks into the
	 * embedded PCI driver before handing it to the EAL */
	eth_drv->pci_drv.devinit = rte_eth_dev_init;
	eth_drv->pci_drv.devuninit = rte_eth_dev_uninit;
	rte_eal_pci_register(&eth_drv->pci_drv);
}
414
415 int
416 rte_eth_dev_is_valid_port(uint8_t port_id)
417 {
418         if (port_id >= RTE_MAX_ETHPORTS ||
419             rte_eth_devices[port_id].attached != DEV_ATTACHED)
420                 return 0;
421         else
422                 return 1;
423 }
424
425 int
426 rte_eth_dev_socket_id(uint8_t port_id)
427 {
428         if (!rte_eth_dev_is_valid_port(port_id))
429                 return -1;
430         return rte_eth_devices[port_id].data->numa_node;
431 }
432
/**
 * Return the number of Ethernet ports currently attached.
 */
uint8_t
rte_eth_dev_count(void)
{
	return nb_ports;
}
438
439 static enum rte_eth_dev_type
440 rte_eth_dev_get_device_type(uint8_t port_id)
441 {
442         if (!rte_eth_dev_is_valid_port(port_id))
443                 return RTE_ETH_DEV_UNKNOWN;
444         return rte_eth_devices[port_id].dev_type;
445 }
446
447 static int
448 rte_eth_dev_get_addr_by_port(uint8_t port_id, struct rte_pci_addr *addr)
449 {
450         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
451
452         if (addr == NULL) {
453                 PMD_DEBUG_TRACE("Null pointer is specified\n");
454                 return -EINVAL;
455         }
456
457         *addr = rte_eth_devices[port_id].pci_dev->addr;
458         return 0;
459 }
460
461 static int
462 rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
463 {
464         char *tmp;
465
466         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
467
468         if (name == NULL) {
469                 PMD_DEBUG_TRACE("Null pointer is specified\n");
470                 return -EINVAL;
471         }
472
473         /* shouldn't check 'rte_eth_devices[i].data',
474          * because it might be overwritten by VDEV PMD */
475         tmp = rte_eth_dev_data[port_id].name;
476         strcpy(name, tmp);
477         return 0;
478 }
479
480 static int
481 rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
482 {
483         int i;
484
485         if (name == NULL) {
486                 PMD_DEBUG_TRACE("Null pointer is specified\n");
487                 return -EINVAL;
488         }
489
490         *port_id = RTE_MAX_ETHPORTS;
491
492         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
493
494                 if (!strncmp(name,
495                         rte_eth_dev_data[i].name, strlen(name))) {
496
497                         *port_id = i;
498
499                         return 0;
500                 }
501         }
502         return -ENODEV;
503 }
504
505 static int
506 rte_eth_dev_get_port_by_addr(const struct rte_pci_addr *addr, uint8_t *port_id)
507 {
508         int i;
509         struct rte_pci_device *pci_dev = NULL;
510
511         if (addr == NULL) {
512                 PMD_DEBUG_TRACE("Null pointer is specified\n");
513                 return -EINVAL;
514         }
515
516         *port_id = RTE_MAX_ETHPORTS;
517
518         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
519
520                 pci_dev = rte_eth_devices[i].pci_dev;
521
522                 if (pci_dev &&
523                         !rte_eal_compare_pci_addr(&pci_dev->addr, addr)) {
524
525                         *port_id = i;
526
527                         return 0;
528                 }
529         }
530         return -ENODEV;
531 }
532
533 static int
534 rte_eth_dev_is_detachable(uint8_t port_id)
535 {
536         uint32_t dev_flags;
537
538         if (!rte_eth_dev_is_valid_port(port_id)) {
539                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
540                 return -EINVAL;
541         }
542
543         switch (rte_eth_devices[port_id].data->kdrv) {
544         case RTE_KDRV_IGB_UIO:
545         case RTE_KDRV_UIO_GENERIC:
546         case RTE_KDRV_NIC_UIO:
547         case RTE_KDRV_NONE:
548                 break;
549         case RTE_KDRV_VFIO:
550         default:
551                 return -ENOTSUP;
552         }
553         dev_flags = rte_eth_devices[port_id].data->dev_flags;
554         return !(dev_flags & RTE_ETH_DEV_DETACHABLE);
555 }
556
557 /* attach the new physical device, then store port_id of the device */
558 static int
559 rte_eth_dev_attach_pdev(struct rte_pci_addr *addr, uint8_t *port_id)
560 {
561         if ((addr == NULL) || (port_id == NULL))
562                 goto err;
563
564         /* re-construct pci_device_list */
565         if (rte_eal_pci_scan())
566                 goto err;
567         /* Invoke probe func of the driver can handle the new device. */
568         if (rte_eal_pci_probe_one(addr))
569                 goto err;
570
571         if (rte_eth_dev_get_port_by_addr(addr, port_id))
572                 goto err;
573
574         return 0;
575 err:
576         RTE_LOG(ERR, EAL, "Driver, cannot attach the device\n");
577         return -1;
578 }
579
/* detach the new physical device, then store pci_addr of the device */
static int
rte_eth_dev_detach_pdev(uint8_t port_id, struct rte_pci_addr *addr)
{
	struct rte_pci_addr freed_addr;
	struct rte_pci_addr vp;

	if (addr == NULL)
		goto err;

	/* check whether the driver supports detach feature, or not */
	if (rte_eth_dev_is_detachable(port_id))
		goto err;

	/* get pci address by port id */
	if (rte_eth_dev_get_addr_by_port(port_id, &freed_addr))
		goto err;

	/* Zeroed pci addr means the port comes from virtual device;
	 * such ports must not be detached via the PCI path */
	vp.domain = vp.bus = vp.devid = vp.function = 0;
	if (rte_eal_compare_pci_addr(&vp, &freed_addr) == 0)
		goto err;

	/* invoke devuninit func of the pci driver,
	 * also remove the device from pci_device_list */
	if (rte_eal_pci_detach(&freed_addr))
		goto err;

	/* report the freed PCI address back to the caller */
	*addr = freed_addr;
	return 0;
err:
	RTE_LOG(ERR, EAL, "Driver, cannot detach the device\n");
	return -1;
}
614
615 /* attach the new virtual device, then store port_id of the device */
616 static int
617 rte_eth_dev_attach_vdev(const char *vdevargs, uint8_t *port_id)
618 {
619         char *name = NULL, *args = NULL;
620         int ret = -1;
621
622         if ((vdevargs == NULL) || (port_id == NULL))
623                 goto end;
624
625         /* parse vdevargs, then retrieve device name and args */
626         if (rte_eal_parse_devargs_str(vdevargs, &name, &args))
627                 goto end;
628
629         /* walk around dev_driver_list to find the driver of the device,
630          * then invoke probe function of the driver.
631          * rte_eal_vdev_init() updates port_id allocated after
632          * initialization.
633          */
634         if (rte_eal_vdev_init(name, args))
635                 goto end;
636
637         if (rte_eth_dev_get_port_by_name(name, port_id))
638                 goto end;
639
640         ret = 0;
641 end:
642         if (name)
643                 free(name);
644         if (args)
645                 free(args);
646
647         if (ret < 0)
648                 RTE_LOG(ERR, EAL, "Driver, cannot attach the device\n");
649         return ret;
650 }
651
652 /* detach the new virtual device, then store the name of the device */
653 static int
654 rte_eth_dev_detach_vdev(uint8_t port_id, char *vdevname)
655 {
656         char name[RTE_ETH_NAME_MAX_LEN];
657
658         if (vdevname == NULL)
659                 goto err;
660
661         /* check whether the driver supports detach feature, or not */
662         if (rte_eth_dev_is_detachable(port_id))
663                 goto err;
664
665         /* get device name by port id */
666         if (rte_eth_dev_get_name_by_port(port_id, name))
667                 goto err;
668         /* walk around dev_driver_list to find the driver of the device,
669          * then invoke uninit function of the driver */
670         if (rte_eal_vdev_uninit(name))
671                 goto err;
672
673         strncpy(vdevname, name, sizeof(name));
674         return 0;
675 err:
676         RTE_LOG(ERR, EAL, "Driver, cannot detach the device\n");
677         return -1;
678 }
679
680 /* attach the new device, then store port_id of the device */
681 int
682 rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
683 {
684         struct rte_pci_addr addr;
685
686         if ((devargs == NULL) || (port_id == NULL))
687                 return -EINVAL;
688
689         if (eal_parse_pci_DomBDF(devargs, &addr) == 0)
690                 return rte_eth_dev_attach_pdev(&addr, port_id);
691         else
692                 return rte_eth_dev_attach_vdev(devargs, port_id);
693 }
694
695 /* detach the device, then store the name of the device */
696 int
697 rte_eth_dev_detach(uint8_t port_id, char *name)
698 {
699         struct rte_pci_addr addr;
700         int ret;
701
702         if (name == NULL)
703                 return -EINVAL;
704
705         if (rte_eth_dev_get_device_type(port_id) == RTE_ETH_DEV_PCI) {
706                 ret = rte_eth_dev_get_addr_by_port(port_id, &addr);
707                 if (ret < 0)
708                         return ret;
709
710                 ret = rte_eth_dev_detach_pdev(port_id, &addr);
711                 if (ret == 0)
712                         snprintf(name, RTE_ETH_NAME_MAX_LEN,
713                                 "%04x:%02x:%02x.%d",
714                                 addr.domain, addr.bus,
715                                 addr.devid, addr.function);
716
717                 return ret;
718         } else
719                 return rte_eth_dev_detach_vdev(port_id, name);
720 }
721
/*
 * (Re)size the per-device RX queue pointer array to nb_queues entries.
 *
 * On first configuration a zeroed array is allocated.  On
 * reconfiguration, queues beyond the new count are released through
 * the PMD's rx_queue_release hook before the array is resized with
 * rte_realloc; newly appended slots are zeroed.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -ENOTSUP if the
 * PMD lacks a release hook during reconfiguration.
 */
static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	void **rxq;
	unsigned i;

	if (dev->data->rx_queues == NULL) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else { /* re-configure */
		FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		/* release the queues dropped when shrinking */
		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);
		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (rxq == NULL)
			return -(ENOMEM);
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			/* zero the newly appended slots */
			memset(rxq + old_nb_queues, 0,
				sizeof(rxq[0]) * new_qs);
		}

		dev->data->rx_queues = rxq;

	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}
761
762 int
763 rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
764 {
765         struct rte_eth_dev *dev;
766
767         /* This function is only safe when called from the primary process
768          * in a multi-process setup*/
769         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
770
771         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
772
773         dev = &rte_eth_devices[port_id];
774         if (rx_queue_id >= dev->data->nb_rx_queues) {
775                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
776                 return -EINVAL;
777         }
778
779         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
780
781         if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
782                 PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
783                         " already started\n",
784                         rx_queue_id, port_id);
785                 return 0;
786         }
787
788         return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
789
790 }
791
792 int
793 rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
794 {
795         struct rte_eth_dev *dev;
796
797         /* This function is only safe when called from the primary process
798          * in a multi-process setup*/
799         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
800
801         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
802
803         dev = &rte_eth_devices[port_id];
804         if (rx_queue_id >= dev->data->nb_rx_queues) {
805                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
806                 return -EINVAL;
807         }
808
809         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
810
811         if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
812                 PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
813                         " already stopped\n",
814                         rx_queue_id, port_id);
815                 return 0;
816         }
817
818         return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
819
820 }
821
822 int
823 rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
824 {
825         struct rte_eth_dev *dev;
826
827         /* This function is only safe when called from the primary process
828          * in a multi-process setup*/
829         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
830
831         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
832
833         dev = &rte_eth_devices[port_id];
834         if (tx_queue_id >= dev->data->nb_tx_queues) {
835                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
836                 return -EINVAL;
837         }
838
839         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
840
841         if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
842                 PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
843                         " already started\n",
844                         tx_queue_id, port_id);
845                 return 0;
846         }
847
848         return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
849
850 }
851
852 int
853 rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
854 {
855         struct rte_eth_dev *dev;
856
857         /* This function is only safe when called from the primary process
858          * in a multi-process setup*/
859         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
860
861         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
862
863         dev = &rte_eth_devices[port_id];
864         if (tx_queue_id >= dev->data->nb_tx_queues) {
865                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
866                 return -EINVAL;
867         }
868
869         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
870
871         if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
872                 PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
873                         " already stopped\n",
874                         tx_queue_id, port_id);
875                 return 0;
876         }
877
878         return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
879
880 }
881
/*
 * (Re)size the per-device TX queue pointer array to nb_queues entries.
 *
 * Mirrors rte_eth_dev_rx_queue_config(): first configuration allocates
 * a zeroed array; reconfiguration releases dropped queues through the
 * PMD's tx_queue_release hook, resizes with rte_realloc and zeroes any
 * newly appended slots.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -ENOTSUP if the
 * PMD lacks a release hook during reconfiguration.
 */
static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	void **txq;
	unsigned i;

	if (dev->data->tx_queues == NULL) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
						   sizeof(dev->data->tx_queues[0]) * nb_queues,
						   RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else { /* re-configure */
		FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		/* release the queues dropped when shrinking */
		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);
		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
				  RTE_CACHE_LINE_SIZE);
		if (txq == NULL)
			return -ENOMEM;
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			/* zero the newly appended slots */
			memset(txq + old_nb_queues, 0,
			       sizeof(txq[0]) * new_qs);
		}

		dev->data->tx_queues = txq;

	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}
921
922 int
923 rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
924                       const struct rte_eth_conf *dev_conf)
925 {
926         struct rte_eth_dev *dev;
927         struct rte_eth_dev_info dev_info;
928         int diag;
929
930         /* This function is only safe when called from the primary process
931          * in a multi-process setup*/
932         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
933
934         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
935
936         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
937                 PMD_DEBUG_TRACE(
938                         "Number of RX queues requested (%u) is greater than max supported(%d)\n",
939                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
940                 return -EINVAL;
941         }
942
943         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
944                 PMD_DEBUG_TRACE(
945                         "Number of TX queues requested (%u) is greater than max supported(%d)\n",
946                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
947                 return -EINVAL;
948         }
949
950         dev = &rte_eth_devices[port_id];
951
952         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
953         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
954
955         if (dev->data->dev_started) {
956                 PMD_DEBUG_TRACE(
957                     "port %d must be stopped to allow configuration\n", port_id);
958                 return -EBUSY;
959         }
960
961         /*
962          * Check that the numbers of RX and TX queues are not greater
963          * than the maximum number of RX and TX queues supported by the
964          * configured device.
965          */
966         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
967         if (nb_rx_q > dev_info.max_rx_queues) {
968                 PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
969                                 port_id, nb_rx_q, dev_info.max_rx_queues);
970                 return -EINVAL;
971         }
972         if (nb_rx_q == 0) {
973                 PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
974                 return -EINVAL;
975         }
976
977         if (nb_tx_q > dev_info.max_tx_queues) {
978                 PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
979                                 port_id, nb_tx_q, dev_info.max_tx_queues);
980                 return -EINVAL;
981         }
982         if (nb_tx_q == 0) {
983                 PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
984                 return -EINVAL;
985         }
986
987         /* Copy the dev_conf parameter into the dev structure */
988         memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
989
990         /*
991          * If link state interrupt is enabled, check that the
992          * device supports it.
993          */
994         if ((dev_conf->intr_conf.lsc == 1) &&
995                 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
996                         PMD_DEBUG_TRACE("driver %s does not support lsc\n",
997                                         dev->data->drv_name);
998                         return -EINVAL;
999         }
1000
1001         /*
1002          * If jumbo frames are enabled, check that the maximum RX packet
1003          * length is supported by the configured device.
1004          */
1005         if (dev_conf->rxmode.jumbo_frame == 1) {
1006                 if (dev_conf->rxmode.max_rx_pkt_len >
1007                     dev_info.max_rx_pktlen) {
1008                         PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
1009                                 " > max valid value %u\n",
1010                                 port_id,
1011                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
1012                                 (unsigned)dev_info.max_rx_pktlen);
1013                         return -EINVAL;
1014                 } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
1015                         PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
1016                                 " < min valid value %u\n",
1017                                 port_id,
1018                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
1019                                 (unsigned)ETHER_MIN_LEN);
1020                         return -EINVAL;
1021                 }
1022         } else {
1023                 if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
1024                         dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
1025                         /* Use default value */
1026                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
1027                                                         ETHER_MAX_LEN;
1028         }
1029
1030         /*
1031          * Setup new number of RX/TX queues and reconfigure device.
1032          */
1033         diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
1034         if (diag != 0) {
1035                 PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
1036                                 port_id, diag);
1037                 return diag;
1038         }
1039
1040         diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
1041         if (diag != 0) {
1042                 PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
1043                                 port_id, diag);
1044                 rte_eth_dev_rx_queue_config(dev, 0);
1045                 return diag;
1046         }
1047
1048         diag = (*dev->dev_ops->dev_configure)(dev);
1049         if (diag != 0) {
1050                 PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
1051                                 port_id, diag);
1052                 rte_eth_dev_rx_queue_config(dev, 0);
1053                 rte_eth_dev_tx_queue_config(dev, 0);
1054                 return diag;
1055         }
1056
1057         return 0;
1058 }
1059
/*
 * Replay software-tracked configuration (MAC addresses, promiscuous and
 * all-multicast modes) into the hardware after a device (re)start.
 * Best-effort: failures are traced, never propagated.
 */
static void
rte_eth_dev_config_restore(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct ether_addr addr;
	uint16_t i;
	uint32_t pool = 0;

	dev = &rte_eth_devices[port_id];

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Under SR-IOV, MAC entries are re-added to the default VMDq pool. */
	if (RTE_ETH_DEV_SRIOV(dev).active)
		pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

	/* replay MAC address configuration */
	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = dev->data->mac_addrs[i];

		/* skip zero address */
		if (is_zero_ether_addr(&addr))
			continue;

		/* add address to the hardware */
		if  (*dev->dev_ops->mac_addr_add &&
			(dev->data->mac_pool_sel[i] & (1ULL << pool)))
			(*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
		else {
			PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
					port_id);
			/* exit the loop but not return an error */
			break;
		}
	}

	/* replay promiscuous configuration */
	if (rte_eth_promiscuous_get(port_id) == 1)
		rte_eth_promiscuous_enable(port_id);
	else if (rte_eth_promiscuous_get(port_id) == 0)
		rte_eth_promiscuous_disable(port_id);

	/* replay all multicast configuration */
	if (rte_eth_allmulticast_get(port_id) == 1)
		rte_eth_allmulticast_enable(port_id);
	else if (rte_eth_allmulticast_get(port_id) == 0)
		rte_eth_allmulticast_disable(port_id);
}
1108
1109 int
1110 rte_eth_dev_start(uint8_t port_id)
1111 {
1112         struct rte_eth_dev *dev;
1113         int diag;
1114
1115         /* This function is only safe when called from the primary process
1116          * in a multi-process setup*/
1117         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1118
1119         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1120
1121         dev = &rte_eth_devices[port_id];
1122
1123         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1124
1125         if (dev->data->dev_started != 0) {
1126                 PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
1127                         " already started\n",
1128                         port_id);
1129                 return 0;
1130         }
1131
1132         diag = (*dev->dev_ops->dev_start)(dev);
1133         if (diag == 0)
1134                 dev->data->dev_started = 1;
1135         else
1136                 return diag;
1137
1138         rte_eth_dev_config_restore(port_id);
1139
1140         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1141                 FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1142                 (*dev->dev_ops->link_update)(dev, 0);
1143         }
1144         return 0;
1145 }
1146
1147 void
1148 rte_eth_dev_stop(uint8_t port_id)
1149 {
1150         struct rte_eth_dev *dev;
1151
1152         /* This function is only safe when called from the primary process
1153          * in a multi-process setup*/
1154         PROC_PRIMARY_OR_RET();
1155
1156         VALID_PORTID_OR_RET(port_id);
1157         dev = &rte_eth_devices[port_id];
1158
1159         FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1160
1161         if (dev->data->dev_started == 0) {
1162                 PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
1163                         " already stopped\n",
1164                         port_id);
1165                 return;
1166         }
1167
1168         dev->data->dev_started = 0;
1169         (*dev->dev_ops->dev_stop)(dev);
1170 }
1171
1172 int
1173 rte_eth_dev_set_link_up(uint8_t port_id)
1174 {
1175         struct rte_eth_dev *dev;
1176
1177         /* This function is only safe when called from the primary process
1178          * in a multi-process setup*/
1179         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1180
1181         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1182
1183         dev = &rte_eth_devices[port_id];
1184
1185         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1186         return (*dev->dev_ops->dev_set_link_up)(dev);
1187 }
1188
1189 int
1190 rte_eth_dev_set_link_down(uint8_t port_id)
1191 {
1192         struct rte_eth_dev *dev;
1193
1194         /* This function is only safe when called from the primary process
1195          * in a multi-process setup*/
1196         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1197
1198         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1199
1200         dev = &rte_eth_devices[port_id];
1201
1202         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1203         return (*dev->dev_ops->dev_set_link_down)(dev);
1204 }
1205
1206 void
1207 rte_eth_dev_close(uint8_t port_id)
1208 {
1209         struct rte_eth_dev *dev;
1210
1211         /* This function is only safe when called from the primary process
1212          * in a multi-process setup*/
1213         PROC_PRIMARY_OR_RET();
1214
1215         VALID_PORTID_OR_RET(port_id);
1216         dev = &rte_eth_devices[port_id];
1217
1218         FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1219         dev->data->dev_started = 0;
1220         (*dev->dev_ops->dev_close)(dev);
1221
1222         rte_free(dev->data->rx_queues);
1223         dev->data->rx_queues = NULL;
1224         rte_free(dev->data->tx_queues);
1225         dev->data->tx_queues = NULL;
1226 }
1227
/*
 * Configure one RX queue of a stopped device.
 *
 * Validates the queue index, the mbuf pool's private data and buffer size
 * against the device's minimum RX buffer requirement, and the descriptor
 * count against the driver-advertised limits, then delegates to the
 * driver's rx_queue_setup callback.  A NULL rx_conf selects the driver's
 * default RX configuration.
 *
 * Returns 0 on success or a negative errno-style value.
 */
int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint32_t mbp_buf_size;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	/* Queues may only be (re)configured while the port is stopped. */
	if (dev->data->dev_started) {
		PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

	/*
	 * Check the size of the mbuf data buffer.
	 * This value must be provided in the private data of the memory pool.
	 * First check that the memory pool has a valid private data.
	 */
	rte_eth_dev_info_get(port_id, &dev_info);
	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
				mp->name, (int) mp->private_data_size,
				(int) sizeof(struct rte_pktmbuf_pool_private));
		return -ENOSPC;
	}
	mbp_buf_size = rte_pktmbuf_data_room_size(mp);

	/* The usable room (after the headroom) must satisfy the device's
	 * minimum RX buffer size. */
	if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
		PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
				"(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
				"=%d)\n",
				mp->name,
				(int)mbp_buf_size,
				(int)(RTE_PKTMBUF_HEADROOM +
				      dev_info.min_rx_bufsize),
				(int)RTE_PKTMBUF_HEADROOM,
				(int)dev_info.min_rx_bufsize);
		return -EINVAL;
	}

	/* Descriptor count must be within the driver's advertised limits
	 * and a multiple of its alignment requirement. */
	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
			nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
			nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {

		PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
			"should be: <= %hu, = %hu, and a product of %hu\n",
			nb_rx_desc,
			dev_info.rx_desc_lim.nb_max,
			dev_info.rx_desc_lim.nb_min,
			dev_info.rx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (rx_conf == NULL)
		rx_conf = &dev_info.default_rxconf;

	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
					      socket_id, rx_conf, mp);
	if (!ret) {
		/* Track the smallest RX buffer size across all queues;
		 * 0 means "not set yet". */
		if (!dev->data->min_rx_buf_size ||
		    dev->data->min_rx_buf_size > mbp_buf_size)
			dev->data->min_rx_buf_size = mbp_buf_size;
	}

	return ret;
}
1313
1314 int
1315 rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
1316                        uint16_t nb_tx_desc, unsigned int socket_id,
1317                        const struct rte_eth_txconf *tx_conf)
1318 {
1319         struct rte_eth_dev *dev;
1320         struct rte_eth_dev_info dev_info;
1321
1322         /* This function is only safe when called from the primary process
1323          * in a multi-process setup*/
1324         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1325
1326         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1327
1328         dev = &rte_eth_devices[port_id];
1329         if (tx_queue_id >= dev->data->nb_tx_queues) {
1330                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1331                 return -EINVAL;
1332         }
1333
1334         if (dev->data->dev_started) {
1335                 PMD_DEBUG_TRACE(
1336                     "port %d must be stopped to allow configuration\n", port_id);
1337                 return -EBUSY;
1338         }
1339
1340         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1341         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1342
1343         rte_eth_dev_info_get(port_id, &dev_info);
1344
1345         if (tx_conf == NULL)
1346                 tx_conf = &dev_info.default_txconf;
1347
1348         return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
1349                                                socket_id, tx_conf);
1350 }
1351
1352 void
1353 rte_eth_promiscuous_enable(uint8_t port_id)
1354 {
1355         struct rte_eth_dev *dev;
1356
1357         VALID_PORTID_OR_RET(port_id);
1358         dev = &rte_eth_devices[port_id];
1359
1360         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1361         (*dev->dev_ops->promiscuous_enable)(dev);
1362         dev->data->promiscuous = 1;
1363 }
1364
1365 void
1366 rte_eth_promiscuous_disable(uint8_t port_id)
1367 {
1368         struct rte_eth_dev *dev;
1369
1370         VALID_PORTID_OR_RET(port_id);
1371         dev = &rte_eth_devices[port_id];
1372
1373         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1374         dev->data->promiscuous = 0;
1375         (*dev->dev_ops->promiscuous_disable)(dev);
1376 }
1377
1378 int
1379 rte_eth_promiscuous_get(uint8_t port_id)
1380 {
1381         struct rte_eth_dev *dev;
1382
1383         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1384
1385         dev = &rte_eth_devices[port_id];
1386         return dev->data->promiscuous;
1387 }
1388
1389 void
1390 rte_eth_allmulticast_enable(uint8_t port_id)
1391 {
1392         struct rte_eth_dev *dev;
1393
1394         VALID_PORTID_OR_RET(port_id);
1395         dev = &rte_eth_devices[port_id];
1396
1397         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1398         (*dev->dev_ops->allmulticast_enable)(dev);
1399         dev->data->all_multicast = 1;
1400 }
1401
1402 void
1403 rte_eth_allmulticast_disable(uint8_t port_id)
1404 {
1405         struct rte_eth_dev *dev;
1406
1407         VALID_PORTID_OR_RET(port_id);
1408         dev = &rte_eth_devices[port_id];
1409
1410         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1411         dev->data->all_multicast = 0;
1412         (*dev->dev_ops->allmulticast_disable)(dev);
1413 }
1414
1415 int
1416 rte_eth_allmulticast_get(uint8_t port_id)
1417 {
1418         struct rte_eth_dev *dev;
1419
1420         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1421
1422         dev = &rte_eth_devices[port_id];
1423         return dev->data->all_multicast;
1424 }
1425
/*
 * Atomically snapshot the device's link status into *link.
 *
 * The whole struct rte_eth_link is treated as a single 64-bit word and
 * copied with a compare-and-set: the store of *src into *dst succeeds only
 * if *dst still holds the value just read, i.e. no concurrent writer raced
 * with the copy.  Returns 0 on success, -1 if the cmpset lost the race.
 *
 * NOTE(review): the (uint64_t *) casts assume struct rte_eth_link is
 * exactly 64 bits and suitably aligned — this is an established ethdev
 * invariant, but it is not visible from this block.
 */
static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	/* cmpset(dst, expected=*dst, desired=*src): fails (returns 0) only
	 * when another writer changed *dst between the read and the swap. */
	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
1439
1440 void
1441 rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
1442 {
1443         struct rte_eth_dev *dev;
1444
1445         VALID_PORTID_OR_RET(port_id);
1446         dev = &rte_eth_devices[port_id];
1447
1448         if (dev->data->dev_conf.intr_conf.lsc != 0)
1449                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1450         else {
1451                 FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1452                 (*dev->dev_ops->link_update)(dev, 1);
1453                 *eth_link = dev->data->dev_link;
1454         }
1455 }
1456
1457 void
1458 rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
1459 {
1460         struct rte_eth_dev *dev;
1461
1462         VALID_PORTID_OR_RET(port_id);
1463         dev = &rte_eth_devices[port_id];
1464
1465         if (dev->data->dev_conf.intr_conf.lsc != 0)
1466                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1467         else {
1468                 FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1469                 (*dev->dev_ops->link_update)(dev, 0);
1470                 *eth_link = dev->data->dev_link;
1471         }
1472 }
1473
1474 int
1475 rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
1476 {
1477         struct rte_eth_dev *dev;
1478
1479         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1480
1481         dev = &rte_eth_devices[port_id];
1482         memset(stats, 0, sizeof(*stats));
1483
1484         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1485         (*dev->dev_ops->stats_get)(dev, stats);
1486         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1487         return 0;
1488 }
1489
1490 void
1491 rte_eth_stats_reset(uint8_t port_id)
1492 {
1493         struct rte_eth_dev *dev;
1494
1495         VALID_PORTID_OR_RET(port_id);
1496         dev = &rte_eth_devices[port_id];
1497
1498         FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1499         (*dev->dev_ops->stats_reset)(dev);
1500 }
1501
/* retrieve ethdev extended statistics */
/*
 * Fill xstats[] with the generic stats (global + per-RX-queue +
 * per-TX-queue) followed by any driver-specific xstats.
 *
 * If n is smaller than the total number of entries, nothing is filled and
 * the required size is returned so the caller can resize; otherwise the
 * total number of entries written is returned.  Negative driver errors
 * are propagated unchanged.
 */
int
rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
	unsigned n)
{
	struct rte_eth_stats eth_stats;
	struct rte_eth_dev *dev;
	unsigned count = 0, i, q;
	signed xcount = 0;
	uint64_t val, *stats_ptr;

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	/* Return generic statistics */
	count = RTE_NB_STATS + (dev->data->nb_rx_queues * RTE_NB_RXQ_STATS) +
		(dev->data->nb_tx_queues * RTE_NB_TXQ_STATS);

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL) {
		/* Retrieve the xstats from the driver at the end of the
		 * xstats struct.
		 */
		/* When n <= count there is no room after the generic part,
		 * so pass 0 and only learn the driver's entry count. */
		xcount = (*dev->dev_ops->xstats_get)(dev, &xstats[count],
			 (n > count) ? n - count : 0);

		if (xcount < 0)
			return xcount;
	}

	/* Caller's buffer too small: report the required size. */
	if (n < count + xcount)
		return count + xcount;

	/* now fill the xstats structure */
	count = 0;
	rte_eth_stats_get(port_id, &eth_stats);

	/* global stats */
	/* Each entry is read from eth_stats at a precomputed byte offset. */
	for (i = 0; i < RTE_NB_STATS; i++) {
		stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_stats_strings[i].offset);
		val = *stats_ptr;
		snprintf(xstats[count].name, sizeof(xstats[count].name),
			"%s", rte_stats_strings[i].name);
		xstats[count++].value = val;
	}

	/* per-rxq stats */
	for (q = 0; q < dev->data->nb_rx_queues; q++) {
		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
			/* Offset into the per-queue counter arrays, indexed
			 * by queue number. */
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_rxq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			snprintf(xstats[count].name, sizeof(xstats[count].name),
				"rx_q%u_%s", q,
				rte_rxq_stats_strings[i].name);
			xstats[count++].value = val;
		}
	}

	/* per-txq stats */
	for (q = 0; q < dev->data->nb_tx_queues; q++) {
		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_txq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			snprintf(xstats[count].name, sizeof(xstats[count].name),
				"tx_q%u_%s", q,
				rte_txq_stats_strings[i].name);
			xstats[count++].value = val;
		}
	}

	/* Total entries written: generic count plus driver entries (already
	 * placed at xstats[count..] by the driver call above). */
	return count + xcount;
}
1580
1581 /* reset ethdev extended statistics */
1582 void
1583 rte_eth_xstats_reset(uint8_t port_id)
1584 {
1585         struct rte_eth_dev *dev;
1586
1587         VALID_PORTID_OR_RET(port_id);
1588         dev = &rte_eth_devices[port_id];
1589
1590         /* implemented by the driver */
1591         if (dev->dev_ops->xstats_reset != NULL) {
1592                 (*dev->dev_ops->xstats_reset)(dev);
1593                 return;
1594         }
1595
1596         /* fallback to default */
1597         rte_eth_stats_reset(port_id);
1598 }
1599
1600 static int
1601 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1602                 uint8_t is_rx)
1603 {
1604         struct rte_eth_dev *dev;
1605
1606         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1607
1608         dev = &rte_eth_devices[port_id];
1609
1610         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1611         return (*dev->dev_ops->queue_stats_mapping_set)
1612                         (dev, queue_id, stat_idx, is_rx);
1613 }
1614
1615
1616 int
1617 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1618                 uint8_t stat_idx)
1619 {
1620         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1621                         STAT_QMAP_TX);
1622 }
1623
1624
1625 int
1626 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1627                 uint8_t stat_idx)
1628 {
1629         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1630                         STAT_QMAP_RX);
1631 }
1632
1633
1634 void
1635 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1636 {
1637         struct rte_eth_dev *dev;
1638         const struct rte_eth_desc_lim lim = {
1639                 .nb_max = UINT16_MAX,
1640                 .nb_min = 0,
1641                 .nb_align = 1,
1642         };
1643
1644         VALID_PORTID_OR_RET(port_id);
1645         dev = &rte_eth_devices[port_id];
1646
1647         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
1648         dev_info->rx_desc_lim = lim;
1649         dev_info->tx_desc_lim = lim;
1650
1651         FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1652         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1653         dev_info->pci_dev = dev->pci_dev;
1654         dev_info->driver_name = dev->data->drv_name;
1655 }
1656
1657 void
1658 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1659 {
1660         struct rte_eth_dev *dev;
1661
1662         VALID_PORTID_OR_RET(port_id);
1663         dev = &rte_eth_devices[port_id];
1664         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1665 }
1666
1667
1668 int
1669 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1670 {
1671         struct rte_eth_dev *dev;
1672
1673         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1674
1675         dev = &rte_eth_devices[port_id];
1676         *mtu = dev->data->mtu;
1677         return 0;
1678 }
1679
1680 int
1681 rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
1682 {
1683         int ret;
1684         struct rte_eth_dev *dev;
1685
1686         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1687         dev = &rte_eth_devices[port_id];
1688         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
1689
1690         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
1691         if (!ret)
1692                 dev->data->mtu = mtu;
1693
1694         return ret;
1695 }
1696
1697 int
1698 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1699 {
1700         struct rte_eth_dev *dev;
1701
1702         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1703         dev = &rte_eth_devices[port_id];
1704         if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1705                 PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1706                 return -ENOSYS;
1707         }
1708
1709         if (vlan_id > 4095) {
1710                 PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1711                                 port_id, (unsigned) vlan_id);
1712                 return -EINVAL;
1713         }
1714         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1715
1716         return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1717 }
1718
1719 int
1720 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1721 {
1722         struct rte_eth_dev *dev;
1723
1724         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1725         dev = &rte_eth_devices[port_id];
1726         if (rx_queue_id >= dev->data->nb_rx_queues) {
1727                 PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", port_id);
1728                 return -EINVAL;
1729         }
1730
1731         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1732         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1733
1734         return 0;
1735 }
1736
1737 int
1738 rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
1739 {
1740         struct rte_eth_dev *dev;
1741
1742         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1743         dev = &rte_eth_devices[port_id];
1744         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1745         (*dev->dev_ops->vlan_tpid_set)(dev, tpid);
1746
1747         return 0;
1748 }
1749
1750 int
1751 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
1752 {
1753         struct rte_eth_dev *dev;
1754         int ret = 0;
1755         int mask = 0;
1756         int cur, org = 0;
1757
1758         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1759         dev = &rte_eth_devices[port_id];
1760
1761         /*check which option changed by application*/
1762         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
1763         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
1764         if (cur != org) {
1765                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
1766                 mask |= ETH_VLAN_STRIP_MASK;
1767         }
1768
1769         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
1770         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
1771         if (cur != org) {
1772                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
1773                 mask |= ETH_VLAN_FILTER_MASK;
1774         }
1775
1776         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
1777         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
1778         if (cur != org) {
1779                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
1780                 mask |= ETH_VLAN_EXTEND_MASK;
1781         }
1782
1783         /*no change*/
1784         if (mask == 0)
1785                 return ret;
1786
1787         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
1788         (*dev->dev_ops->vlan_offload_set)(dev, mask);
1789
1790         return ret;
1791 }
1792
1793 int
1794 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1795 {
1796         struct rte_eth_dev *dev;
1797         int ret = 0;
1798
1799         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1800         dev = &rte_eth_devices[port_id];
1801
1802         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1803                 ret |= ETH_VLAN_STRIP_OFFLOAD;
1804
1805         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1806                 ret |= ETH_VLAN_FILTER_OFFLOAD;
1807
1808         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1809                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
1810
1811         return ret;
1812 }
1813
1814 int
1815 rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
1816 {
1817         struct rte_eth_dev *dev;
1818
1819         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1820         dev = &rte_eth_devices[port_id];
1821         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
1822         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
1823
1824         return 0;
1825 }
1826
1827 int
1828 rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1829 {
1830         struct rte_eth_dev *dev;
1831
1832         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1833         dev = &rte_eth_devices[port_id];
1834         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
1835         memset(fc_conf, 0, sizeof(*fc_conf));
1836         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
1837 }
1838
1839 int
1840 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1841 {
1842         struct rte_eth_dev *dev;
1843
1844         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1845         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
1846                 PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
1847                 return -EINVAL;
1848         }
1849
1850         dev = &rte_eth_devices[port_id];
1851         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
1852         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
1853 }
1854
1855 int
1856 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
1857 {
1858         struct rte_eth_dev *dev;
1859
1860         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1861         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
1862                 PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
1863                 return -EINVAL;
1864         }
1865
1866         dev = &rte_eth_devices[port_id];
1867         /* High water, low water validation are device specific */
1868         if  (*dev->dev_ops->priority_flow_ctrl_set)
1869                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
1870         return -ENOTSUP;
1871 }
1872
1873 static int
1874 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
1875                         uint16_t reta_size)
1876 {
1877         uint16_t i, num;
1878
1879         if (!reta_conf)
1880                 return -EINVAL;
1881
1882         if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
1883                 PMD_DEBUG_TRACE("Invalid reta size, should be %u aligned\n",
1884                                                         RTE_RETA_GROUP_SIZE);
1885                 return -EINVAL;
1886         }
1887
1888         num = reta_size / RTE_RETA_GROUP_SIZE;
1889         for (i = 0; i < num; i++) {
1890                 if (reta_conf[i].mask)
1891                         return 0;
1892         }
1893
1894         return -EINVAL;
1895 }
1896
1897 static int
1898 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
1899                          uint16_t reta_size,
1900                          uint8_t max_rxq)
1901 {
1902         uint16_t i, idx, shift;
1903
1904         if (!reta_conf)
1905                 return -EINVAL;
1906
1907         if (max_rxq == 0) {
1908                 PMD_DEBUG_TRACE("No receive queue is available\n");
1909                 return -EINVAL;
1910         }
1911
1912         for (i = 0; i < reta_size; i++) {
1913                 idx = i / RTE_RETA_GROUP_SIZE;
1914                 shift = i % RTE_RETA_GROUP_SIZE;
1915                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
1916                         (reta_conf[idx].reta[shift] >= max_rxq)) {
1917                         PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
1918                                 "the maximum rxq index: %u\n", idx, shift,
1919                                 reta_conf[idx].reta[shift], max_rxq);
1920                         return -EINVAL;
1921                 }
1922         }
1923
1924         return 0;
1925 }
1926
1927 int
1928 rte_eth_dev_rss_reta_update(uint8_t port_id,
1929                             struct rte_eth_rss_reta_entry64 *reta_conf,
1930                             uint16_t reta_size)
1931 {
1932         struct rte_eth_dev *dev;
1933         int ret;
1934
1935         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1936         /* Check mask bits */
1937         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1938         if (ret < 0)
1939                 return ret;
1940
1941         dev = &rte_eth_devices[port_id];
1942
1943         /* Check entry value */
1944         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
1945                                 dev->data->nb_rx_queues);
1946         if (ret < 0)
1947                 return ret;
1948
1949         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
1950         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
1951 }
1952
1953 int
1954 rte_eth_dev_rss_reta_query(uint8_t port_id,
1955                            struct rte_eth_rss_reta_entry64 *reta_conf,
1956                            uint16_t reta_size)
1957 {
1958         struct rte_eth_dev *dev;
1959         int ret;
1960
1961         if (port_id >= nb_ports) {
1962                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1963                 return -ENODEV;
1964         }
1965
1966         /* Check mask bits */
1967         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1968         if (ret < 0)
1969                 return ret;
1970
1971         dev = &rte_eth_devices[port_id];
1972         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
1973         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
1974 }
1975
1976 int
1977 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
1978 {
1979         struct rte_eth_dev *dev;
1980         uint16_t rss_hash_protos;
1981
1982         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1983         rss_hash_protos = rss_conf->rss_hf;
1984         if ((rss_hash_protos != 0) &&
1985             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
1986                 PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
1987                                 rss_hash_protos);
1988                 return -EINVAL;
1989         }
1990         dev = &rte_eth_devices[port_id];
1991         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
1992         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
1993 }
1994
1995 int
1996 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
1997                               struct rte_eth_rss_conf *rss_conf)
1998 {
1999         struct rte_eth_dev *dev;
2000
2001         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2002         dev = &rte_eth_devices[port_id];
2003         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2004         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
2005 }
2006
2007 int
2008 rte_eth_dev_udp_tunnel_add(uint8_t port_id,
2009                            struct rte_eth_udp_tunnel *udp_tunnel)
2010 {
2011         struct rte_eth_dev *dev;
2012
2013         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2014         if (udp_tunnel == NULL) {
2015                 PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2016                 return -EINVAL;
2017         }
2018
2019         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2020                 PMD_DEBUG_TRACE("Invalid tunnel type\n");
2021                 return -EINVAL;
2022         }
2023
2024         dev = &rte_eth_devices[port_id];
2025         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_add, -ENOTSUP);
2026         return (*dev->dev_ops->udp_tunnel_add)(dev, udp_tunnel);
2027 }
2028
2029 int
2030 rte_eth_dev_udp_tunnel_delete(uint8_t port_id,
2031                               struct rte_eth_udp_tunnel *udp_tunnel)
2032 {
2033         struct rte_eth_dev *dev;
2034
2035         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2036         dev = &rte_eth_devices[port_id];
2037
2038         if (udp_tunnel == NULL) {
2039                 PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2040                 return -EINVAL;
2041         }
2042
2043         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2044                 PMD_DEBUG_TRACE("Invalid tunnel type\n");
2045                 return -EINVAL;
2046         }
2047
2048         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_del, -ENOTSUP);
2049         return (*dev->dev_ops->udp_tunnel_del)(dev, udp_tunnel);
2050 }
2051
2052 int
2053 rte_eth_led_on(uint8_t port_id)
2054 {
2055         struct rte_eth_dev *dev;
2056
2057         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2058         dev = &rte_eth_devices[port_id];
2059         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2060         return (*dev->dev_ops->dev_led_on)(dev);
2061 }
2062
2063 int
2064 rte_eth_led_off(uint8_t port_id)
2065 {
2066         struct rte_eth_dev *dev;
2067
2068         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2069         dev = &rte_eth_devices[port_id];
2070         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2071         return (*dev->dev_ops->dev_led_off)(dev);
2072 }
2073
2074 /*
2075  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2076  * an empty spot.
2077  */
2078 static int
2079 get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2080 {
2081         struct rte_eth_dev_info dev_info;
2082         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2083         unsigned i;
2084
2085         rte_eth_dev_info_get(port_id, &dev_info);
2086
2087         for (i = 0; i < dev_info.max_mac_addrs; i++)
2088                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2089                         return i;
2090
2091         return -1;
2092 }
2093
2094 static const struct ether_addr null_mac_addr;
2095
/*
 * Add a MAC address to the port and associate it with a VMDq pool.
 * Returns 0 on success, -ENODEV/-EINVAL/-ENOSPC/-ENOTSUP on error.
 */
int
rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
			uint32_t pool)
{
	struct rte_eth_dev *dev;
	int index;
	uint64_t pool_mask;

	VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);

	/* The all-zero address is the "empty slot" marker and can never
	 * be added explicitly. */
	if (is_zero_ether_addr(addr)) {
		PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
			port_id);
		return -EINVAL;
	}
	if (pool >= ETH_64_POOLS) {
		PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
		return -EINVAL;
	}

	/* Reuse the slot if the address is already present; otherwise
	 * claim the first empty slot. */
	index = get_mac_addr_index(port_id, addr);
	if (index < 0) {
		index = get_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			PMD_DEBUG_TRACE("port %d: MAC address array full\n",
				port_id);
			return -ENOSPC;
		}
	} else {
		pool_mask = dev->data->mac_pool_sel[index];

		/* Check if both MAC address and pool is already there, and do nothing */
		if (pool_mask & (1ULL << pool))
			return 0;
	}

	/* Update NIC */
	(*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);

	/* Update address in NIC data structure */
	ether_addr_copy(addr, &dev->data->mac_addrs[index]);

	/* Update pool bitmap in NIC data structure */
	dev->data->mac_pool_sel[index] |= (1ULL << pool);

	return 0;
}
2145
2146 int
2147 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
2148 {
2149         struct rte_eth_dev *dev;
2150         int index;
2151
2152         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2153         dev = &rte_eth_devices[port_id];
2154         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2155
2156         index = get_mac_addr_index(port_id, addr);
2157         if (index == 0) {
2158                 PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2159                 return -EADDRINUSE;
2160         } else if (index < 0)
2161                 return 0;  /* Do nothing if address wasn't found */
2162
2163         /* Update NIC */
2164         (*dev->dev_ops->mac_addr_remove)(dev, index);
2165
2166         /* Update address in NIC data structure */
2167         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2168
2169         /* reset pool bitmap */
2170         dev->data->mac_pool_sel[index] = 0;
2171
2172         return 0;
2173 }
2174
2175 int
2176 rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr)
2177 {
2178         struct rte_eth_dev *dev;
2179
2180         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2181
2182         if (!is_valid_assigned_ether_addr(addr))
2183                 return -EINVAL;
2184
2185         dev = &rte_eth_devices[port_id];
2186         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2187
2188         /* Update default address in NIC data structure */
2189         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
2190
2191         (*dev->dev_ops->mac_addr_set)(dev, addr);
2192
2193         return 0;
2194 }
2195
2196 int
2197 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
2198                                 uint16_t rx_mode, uint8_t on)
2199 {
2200         uint16_t num_vfs;
2201         struct rte_eth_dev *dev;
2202         struct rte_eth_dev_info dev_info;
2203
2204         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2205
2206         dev = &rte_eth_devices[port_id];
2207         rte_eth_dev_info_get(port_id, &dev_info);
2208
2209         num_vfs = dev_info.max_vfs;
2210         if (vf > num_vfs) {
2211                 PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
2212                 return -EINVAL;
2213         }
2214
2215         if (rx_mode == 0) {
2216                 PMD_DEBUG_TRACE("set VF RX mode:mode mask ca not be zero\n");
2217                 return -EINVAL;
2218         }
2219         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
2220         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
2221 }
2222
2223 /*
2224  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2225  * an empty spot.
2226  */
2227 static int
2228 get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2229 {
2230         struct rte_eth_dev_info dev_info;
2231         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2232         unsigned i;
2233
2234         rte_eth_dev_info_get(port_id, &dev_info);
2235         if (!dev->data->hash_mac_addrs)
2236                 return -1;
2237
2238         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2239                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2240                         ETHER_ADDR_LEN) == 0)
2241                         return i;
2242
2243         return -1;
2244 }
2245
/*
 * Add (on != 0) or remove (on == 0) a MAC address in the port's unicast
 * hash table. Returns 0 on success or if the address is already in the
 * requested state; negative errno on error.
 */
int
rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
				uint8_t on)
{
	int index;
	int ret;
	struct rte_eth_dev *dev;

	VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	/* The all-zero address marks empty slots and cannot be managed. */
	if (is_zero_ether_addr(addr)) {
		PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
			port_id);
		return -EINVAL;
	}

	index = get_hash_mac_addr_index(port_id, addr);
	/* Check if it's already there, and do nothing */
	if ((index >= 0) && (on))
		return 0;

	if (index < 0) {
		/* Removing an address that was never added is an error. */
		if (!on) {
			PMD_DEBUG_TRACE("port %d: the MAC address was not "
				"set in UTA\n", port_id);
			return -EINVAL;
		}

		/* Claim an empty slot for the new address. */
		index = get_hash_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			PMD_DEBUG_TRACE("port %d: MAC address array full\n",
					port_id);
			return -ENOSPC;
		}
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
	ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
	if (ret == 0) {
		/* Update address in NIC data structure */
		if (on)
			ether_addr_copy(addr,
					&dev->data->hash_mac_addrs[index]);
		else
			ether_addr_copy(&null_mac_addr,
					&dev->data->hash_mac_addrs[index]);
	}

	return ret;
}
2297
2298 int
2299 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2300 {
2301         struct rte_eth_dev *dev;
2302
2303         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2304
2305         dev = &rte_eth_devices[port_id];
2306
2307         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2308         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2309 }
2310
2311 int
2312 rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on)
2313 {
2314         uint16_t num_vfs;
2315         struct rte_eth_dev *dev;
2316         struct rte_eth_dev_info dev_info;
2317
2318         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2319
2320         dev = &rte_eth_devices[port_id];
2321         rte_eth_dev_info_get(port_id, &dev_info);
2322
2323         num_vfs = dev_info.max_vfs;
2324         if (vf > num_vfs) {
2325                 PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2326                 return -EINVAL;
2327         }
2328
2329         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2330         return (*dev->dev_ops->set_vf_rx)(dev, vf, on);
2331 }
2332
2333 int
2334 rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on)
2335 {
2336         uint16_t num_vfs;
2337         struct rte_eth_dev *dev;
2338         struct rte_eth_dev_info dev_info;
2339
2340         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2341
2342         dev = &rte_eth_devices[port_id];
2343         rte_eth_dev_info_get(port_id, &dev_info);
2344
2345         num_vfs = dev_info.max_vfs;
2346         if (vf > num_vfs) {
2347                 PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
2348                 return -EINVAL;
2349         }
2350
2351         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2352         return (*dev->dev_ops->set_vf_tx)(dev, vf, on);
2353 }
2354
2355 int
2356 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2357                                uint64_t vf_mask, uint8_t vlan_on)
2358 {
2359         struct rte_eth_dev *dev;
2360
2361         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2362
2363         dev = &rte_eth_devices[port_id];
2364
2365         if (vlan_id > ETHER_MAX_VLAN_ID) {
2366                 PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
2367                         vlan_id);
2368                 return -EINVAL;
2369         }
2370
2371         if (vf_mask == 0) {
2372                 PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
2373                 return -EINVAL;
2374         }
2375
2376         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2377         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2378                                                    vf_mask, vlan_on);
2379 }
2380
2381 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
2382                                         uint16_t tx_rate)
2383 {
2384         struct rte_eth_dev *dev;
2385         struct rte_eth_dev_info dev_info;
2386         struct rte_eth_link link;
2387
2388         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2389
2390         dev = &rte_eth_devices[port_id];
2391         rte_eth_dev_info_get(port_id, &dev_info);
2392         link = dev->data->dev_link;
2393
2394         if (queue_idx > dev_info.max_tx_queues) {
2395                 PMD_DEBUG_TRACE("set queue rate limit:port %d: "
2396                                 "invalid queue id=%d\n", port_id, queue_idx);
2397                 return -EINVAL;
2398         }
2399
2400         if (tx_rate > link.link_speed) {
2401                 PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
2402                                 "bigger than link speed= %d\n",
2403                         tx_rate, link.link_speed);
2404                 return -EINVAL;
2405         }
2406
2407         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2408         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2409 }
2410
2411 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
2412                                 uint64_t q_msk)
2413 {
2414         struct rte_eth_dev *dev;
2415         struct rte_eth_dev_info dev_info;
2416         struct rte_eth_link link;
2417
2418         if (q_msk == 0)
2419                 return 0;
2420
2421         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2422
2423         dev = &rte_eth_devices[port_id];
2424         rte_eth_dev_info_get(port_id, &dev_info);
2425         link = dev->data->dev_link;
2426
2427         if (vf > dev_info.max_vfs) {
2428                 PMD_DEBUG_TRACE("set VF rate limit:port %d: "
2429                                 "invalid vf id=%d\n", port_id, vf);
2430                 return -EINVAL;
2431         }
2432
2433         if (tx_rate > link.link_speed) {
2434                 PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
2435                                 "bigger than link speed= %d\n",
2436                                 tx_rate, link.link_speed);
2437                 return -EINVAL;
2438         }
2439
2440         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
2441         return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
2442 }
2443
2444 int
2445 rte_eth_mirror_rule_set(uint8_t port_id,
2446                         struct rte_eth_mirror_conf *mirror_conf,
2447                         uint8_t rule_id, uint8_t on)
2448 {
2449         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2450
2451         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2452         if (mirror_conf->rule_type == 0) {
2453                 PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2454                 return -EINVAL;
2455         }
2456
2457         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2458                 PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
2459                                 ETH_64_POOLS - 1);
2460                 return -EINVAL;
2461         }
2462
2463         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
2464              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
2465             (mirror_conf->pool_mask == 0)) {
2466                 PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
2467                 return -EINVAL;
2468         }
2469
2470         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
2471             mirror_conf->vlan.vlan_mask == 0) {
2472                 PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
2473                 return -EINVAL;
2474         }
2475
2476         dev = &rte_eth_devices[port_id];
2477         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2478
2479         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2480 }
2481
2482 int
2483 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2484 {
2485         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2486
2487         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2488
2489         dev = &rte_eth_devices[port_id];
2490         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2491
2492         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2493 }
2494
2495 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2496 uint16_t
2497 rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
2498                  struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2499 {
2500         struct rte_eth_dev *dev;
2501
2502         VALID_PORTID_OR_ERR_RET(port_id, 0);
2503
2504         dev = &rte_eth_devices[port_id];
2505         FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
2506         if (queue_id >= dev->data->nb_rx_queues) {
2507                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
2508                 return 0;
2509         }
2510         return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
2511                                                 rx_pkts, nb_pkts);
2512 }
2513
2514 uint16_t
2515 rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
2516                  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2517 {
2518         struct rte_eth_dev *dev;
2519
2520         VALID_PORTID_OR_ERR_RET(port_id, 0);
2521
2522         dev = &rte_eth_devices[port_id];
2523
2524         FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
2525         if (queue_id >= dev->data->nb_tx_queues) {
2526                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
2527                 return 0;
2528         }
2529         return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
2530                                                 tx_pkts, nb_pkts);
2531 }
2532
2533 uint32_t
2534 rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
2535 {
2536         struct rte_eth_dev *dev;
2537
2538         VALID_PORTID_OR_ERR_RET(port_id, 0);
2539
2540         dev = &rte_eth_devices[port_id];
2541         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, 0);
2542         return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
2543 }
2544
2545 int
2546 rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
2547 {
2548         struct rte_eth_dev *dev;
2549
2550         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2551
2552         dev = &rte_eth_devices[port_id];
2553         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
2554         return (*dev->dev_ops->rx_descriptor_done)(dev->data->rx_queues[queue_id],
2555                                                    offset);
2556 }
2557 #endif
2558
2559 int
2560 rte_eth_dev_callback_register(uint8_t port_id,
2561                         enum rte_eth_event_type event,
2562                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2563 {
2564         struct rte_eth_dev *dev;
2565         struct rte_eth_dev_callback *user_cb;
2566
2567         if (!cb_fn)
2568                 return -EINVAL;
2569
2570         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2571
2572         dev = &rte_eth_devices[port_id];
2573         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2574
2575         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
2576                 if (user_cb->cb_fn == cb_fn &&
2577                         user_cb->cb_arg == cb_arg &&
2578                         user_cb->event == event) {
2579                         break;
2580                 }
2581         }
2582
2583         /* create a new callback. */
2584         if (user_cb == NULL)
2585                 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
2586                                       sizeof(struct rte_eth_dev_callback), 0);
2587         if (user_cb != NULL) {
2588                 user_cb->cb_fn = cb_fn;
2589                 user_cb->cb_arg = cb_arg;
2590                 user_cb->event = event;
2591                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
2592         }
2593
2594         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2595         return (user_cb == NULL) ? -ENOMEM : 0;
2596 }
2597
/*
 * Unregister a previously registered event callback. A cb_arg of
 * (void *)-1 acts as a wildcard and matches any registered cb_arg for
 * the given (cb_fn, event) pair. Returns 0 on success, -EAGAIN if a
 * matching callback is currently executing and could not be removed.
 */
int
rte_eth_dev_callback_unregister(uint8_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	ret = 0;
	/* Save the successor before possibly freeing the current node. */
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	return ret;
}
2640
/*
 * Invoke all callbacks registered for the given event on this device.
 * The cb_lock is released around each user callback invocation so the
 * callback itself may call back into the ethdev API; the callback is
 * invoked through a stack copy, and the list node's `active` flag keeps
 * rte_eth_dev_callback_unregister() from freeing it mid-call.
 */
void
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;

	rte_spinlock_lock(&rte_eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		/* Copy to the stack so the node may be inspected safely
		 * while the lock is dropped. */
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_eth_dev_cb_lock);
		dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
}
2662
2663 int
2664 rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
2665 {
2666         uint32_t vec;
2667         struct rte_eth_dev *dev;
2668         struct rte_intr_handle *intr_handle;
2669         uint16_t qid;
2670         int rc;
2671
2672         if (!rte_eth_dev_is_valid_port(port_id)) {
2673                 PMD_DEBUG_TRACE("Invalid port_id=%u\n", port_id);
2674                 return -ENODEV;
2675         }
2676
2677         dev = &rte_eth_devices[port_id];
2678         intr_handle = &dev->pci_dev->intr_handle;
2679         if (!intr_handle->intr_vec) {
2680                 PMD_DEBUG_TRACE("RX Intr vector unset\n");
2681                 return -EPERM;
2682         }
2683
2684         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
2685                 vec = intr_handle->intr_vec[qid];
2686                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2687                 if (rc && rc != -EEXIST) {
2688                         PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2689                                         " op %d epfd %d vec %u\n",
2690                                         port_id, qid, op, epfd, vec);
2691                 }
2692         }
2693
2694         return 0;
2695 }
2696
2697 int
2698 rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
2699                           int epfd, int op, void *data)
2700 {
2701         uint32_t vec;
2702         struct rte_eth_dev *dev;
2703         struct rte_intr_handle *intr_handle;
2704         int rc;
2705
2706         if (!rte_eth_dev_is_valid_port(port_id)) {
2707                 PMD_DEBUG_TRACE("Invalid port_id=%u\n", port_id);
2708                 return -ENODEV;
2709         }
2710
2711         dev = &rte_eth_devices[port_id];
2712         if (queue_id >= dev->data->nb_rx_queues) {
2713                 PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
2714                 return -EINVAL;
2715         }
2716
2717         intr_handle = &dev->pci_dev->intr_handle;
2718         if (!intr_handle->intr_vec) {
2719                 PMD_DEBUG_TRACE("RX Intr vector unset\n");
2720                 return -EPERM;
2721         }
2722
2723         vec = intr_handle->intr_vec[queue_id];
2724         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2725         if (rc && rc != -EEXIST) {
2726                 PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2727                                 " op %d epfd %d vec %u\n",
2728                                 port_id, queue_id, op, epfd, vec);
2729                 return rc;
2730         }
2731
2732         return 0;
2733 }
2734
2735 int
2736 rte_eth_dev_rx_intr_enable(uint8_t port_id,
2737                            uint16_t queue_id)
2738 {
2739         struct rte_eth_dev *dev;
2740
2741         if (!rte_eth_dev_is_valid_port(port_id)) {
2742                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2743                 return -ENODEV;
2744         }
2745
2746         dev = &rte_eth_devices[port_id];
2747
2748         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
2749         return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
2750 }
2751
2752 int
2753 rte_eth_dev_rx_intr_disable(uint8_t port_id,
2754                             uint16_t queue_id)
2755 {
2756         struct rte_eth_dev *dev;
2757
2758         if (!rte_eth_dev_is_valid_port(port_id)) {
2759                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2760                 return -ENODEV;
2761         }
2762
2763         dev = &rte_eth_devices[port_id];
2764
2765         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
2766         return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
2767 }
2768
2769 #ifdef RTE_NIC_BYPASS
2770 int rte_eth_dev_bypass_init(uint8_t port_id)
2771 {
2772         struct rte_eth_dev *dev;
2773
2774         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2775
2776         dev = &rte_eth_devices[port_id];
2777         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
2778         (*dev->dev_ops->bypass_init)(dev);
2779         return 0;
2780 }
2781
2782 int
2783 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
2784 {
2785         struct rte_eth_dev *dev;
2786
2787         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2788
2789         dev = &rte_eth_devices[port_id];
2790         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2791         (*dev->dev_ops->bypass_state_show)(dev, state);
2792         return 0;
2793 }
2794
2795 int
2796 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
2797 {
2798         struct rte_eth_dev *dev;
2799
2800         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2801
2802         dev = &rte_eth_devices[port_id];
2803         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
2804         (*dev->dev_ops->bypass_state_set)(dev, new_state);
2805         return 0;
2806 }
2807
2808 int
2809 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
2810 {
2811         struct rte_eth_dev *dev;
2812
2813         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2814
2815         dev = &rte_eth_devices[port_id];
2816         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2817         (*dev->dev_ops->bypass_event_show)(dev, event, state);
2818         return 0;
2819 }
2820
2821 int
2822 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
2823 {
2824         struct rte_eth_dev *dev;
2825
2826         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2827
2828         dev = &rte_eth_devices[port_id];
2829
2830         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
2831         (*dev->dev_ops->bypass_event_set)(dev, event, state);
2832         return 0;
2833 }
2834
2835 int
2836 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
2837 {
2838         struct rte_eth_dev *dev;
2839
2840         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2841
2842         dev = &rte_eth_devices[port_id];
2843
2844         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
2845         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
2846         return 0;
2847 }
2848
2849 int
2850 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
2851 {
2852         struct rte_eth_dev *dev;
2853
2854         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2855
2856         dev = &rte_eth_devices[port_id];
2857
2858         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
2859         (*dev->dev_ops->bypass_ver_show)(dev, ver);
2860         return 0;
2861 }
2862
2863 int
2864 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
2865 {
2866         struct rte_eth_dev *dev;
2867
2868         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2869
2870         dev = &rte_eth_devices[port_id];
2871
2872         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
2873         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
2874         return 0;
2875 }
2876
2877 int
2878 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
2879 {
2880         struct rte_eth_dev *dev;
2881
2882         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2883
2884         dev = &rte_eth_devices[port_id];
2885
2886         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
2887         (*dev->dev_ops->bypass_wd_reset)(dev);
2888         return 0;
2889 }
2890 #endif
2891
2892 int
2893 rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
2894 {
2895         struct rte_eth_dev *dev;
2896
2897         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2898
2899         dev = &rte_eth_devices[port_id];
2900         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2901         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
2902                                 RTE_ETH_FILTER_NOP, NULL);
2903 }
2904
2905 int
2906 rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
2907                        enum rte_filter_op filter_op, void *arg)
2908 {
2909         struct rte_eth_dev *dev;
2910
2911         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2912
2913         dev = &rte_eth_devices[port_id];
2914         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2915         return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
2916 }
2917
2918 void *
2919 rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
2920                 rte_rx_callback_fn fn, void *user_param)
2921 {
2922 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2923         rte_errno = ENOTSUP;
2924         return NULL;
2925 #endif
2926         /* check input parameters */
2927         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2928                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2929                 rte_errno = EINVAL;
2930                 return NULL;
2931         }
2932
2933         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2934
2935         if (cb == NULL) {
2936                 rte_errno = ENOMEM;
2937                 return NULL;
2938         }
2939
2940         cb->fn.rx = fn;
2941         cb->param = user_param;
2942
2943         /* Add the callbacks in fifo order. */
2944         struct rte_eth_rxtx_callback *tail =
2945                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
2946
2947         if (!tail) {
2948                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
2949
2950         } else {
2951                 while (tail->next)
2952                         tail = tail->next;
2953                 tail->next = cb;
2954         }
2955
2956         return cb;
2957 }
2958
2959 void *
2960 rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
2961                 rte_tx_callback_fn fn, void *user_param)
2962 {
2963 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2964         rte_errno = ENOTSUP;
2965         return NULL;
2966 #endif
2967         /* check input parameters */
2968         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2969                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
2970                 rte_errno = EINVAL;
2971                 return NULL;
2972         }
2973
2974         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2975
2976         if (cb == NULL) {
2977                 rte_errno = ENOMEM;
2978                 return NULL;
2979         }
2980
2981         cb->fn.tx = fn;
2982         cb->param = user_param;
2983
2984         /* Add the callbacks in fifo order. */
2985         struct rte_eth_rxtx_callback *tail =
2986                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
2987
2988         if (!tail) {
2989                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
2990
2991         } else {
2992                 while (tail->next)
2993                         tail = tail->next;
2994                 tail->next = cb;
2995         }
2996
2997         return cb;
2998 }
2999
3000 int
3001 rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
3002                 struct rte_eth_rxtx_callback *user_cb)
3003 {
3004 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3005         return -ENOTSUP;
3006 #endif
3007         /* Check input parameters. */
3008         if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
3009                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3010                 return -EINVAL;
3011         }
3012
3013         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3014         struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
3015         struct rte_eth_rxtx_callback *prev_cb;
3016
3017         /* Reset head pointer and remove user cb if first in the list. */
3018         if (cb == user_cb) {
3019                 dev->post_rx_burst_cbs[queue_id] = user_cb->next;
3020                 return 0;
3021         }
3022
3023         /* Remove the user cb from the callback list. */
3024         do {
3025                 prev_cb = cb;
3026                 cb = cb->next;
3027
3028                 if (cb == user_cb) {
3029                         prev_cb->next = user_cb->next;
3030                         return 0;
3031                 }
3032
3033         } while (cb != NULL);
3034
3035         /* Callback wasn't found. */
3036         return -EINVAL;
3037 }
3038
3039 int
3040 rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
3041                 struct rte_eth_rxtx_callback *user_cb)
3042 {
3043 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3044         return -ENOTSUP;
3045 #endif
3046         /* Check input parameters. */
3047         if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
3048                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3049                 return -EINVAL;
3050         }
3051
3052         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3053         struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
3054         struct rte_eth_rxtx_callback *prev_cb;
3055
3056         /* Reset head pointer and remove user cb if first in the list. */
3057         if (cb == user_cb) {
3058                 dev->pre_tx_burst_cbs[queue_id] = user_cb->next;
3059                 return 0;
3060         }
3061
3062         /* Remove the user cb from the callback list. */
3063         do {
3064                 prev_cb = cb;
3065                 cb = cb->next;
3066
3067                 if (cb == user_cb) {
3068                         prev_cb->next = user_cb->next;
3069                         return 0;
3070                 }
3071
3072         } while (cb != NULL);
3073
3074         /* Callback wasn't found. */
3075         return -EINVAL;
3076 }
3077
3078 int
3079 rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3080         struct rte_eth_rxq_info *qinfo)
3081 {
3082         struct rte_eth_dev *dev;
3083
3084         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3085
3086         if (qinfo == NULL)
3087                 return -EINVAL;
3088
3089         dev = &rte_eth_devices[port_id];
3090         if (queue_id >= dev->data->nb_rx_queues) {
3091                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3092                 return -EINVAL;
3093         }
3094
3095         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3096
3097         memset(qinfo, 0, sizeof(*qinfo));
3098         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3099         return 0;
3100 }
3101
3102 int
3103 rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3104         struct rte_eth_txq_info *qinfo)
3105 {
3106         struct rte_eth_dev *dev;
3107
3108         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3109
3110         if (qinfo == NULL)
3111                 return -EINVAL;
3112
3113         dev = &rte_eth_devices[port_id];
3114         if (queue_id >= dev->data->nb_tx_queues) {
3115                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3116                 return -EINVAL;
3117         }
3118
3119         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3120
3121         memset(qinfo, 0, sizeof(*qinfo));
3122         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3123         return 0;
3124 }
3125
3126 int
3127 rte_eth_dev_set_mc_addr_list(uint8_t port_id,
3128                              struct ether_addr *mc_addr_set,
3129                              uint32_t nb_mc_addr)
3130 {
3131         struct rte_eth_dev *dev;
3132
3133         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3134
3135         dev = &rte_eth_devices[port_id];
3136         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3137         return dev->dev_ops->set_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
3138 }
3139
3140 int
3141 rte_eth_timesync_enable(uint8_t port_id)
3142 {
3143         struct rte_eth_dev *dev;
3144
3145         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3146         dev = &rte_eth_devices[port_id];
3147
3148         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3149         return (*dev->dev_ops->timesync_enable)(dev);
3150 }
3151
3152 int
3153 rte_eth_timesync_disable(uint8_t port_id)
3154 {
3155         struct rte_eth_dev *dev;
3156
3157         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3158         dev = &rte_eth_devices[port_id];
3159
3160         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3161         return (*dev->dev_ops->timesync_disable)(dev);
3162 }
3163
3164 int
3165 rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp,
3166                                    uint32_t flags)
3167 {
3168         struct rte_eth_dev *dev;
3169
3170         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3171         dev = &rte_eth_devices[port_id];
3172
3173         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3174         return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
3175 }
3176
3177 int
3178 rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp)
3179 {
3180         struct rte_eth_dev *dev;
3181
3182         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3183         dev = &rte_eth_devices[port_id];
3184
3185         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3186         return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
3187 }
3188
3189 int
3190 rte_eth_dev_get_reg_length(uint8_t port_id)
3191 {
3192         struct rte_eth_dev *dev;
3193
3194         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3195
3196         dev = &rte_eth_devices[port_id];
3197         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg_length, -ENOTSUP);
3198         return (*dev->dev_ops->get_reg_length)(dev);
3199 }
3200
3201 int
3202 rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info)
3203 {
3204         struct rte_eth_dev *dev;
3205
3206         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3207
3208         dev = &rte_eth_devices[port_id];
3209         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
3210         return (*dev->dev_ops->get_reg)(dev, info);
3211 }
3212
3213 int
3214 rte_eth_dev_get_eeprom_length(uint8_t port_id)
3215 {
3216         struct rte_eth_dev *dev;
3217
3218         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3219
3220         dev = &rte_eth_devices[port_id];
3221         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
3222         return (*dev->dev_ops->get_eeprom_length)(dev);
3223 }
3224
3225 int
3226 rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
3227 {
3228         struct rte_eth_dev *dev;
3229
3230         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3231
3232         dev = &rte_eth_devices[port_id];
3233         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
3234         return (*dev->dev_ops->get_eeprom)(dev, info);
3235 }
3236
3237 int
3238 rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
3239 {
3240         struct rte_eth_dev *dev;
3241
3242         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3243
3244         dev = &rte_eth_devices[port_id];
3245         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
3246         return (*dev->dev_ops->set_eeprom)(dev, info);
3247 }
3248
3249 int
3250 rte_eth_dev_get_dcb_info(uint8_t port_id,
3251                              struct rte_eth_dcb_info *dcb_info)
3252 {
3253         struct rte_eth_dev *dev;
3254
3255         if (!rte_eth_dev_is_valid_port(port_id)) {
3256                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3257                 return -ENODEV;
3258         }
3259
3260         dev = &rte_eth_devices[port_id];
3261         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
3262
3263         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
3264         return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
3265 }
3266
3267 void
3268 rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev, struct rte_pci_device *pci_dev)
3269 {
3270         if ((eth_dev == NULL) || (pci_dev == NULL)) {
3271                 PMD_DEBUG_TRACE("NULL pointer eth_dev=%p pci_dev=%p\n",
3272                                 eth_dev, pci_dev);
3273                 return;
3274         }
3275
3276         eth_dev->data->dev_flags = 0;
3277         if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
3278                 eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
3279         if (pci_dev->driver->drv_flags & RTE_PCI_DRV_DETACHABLE)
3280                 eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
3281
3282         eth_dev->data->kdrv = pci_dev->kdrv;
3283         eth_dev->data->numa_node = pci_dev->numa_node;
3284         eth_dev->data->drv_name = pci_dev->driver->name;
3285 }