dpdk.git: lib/librte_ether/rte_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_ether.h"
#include "rte_ethdev.h"

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
#define PMD_DEBUG_TRACE(fmt, args...) do {                        \
		RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
	} while (0)
#else
#define PMD_DEBUG_TRACE(fmt, args...)
#endif

/* Macros to restrict certain functions to the primary process only */
#define PROC_PRIMARY_OR_ERR_RET(retval) do { \
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
		PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
		return (retval); \
	} \
} while (0)

#define PROC_PRIMARY_OR_RET() do { \
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
		PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
		return; \
	} \
} while (0)

/* Macros to check for invalid function pointers in dev_ops structure */
#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
	if ((func) == NULL) { \
		PMD_DEBUG_TRACE("Function not supported\n"); \
		return (retval); \
	} \
} while (0)

#define FUNC_PTR_OR_RET(func) do { \
	if ((func) == NULL) { \
		PMD_DEBUG_TRACE("Function not supported\n"); \
		return; \
	} \
} while (0)

/* Macros to check for valid port */
#define VALID_PORTID_OR_ERR_RET(port_id, retval) do {		\
	if (!rte_eth_dev_is_valid_port(port_id)) {		\
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
		return retval;					\
	}							\
} while (0)

#define VALID_PORTID_OR_RET(port_id) do {			\
	if (!rte_eth_dev_is_valid_port(port_id)) {		\
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
		return;						\
	}							\
} while (0)

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static struct rte_eth_dev_data *rte_eth_dev_data;
static uint8_t nb_ports;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /	\
		sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /	\
		sizeof(rte_txq_stats_strings[0]))

/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user
 * application, a pointer to the callback's parameters, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

enum {
	DEV_DETACHED = 0,
	DEV_ATTACHED
};

static void
rte_eth_dev_data_alloc(void)
{
	const unsigned flags = 0;
	const struct rte_memzone *mz;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
				RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
				rte_socket_id(), flags);
	} else
		mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
	if (mz == NULL)
		rte_panic("Cannot allocate memzone for ethernet port data\n");

	rte_eth_dev_data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(rte_eth_dev_data, 0,
				RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

static uint8_t
rte_eth_dev_find_free_port(void)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].attached == DEV_DETACHED)
			return i;
	}
	return RTE_MAX_ETHPORTS;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name, enum rte_eth_dev_type type)
{
	uint8_t port_id;
	struct rte_eth_dev *eth_dev;

	port_id = rte_eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
		return NULL;
	}

	if (rte_eth_dev_data == NULL)
		rte_eth_dev_data_alloc();

	if (rte_eth_dev_allocated(name) != NULL) {
		PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
				name);
		return NULL;
	}

	eth_dev = &rte_eth_devices[port_id];
	eth_dev->data = &rte_eth_dev_data[port_id];
	snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
	eth_dev->data->port_id = port_id;
	eth_dev->attached = DEV_ATTACHED;
	eth_dev->dev_type = type;
	nb_ports++;
	return eth_dev;
}
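
/*
 * Example usage (illustrative sketch, not part of this file): a virtual
 * PMD's init path would typically reserve a port like this. The device
 * name "eth_null0" and struct pmd_internals are hypothetical.
 *
 *	struct rte_eth_dev *eth_dev;
 *
 *	eth_dev = rte_eth_dev_allocate("eth_null0", RTE_ETH_DEV_VIRTUAL);
 *	if (eth_dev == NULL)
 *		return -ENOMEM;
 *	eth_dev->data->dev_private = rte_zmalloc("pmd_internals",
 *			sizeof(struct pmd_internals), RTE_CACHE_LINE_SIZE);
 */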

static int
rte_eth_dev_create_unique_device_name(char *name, size_t size,
		struct rte_pci_device *pci_dev)
{
	int ret;

	if ((name == NULL) || (pci_dev == NULL))
		return -EINVAL;

	ret = snprintf(name, size, "%d:%d.%d",
			pci_dev->addr.bus, pci_dev->addr.devid,
			pci_dev->addr.function);
	if (ret < 0)
		return ret;
	return 0;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	eth_dev->attached = DEV_DETACHED;
	nb_ports--;
	return 0;
}

static int
rte_eth_dev_init(struct rte_pci_driver *pci_drv,
		 struct rte_pci_device *pci_dev)
{
	struct eth_driver    *eth_drv;
	struct rte_eth_dev *eth_dev;
	char ethdev_name[RTE_ETH_NAME_MAX_LEN];

	int diag;

	eth_drv = (struct eth_driver *)pci_drv;

	/* Create unique Ethernet device name using PCI address */
	rte_eth_dev_create_unique_device_name(ethdev_name,
			sizeof(ethdev_name), pci_dev);

	eth_dev = rte_eth_dev_allocate(ethdev_name, RTE_ETH_DEV_PCI);
	if (eth_dev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
				  eth_drv->dev_private_size,
				  RTE_CACHE_LINE_SIZE);
		if (eth_dev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private port data\n");
	}
	eth_dev->pci_dev = pci_dev;
	eth_dev->driver = eth_drv;
	eth_dev->data->rx_mbuf_alloc_failed = 0;

	/* init user callbacks */
	TAILQ_INIT(&(eth_dev->link_intr_cbs));

	/*
	 * Set the default MTU.
	 */
	eth_dev->data->mtu = ETHER_MTU;

	/* Invoke PMD device initialization function */
	diag = (*eth_drv->eth_dev_init)(eth_dev);
	if (diag == 0)
		return 0;
	PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%x device_id=0x%x) failed\n",
			pci_drv->name,
			(unsigned) pci_dev->id.vendor_id,
			(unsigned) pci_dev->id.device_id);
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	rte_eth_dev_release_port(eth_dev);
	return diag;
}

static int
rte_eth_dev_uninit(struct rte_pci_device *pci_dev)
{
	const struct eth_driver *eth_drv;
	struct rte_eth_dev *eth_dev;
	char ethdev_name[RTE_ETH_NAME_MAX_LEN];
	int ret;

	if (pci_dev == NULL)
		return -EINVAL;

	/* Create unique Ethernet device name using PCI address */
	rte_eth_dev_create_unique_device_name(ethdev_name,
			sizeof(ethdev_name), pci_dev);

	eth_dev = rte_eth_dev_allocated(ethdev_name);
	if (eth_dev == NULL)
		return -ENODEV;

	eth_drv = (const struct eth_driver *)pci_dev->driver;

	/* Invoke PMD device uninit function */
	if (*eth_drv->eth_dev_uninit) {
		ret = (*eth_drv->eth_dev_uninit)(eth_dev);
		if (ret)
			return ret;
	}

	/* free ether device */
	rte_eth_dev_release_port(eth_dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);

	eth_dev->pci_dev = NULL;
	eth_dev->driver = NULL;
	eth_dev->data = NULL;

	return 0;
}

/**
 * Register an Ethernet [Poll Mode] driver.
 *
 * Function invoked by the initialization function of an Ethernet driver
 * to simultaneously register itself as a PCI driver and as an Ethernet
 * Poll Mode Driver.
 * Invokes the rte_eal_pci_register() function to register the *pci_drv*
 * structure embedded in the *eth_drv* structure, after having stored the
 * address of the rte_eth_dev_init() function in the *devinit* field of
 * the *pci_drv* structure.
 * During the PCI probing phase, the rte_eth_dev_init() function is
 * invoked for each PCI [Ethernet device] matching the embedded PCI
 * identifiers provided by the driver.
 */
void
rte_eth_driver_register(struct eth_driver *eth_drv)
{
	eth_drv->pci_drv.devinit = rte_eth_dev_init;
	eth_drv->pci_drv.devuninit = rte_eth_dev_uninit;
	rte_eal_pci_register(&eth_drv->pci_drv);
}
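
/*
 * Example usage (illustrative sketch): a PCI PMD embeds its rte_pci_driver
 * in an eth_driver and registers it once at startup. All "mypmd" names
 * below are hypothetical.
 *
 *	static struct eth_driver rte_mypmd_driver = {
 *		.pci_drv = {
 *			.name = "rte_mypmd",
 *			.id_table = pci_id_mypmd_map,
 *			.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
 *		},
 *		.eth_dev_init = mypmd_dev_init,
 *		.dev_private_size = sizeof(struct mypmd_adapter),
 *	};
 *
 *	rte_eth_driver_register(&rte_mypmd_driver);
 */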

int
rte_eth_dev_is_valid_port(uint8_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    rte_eth_devices[port_id].attached != DEV_ATTACHED)
		return 0;
	else
		return 1;
}

int
rte_eth_dev_socket_id(uint8_t port_id)
{
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;
	return rte_eth_devices[port_id].pci_dev->numa_node;
}

uint8_t
rte_eth_dev_count(void)
{
	return nb_ports;
}

static enum rte_eth_dev_type
rte_eth_dev_get_device_type(uint8_t port_id)
{
	if (!rte_eth_dev_is_valid_port(port_id))
		return RTE_ETH_DEV_UNKNOWN;
	return rte_eth_devices[port_id].dev_type;
}

static int
rte_eth_dev_save(struct rte_eth_dev *devs, size_t size)
{
	if ((devs == NULL) ||
	    (size != sizeof(struct rte_eth_dev) * RTE_MAX_ETHPORTS))
		return -EINVAL;

	/* save current rte_eth_devices */
	memcpy(devs, rte_eth_devices, size);
	return 0;
}

static int
rte_eth_dev_get_changed_port(struct rte_eth_dev *devs, uint8_t *port_id)
{
	if ((devs == NULL) || (port_id == NULL))
		return -EINVAL;

	/* check which port was attached or detached */
	for (*port_id = 0; *port_id < RTE_MAX_ETHPORTS; (*port_id)++, devs++) {
		if (rte_eth_devices[*port_id].attached ^ devs->attached)
			return 0;
	}
	return -ENODEV;
}

static int
rte_eth_dev_get_addr_by_port(uint8_t port_id, struct rte_pci_addr *addr)
{
	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (addr == NULL) {
		PMD_DEBUG_TRACE("Null pointer is specified\n");
		return -EINVAL;
	}

	*addr = rte_eth_devices[port_id].pci_dev->addr;
	return 0;
}

static int
rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
{
	char *tmp;

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (name == NULL) {
		PMD_DEBUG_TRACE("Null pointer is specified\n");
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[i].data' here, because it
	 * might have been overwritten by a VDEV PMD */
	tmp = rte_eth_dev_data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

static int
rte_eth_dev_is_detachable(uint8_t port_id)
{
	uint32_t drv_flags;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return -EINVAL;
	}

	if (rte_eth_devices[port_id].dev_type == RTE_ETH_DEV_PCI) {
		switch (rte_eth_devices[port_id].pci_dev->kdrv) {
		case RTE_KDRV_IGB_UIO:
		case RTE_KDRV_UIO_GENERIC:
		case RTE_KDRV_NIC_UIO:
			break;
		case RTE_KDRV_VFIO:
		default:
			return -ENOTSUP;
		}
	}

	drv_flags = rte_eth_devices[port_id].driver->pci_drv.drv_flags;
	return !(drv_flags & RTE_PCI_DRV_DETACHABLE);
}

/* attach the new physical device, then store port_id of the device */
static int
rte_eth_dev_attach_pdev(struct rte_pci_addr *addr, uint8_t *port_id)
{
	uint8_t new_port_id;
	struct rte_eth_dev devs[RTE_MAX_ETHPORTS];

	if ((addr == NULL) || (port_id == NULL))
		goto err;

	/* save current port status */
	if (rte_eth_dev_save(devs, sizeof(devs)))
		goto err;
	/* re-construct pci_device_list */
	if (rte_eal_pci_scan())
		goto err;
	/* invoke the probe function of the driver that can handle the
	 * new device.
	 * TODO:
	 * rte_eal_pci_probe_one() should return port_id.
	 * And rte_eth_dev_save() and rte_eth_dev_get_changed_port()
	 * should be removed. */
	if (rte_eal_pci_probe_one(addr))
		goto err;
	/* get port_id enabled by above procedures */
	if (rte_eth_dev_get_changed_port(devs, &new_port_id))
		goto err;

	*port_id = new_port_id;
	return 0;
err:
	RTE_LOG(ERR, EAL, "Driver, cannot attach the device\n");
	return -1;
}

/* detach the physical device, then store the pci_addr of the device */
static int
rte_eth_dev_detach_pdev(uint8_t port_id, struct rte_pci_addr *addr)
{
	struct rte_pci_addr freed_addr;
	struct rte_pci_addr vp;

	if (addr == NULL)
		goto err;

	/* check whether the driver supports detach feature, or not */
	if (rte_eth_dev_is_detachable(port_id))
		goto err;

	/* get pci address by port id */
	if (rte_eth_dev_get_addr_by_port(port_id, &freed_addr))
		goto err;

	/* A zeroed pci addr means the port comes from a virtual device */
	vp.domain = vp.bus = vp.devid = vp.function = 0;
	if (rte_eal_compare_pci_addr(&vp, &freed_addr) == 0)
		goto err;

	/* invoke devuninit func of the pci driver,
	 * also remove the device from pci_device_list */
	if (rte_eal_pci_detach(&freed_addr))
		goto err;

	*addr = freed_addr;
	return 0;
err:
	RTE_LOG(ERR, EAL, "Driver, cannot detach the device\n");
	return -1;
}

/* attach the new virtual device, then store port_id of the device */
static int
rte_eth_dev_attach_vdev(const char *vdevargs, uint8_t *port_id)
{
	char *name = NULL, *args = NULL;
	uint8_t new_port_id;
	struct rte_eth_dev devs[RTE_MAX_ETHPORTS];
	int ret = -1;

	if ((vdevargs == NULL) || (port_id == NULL))
		goto end;

	/* parse vdevargs, then retrieve device name and args */
	if (rte_eal_parse_devargs_str(vdevargs, &name, &args))
		goto end;

	/* save current port status */
	if (rte_eth_dev_save(devs, sizeof(devs)))
		goto end;
	/* walk through dev_driver_list to find the driver of the device,
	 * then invoke the probe function of the driver.
	 * TODO:
	 * rte_eal_vdev_init() should return port_id,
	 * And rte_eth_dev_save() and rte_eth_dev_get_changed_port()
	 * should be removed. */
	if (rte_eal_vdev_init(name, args))
		goto end;
	/* get port_id enabled by above procedures */
	if (rte_eth_dev_get_changed_port(devs, &new_port_id))
		goto end;
	ret = 0;
	*port_id = new_port_id;
end:
	if (name)
		free(name);
	if (args)
		free(args);

	if (ret < 0)
		RTE_LOG(ERR, EAL, "Driver, cannot attach the device\n");
	return ret;
}

/* detach the virtual device, then store the name of the device */
static int
rte_eth_dev_detach_vdev(uint8_t port_id, char *vdevname)
{
	char name[RTE_ETH_NAME_MAX_LEN];

	if (vdevname == NULL)
		goto err;

	/* check whether the driver supports detach feature, or not */
	if (rte_eth_dev_is_detachable(port_id))
		goto err;

	/* get device name by port id */
	if (rte_eth_dev_get_name_by_port(port_id, name))
		goto err;
	/* walk through dev_driver_list to find the driver of the device,
	 * then invoke the uninit function of the driver */
	if (rte_eal_vdev_uninit(name))
		goto err;

	strncpy(vdevname, name, sizeof(name));
	return 0;
err:
	RTE_LOG(ERR, EAL, "Driver, cannot detach the device\n");
	return -1;
}

/* attach the new device, then store port_id of the device */
int
rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
{
	struct rte_pci_addr addr;

	if ((devargs == NULL) || (port_id == NULL))
		return -EINVAL;

	if (eal_parse_pci_DomBDF(devargs, &addr) == 0)
		return rte_eth_dev_attach_pdev(&addr, port_id);
	else
		return rte_eth_dev_attach_vdev(devargs, port_id);
}

/* detach the device, then store the name of the device */
int
rte_eth_dev_detach(uint8_t port_id, char *name)
{
	struct rte_pci_addr addr;
	int ret;

	if (name == NULL)
		return -EINVAL;

	if (rte_eth_dev_get_device_type(port_id) == RTE_ETH_DEV_PCI) {
		ret = rte_eth_dev_get_addr_by_port(port_id, &addr);
		if (ret < 0)
			return ret;

		ret = rte_eth_dev_detach_pdev(port_id, &addr);
		if (ret == 0)
			snprintf(name, RTE_ETH_NAME_MAX_LEN,
				"%04x:%02x:%02x.%d",
				addr.domain, addr.bus,
				addr.devid, addr.function);

		return ret;
	} else
		return rte_eth_dev_detach_vdev(port_id, name);
}
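
/*
 * Example usage (illustrative sketch): hotplug a port at runtime. A PCI
 * device is named by its DomBDF address, a virtual device by its driver
 * name plus arguments; rte_eth_dev_attach() dispatches on the format.
 *
 *	uint8_t port_id;
 *	char name[RTE_ETH_NAME_MAX_LEN];
 *
 *	if (rte_eth_dev_attach("0000:02:00.0", &port_id) == 0) {
 *		... configure, start, use the port ...
 *		rte_eth_dev_detach(port_id, name);
 *	}
 */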

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	void **rxq;
	unsigned i;

	if (dev->data->rx_queues == NULL) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else { /* re-configure */
		FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);
		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (rxq == NULL)
			return -(ENOMEM);
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(rxq + old_nb_queues, 0,
				sizeof(rxq[0]) * new_qs);
		}

		dev->data->rx_queues = rxq;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}

int
rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup */
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
}

int
rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup */
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
}

int
rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup */
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
}

int
rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup */
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	void **txq;
	unsigned i;

	if (dev->data->tx_queues == NULL) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
						   sizeof(dev->data->tx_queues[0]) * nb_queues,
						   RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else { /* re-configure */
		FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);
		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
				  RTE_CACHE_LINE_SIZE);
		if (txq == NULL)
			return -ENOMEM;
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(txq + old_nb_queues, 0,
			       sizeof(txq[0]) * new_qs);
		}

		dev->data->tx_queues = txq;
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}

int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup */
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		PMD_DEBUG_TRACE(
			"Number of RX queues requested (%u) is greater than max supported(%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		return -EINVAL;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		PMD_DEBUG_TRACE(
			"Number of TX queues requested (%u) is greater than max supported(%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
	if (nb_rx_q > dev_info.max_rx_queues) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
				port_id, nb_rx_q, dev_info.max_rx_queues);
		return -EINVAL;
	}
	if (nb_rx_q == 0) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
		return -EINVAL;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
				port_id, nb_tx_q, dev_info.max_tx_queues);
		return -EINVAL;
	}
	if (nb_tx_q == 0) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
		return -EINVAL;
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * If link state interrupt is enabled, check that the
	 * device supports it.
	 */
	if (dev_conf->intr_conf.lsc == 1) {
		const struct rte_pci_driver *pci_drv = &dev->driver->pci_drv;

		if (!(pci_drv->drv_flags & RTE_PCI_DRV_INTR_LSC)) {
			PMD_DEBUG_TRACE("driver %s does not support lsc\n",
					pci_drv->name);
			return -EINVAL;
		}
	}

	/*
	 * If jumbo frames are enabled, check that the maximum RX packet
	 * length is supported by the configured device.
	 */
	if (dev_conf->rxmode.jumbo_frame == 1) {
		if (dev_conf->rxmode.max_rx_pkt_len >
		    dev_info.max_rx_pktlen) {
			PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" > max valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)dev_info.max_rx_pktlen);
			return -EINVAL;
		} else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
			PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" < min valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)ETHER_MIN_LEN);
			return -EINVAL;
		}
	} else {
		if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
			dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
			/* Use default value */
			dev->data->dev_conf.rxmode.max_rx_pkt_len =
							ETHER_MAX_LEN;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
				port_id, diag);
		return diag;
	}

	diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
				port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		return diag;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
				port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		return diag;
	}

	return 0;
}
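
/*
 * Example usage (illustrative sketch): minimal single-queue configuration.
 * The port must be stopped; queue setup and rte_eth_dev_start() follow.
 *
 *	static const struct rte_eth_conf port_conf = {
 *		.rxmode = {
 *			.max_rx_pkt_len = ETHER_MAX_LEN,
 *		},
 *	};
 *
 *	if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot configure port %u\n", port_id);
 */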

static void
rte_eth_dev_config_restore(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct ether_addr addr;
	uint16_t i;
	uint32_t pool = 0;

	dev = &rte_eth_devices[port_id];

	rte_eth_dev_info_get(port_id, &dev_info);

	if (RTE_ETH_DEV_SRIOV(dev).active)
		pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

	/* replay MAC address configuration */
	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = dev->data->mac_addrs[i];

		/* skip zero address */
		if (is_zero_ether_addr(&addr))
			continue;

		/* add address to the hardware */
		if (*dev->dev_ops->mac_addr_add &&
			(dev->data->mac_pool_sel[i] & (1ULL << pool)))
			(*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
		else {
			PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
					port_id);
			/* exit the loop but don't return an error */
			break;
		}
	}

	/* replay promiscuous configuration */
	if (rte_eth_promiscuous_get(port_id) == 1)
		rte_eth_promiscuous_enable(port_id);
	else if (rte_eth_promiscuous_get(port_id) == 0)
		rte_eth_promiscuous_disable(port_id);

	/* replay all multicast configuration */
	if (rte_eth_allmulticast_get(port_id) == 1)
		rte_eth_allmulticast_enable(port_id);
	else if (rte_eth_allmulticast_get(port_id) == 0)
		rte_eth_allmulticast_disable(port_id);
}

int
rte_eth_dev_start(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	int diag;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup */
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
			" already started\n",
			port_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	rte_eth_dev_config_restore(port_id);

	if (dev->data->dev_conf.intr_conf.lsc != 0) {
		FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
		(*dev->dev_ops->link_update)(dev, 0);
	}
	return 0;
}

void
rte_eth_dev_stop(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup */
	PROC_PRIMARY_OR_RET();

	VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
			" already stopped\n",
			port_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
}

int
rte_eth_dev_set_link_up(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup */
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
	return (*dev->dev_ops->dev_set_link_up)(dev);
}

int
rte_eth_dev_set_link_down(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup */
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
	return (*dev->dev_ops->dev_set_link_down)(dev);
}

void
rte_eth_dev_close(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup */
	PROC_PRIMARY_OR_RET();

	VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_close)(dev);

	rte_free(dev->data->rx_queues);
	dev->data->rx_queues = NULL;
	rte_free(dev->data->tx_queues);
	dev->data->tx_queues = NULL;
}

int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint32_t mbp_buf_size;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup */
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

	/*
	 * Check the size of the mbuf data buffer.
	 * This value must be provided in the private data of the memory pool.
	 * First check that the memory pool has valid private data.
	 */
	rte_eth_dev_info_get(port_id, &dev_info);
	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
				mp->name, (int) mp->private_data_size,
				(int) sizeof(struct rte_pktmbuf_pool_private));
		return -ENOSPC;
	}
	mbp_buf_size = rte_pktmbuf_data_room_size(mp);

	if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
		PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
				"(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
				"=%d)\n",
				mp->name,
				(int)mbp_buf_size,
				(int)(RTE_PKTMBUF_HEADROOM +
				      dev_info.min_rx_bufsize),
				(int)RTE_PKTMBUF_HEADROOM,
				(int)dev_info.min_rx_bufsize);
		return -EINVAL;
	}

	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
			nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
			nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {

		PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
			"should be: <= %hu, >= %hu, and a multiple of %hu\n",
			nb_rx_desc,
			dev_info.rx_desc_lim.nb_max,
			dev_info.rx_desc_lim.nb_min,
			dev_info.rx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (rx_conf == NULL)
		rx_conf = &dev_info.default_rxconf;

	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
					      socket_id, rx_conf, mp);
	if (!ret) {
		if (!dev->data->min_rx_buf_size ||
		    dev->data->min_rx_buf_size > mbp_buf_size)
			dev->data->min_rx_buf_size = mbp_buf_size;
	}

	return ret;
}
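
/*
 * Example usage (illustrative sketch): one RX queue backed by a packet
 * mbuf pool. The pool size, cache size and ring size are arbitrary here;
 * passing NULL for rx_conf selects the driver's default_rxconf.
 *
 *	struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 8191,
 *			256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *
 *	if (mp == NULL ||
 *	    rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(),
 *				   NULL, mp) < 0)
 *		rte_exit(EXIT_FAILURE, "RX queue setup failed\n");
 */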

int
rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
		       uint16_t nb_tx_desc, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup */
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);

	rte_eth_dev_info_get(port_id, &dev_info);

	if (tx_conf == NULL)
		tx_conf = &dev_info.default_txconf;

	return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
					       socket_id, tx_conf);
}
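
/*
 * Example usage (illustrative sketch): the TX counterpart needs no mempool,
 * and a NULL tx_conf likewise selects the driver's default_txconf.
 *
 *	if (rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *				   NULL) < 0)
 *		rte_exit(EXIT_FAILURE, "TX queue setup failed\n");
 */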

void
rte_eth_promiscuous_enable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
	(*dev->dev_ops->promiscuous_enable)(dev);
	dev->data->promiscuous = 1;
}

void
rte_eth_promiscuous_disable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
	dev->data->promiscuous = 0;
	(*dev->dev_ops->promiscuous_disable)(dev);
}

int
rte_eth_promiscuous_get(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	return dev->data->promiscuous;
}

void
rte_eth_allmulticast_enable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
	(*dev->dev_ops->allmulticast_enable)(dev);
	dev->data->all_multicast = 1;
}

void
rte_eth_allmulticast_disable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
	dev->data->all_multicast = 0;
	(*dev->dev_ops->allmulticast_disable)(dev);
}

int
rte_eth_allmulticast_get(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	return dev->data->all_multicast;
}

static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

void
rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc != 0)
		rte_eth_dev_atomic_read_link_status(dev, eth_link);
	else {
		FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 1);
		*eth_link = dev->data->dev_link;
	}
}

void
rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc != 0)
		rte_eth_dev_atomic_read_link_status(dev, eth_link);
	else {
		FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 0);
		*eth_link = dev->data->dev_link;
	}
}
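
/*
 * Example usage (illustrative sketch): poll the link once without blocking,
 * e.g. from an application status loop.
 *
 *	struct rte_eth_link link;
 *
 *	rte_eth_link_get_nowait(port_id, &link);
 *	printf("port %u: %s, %u Mbps\n", port_id,
 *	       link.link_status ? "up" : "down", link.link_speed);
 */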
1440
1441 int
1442 rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
1443 {
1444         struct rte_eth_dev *dev;
1445
1446         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1447
1448         dev = &rte_eth_devices[port_id];
1449         memset(stats, 0, sizeof(*stats));
1450
1451         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1452         (*dev->dev_ops->stats_get)(dev, stats);
1453         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1454         return 0;
1455 }
1456
1457 void
1458 rte_eth_stats_reset(uint8_t port_id)
1459 {
1460         struct rte_eth_dev *dev;
1461
1462         VALID_PORTID_OR_RET(port_id);
1463         dev = &rte_eth_devices[port_id];
1464
1465         FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1466         (*dev->dev_ops->stats_reset)(dev);
1467 }
1468
1469 /* retrieve ethdev extended statistics */
1470 int
1471 rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
1472         unsigned n)
1473 {
1474         struct rte_eth_stats eth_stats;
1475         struct rte_eth_dev *dev;
1476         unsigned count = 0, i, q;
1477         signed xcount = 0;
1478         uint64_t val, *stats_ptr;
1479
1480         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1481
1482         dev = &rte_eth_devices[port_id];
1483
1484         /* Return generic statistics */
1485         count = RTE_NB_STATS;
1486
1487         /* implemented by the driver */
1488         if (dev->dev_ops->xstats_get != NULL) {
1489                 /* Retrieve the xstats from the driver at the end of the
1490                  * xstats struct.
1491                  */
1492                 xcount = (*dev->dev_ops->xstats_get)(dev, &xstats[count],
1493                          (n > count) ? n - count : 0);
1494
1495                 if (xcount < 0)
1496                         return xcount;
1497         } else {
1498                 count += dev->data->nb_rx_queues * RTE_NB_RXQ_STATS;
1499                 count += dev->data->nb_tx_queues * RTE_NB_TXQ_STATS;
1500         }
1501
1502         if (n < count + xcount)
1503                 return count + xcount;
1504
1505         /* now fill the xstats structure */
1506         count = 0;
1507         rte_eth_stats_get(port_id, &eth_stats);
1508
1509         /* global stats */
1510         for (i = 0; i < RTE_NB_STATS; i++) {
1511                 stats_ptr = RTE_PTR_ADD(&eth_stats,
1512                                         rte_stats_strings[i].offset);
1513                 val = *stats_ptr;
1514                 snprintf(xstats[count].name, sizeof(xstats[count].name),
1515                         "%s", rte_stats_strings[i].name);
1516                 xstats[count++].value = val;
1517         }
1518
1519         /* if xstats_get() is implemented by the PMD, the Q stats are done */
1520         if (dev->dev_ops->xstats_get != NULL)
1521                 return count + xcount;
1522
1523         /* per-rxq stats */
1524         for (q = 0; q < dev->data->nb_rx_queues; q++) {
1525                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
1526                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1527                                         rte_rxq_stats_strings[i].offset +
1528                                         q * sizeof(uint64_t));
1529                         val = *stats_ptr;
1530                         snprintf(xstats[count].name, sizeof(xstats[count].name),
1531                                 "rx_q%u_%s", q,
1532                                 rte_rxq_stats_strings[i].name);
1533                         xstats[count++].value = val;
1534                 }
1535         }
1536
1537         /* per-txq stats */
1538         for (q = 0; q < dev->data->nb_tx_queues; q++) {
1539                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
1540                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1541                                         rte_txq_stats_strings[i].offset +
1542                                         q * sizeof(uint64_t));
1543                         val = *stats_ptr;
1544                         snprintf(xstats[count].name, sizeof(xstats[count].name),
1545                                 "tx_q%u_%s", q,
1546                                 rte_txq_stats_strings[i].name);
1547                         xstats[count++].value = val;
1548                 }
1549         }
1550
1551         return count + xcount;
1552 }
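
/*
 * Illustrative usage sketch (not part of the library): size the xstats
 * array with a zero-length query first, then fetch and print every
 * counter. Relies on the size-query behaviour of the implementation
 * above (n smaller than the required count returns that count); the
 * example_dump_xstats name and plain malloc() are assumptions made for
 * this example only.
 */
static void
example_dump_xstats(uint8_t port_id)
{
        struct rte_eth_xstats *xstats;
        int len, ret, i;

        len = rte_eth_xstats_get(port_id, NULL, 0); /* required array size */
        if (len <= 0)
                return;
        xstats = malloc(sizeof(*xstats) * len);
        if (xstats == NULL)
                return;
        ret = rte_eth_xstats_get(port_id, xstats, len);
        for (i = 0; i < ret && i < len; i++)
                printf("%s: %"PRIu64"\n", xstats[i].name, xstats[i].value);
        free(xstats);
}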
1553
1554 /* reset ethdev extended statistics */
1555 void
1556 rte_eth_xstats_reset(uint8_t port_id)
1557 {
1558         struct rte_eth_dev *dev;
1559
1560         VALID_PORTID_OR_RET(port_id);
1561         dev = &rte_eth_devices[port_id];
1562
1563         /* implemented by the driver */
1564         if (dev->dev_ops->xstats_reset != NULL) {
1565                 (*dev->dev_ops->xstats_reset)(dev);
1566                 return;
1567         }
1568
1569         /* fallback to default */
1570         rte_eth_stats_reset(port_id);
1571 }
1572
1573 static int
1574 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1575                 uint8_t is_rx)
1576 {
1577         struct rte_eth_dev *dev;
1578
1579         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1580
1581         dev = &rte_eth_devices[port_id];
1582
1583         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1584         return (*dev->dev_ops->queue_stats_mapping_set)
1585                         (dev, queue_id, stat_idx, is_rx);
1586 }
1587
1588
1589 int
1590 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1591                 uint8_t stat_idx)
1592 {
1593         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1594                         STAT_QMAP_TX);
1595 }
1596
1597
1598 int
1599 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1600                 uint8_t stat_idx)
1601 {
1602         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1603                         STAT_QMAP_RX);
1604 }
1605
1606
1607 void
1608 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1609 {
1610         struct rte_eth_dev *dev;
1611         const struct rte_eth_desc_lim lim = {
1612                 .nb_max = UINT16_MAX,
1613                 .nb_min = 0,
1614                 .nb_align = 1,
1615         };
1616
1617         VALID_PORTID_OR_RET(port_id);
1618         dev = &rte_eth_devices[port_id];
1619
1620         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
1621         dev_info->rx_desc_lim = lim;
1622         dev_info->tx_desc_lim = lim;
1623
1624         FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1625         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1626         dev_info->pci_dev = dev->pci_dev;
1627         if (dev->driver)
1628                 dev_info->driver_name = dev->driver->pci_drv.name;
1629 }
1630
1631 void
1632 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1633 {
1634         struct rte_eth_dev *dev;
1635
1636         VALID_PORTID_OR_RET(port_id);
1637         dev = &rte_eth_devices[port_id];
1638         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1639 }
1640
1641
1642 int
1643 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1644 {
1645         struct rte_eth_dev *dev;
1646
1647         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1648
1649         dev = &rte_eth_devices[port_id];
1650         *mtu = dev->data->mtu;
1651         return 0;
1652 }
1653
1654 int
1655 rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
1656 {
1657         int ret;
1658         struct rte_eth_dev *dev;
1659
1660         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1661         dev = &rte_eth_devices[port_id];
1662         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
1663
1664         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
1665         if (!ret)
1666                 dev->data->mtu = mtu;
1667
1668         return ret;
1669 }
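
/*
 * Illustrative sketch (not part of the library): raise the MTU for
 * jumbo frames and read it back. The example_set_jumbo_mtu name is an
 * assumption for the example only.
 */
static int
example_set_jumbo_mtu(uint8_t port_id)
{
        uint16_t mtu = 0;
        int ret;

        ret = rte_eth_dev_set_mtu(port_id, 9000);
        if (ret != 0)
                return ret; /* e.g. -ENOTSUP when the PMD lacks mtu_set */
        ret = rte_eth_dev_get_mtu(port_id, &mtu);
        if (ret != 0)
                return ret;
        return (mtu == 9000) ? 0 : -EIO;
}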
1670
1671 int
1672 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1673 {
1674         struct rte_eth_dev *dev;
1675
1676         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1677         dev = &rte_eth_devices[port_id];
1678         if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1679                 PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1680                 return -ENOSYS;
1681         }
1682
1683         if (vlan_id > 4095) {
1684                 PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1685                                 port_id, (unsigned) vlan_id);
1686                 return -EINVAL;
1687         }
1688         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1689
1690         return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1691 }
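
/*
 * Illustrative sketch (not part of the library): accept frames tagged
 * with VLAN 100 on a port. This assumes the application enabled
 * rxmode.hw_vlan_filter when it configured the device; otherwise the
 * function above returns -ENOSYS.
 */
static int
example_allow_vlan_100(uint8_t port_id)
{
        return rte_eth_dev_vlan_filter(port_id, 100, 1 /* on */);
}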
1692
1693 int
1694 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1695 {
1696         struct rte_eth_dev *dev;
1697
1698         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1699         dev = &rte_eth_devices[port_id];
1700         if (rx_queue_id >= dev->data->nb_rx_queues) {
1701                 PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
1702                 return -EINVAL;
1703         }
1704
1705         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1706         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1707
1708         return 0;
1709 }
1710
1711 int
1712 rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
1713 {
1714         struct rte_eth_dev *dev;
1715
1716         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1717         dev = &rte_eth_devices[port_id];
1718         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1719         (*dev->dev_ops->vlan_tpid_set)(dev, tpid);
1720
1721         return 0;
1722 }
1723
1724 int
1725 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
1726 {
1727         struct rte_eth_dev *dev;
1728         int ret = 0;
1729         int mask = 0;
1730         int cur, org = 0;
1731
1732         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1733         dev = &rte_eth_devices[port_id];
1734
1735         /*check which option changed by application*/
1736         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
1737         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
1738         if (cur != org) {
1739                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
1740                 mask |= ETH_VLAN_STRIP_MASK;
1741         }
1742
1743         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
1744         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
1745         if (cur != org) {
1746                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
1747                 mask |= ETH_VLAN_FILTER_MASK;
1748         }
1749
1750         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
1751         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
1752         if (cur != org) {
1753                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
1754                 mask |= ETH_VLAN_EXTEND_MASK;
1755         }
1756
1757         /*no change*/
1758         if (mask == 0)
1759                 return ret;
1760
1761         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
1762         (*dev->dev_ops->vlan_offload_set)(dev, mask);
1763
1764         return ret;
1765 }
1766
1767 int
1768 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1769 {
1770         struct rte_eth_dev *dev;
1771         int ret = 0;
1772
1773         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1774         dev = &rte_eth_devices[port_id];
1775
1776         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1777                 ret |= ETH_VLAN_STRIP_OFFLOAD;
1778
1779         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1780                 ret |= ETH_VLAN_FILTER_OFFLOAD;
1781
1782         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1783                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
1784
1785         return ret;
1786 }
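
/*
 * Illustrative sketch (not part of the library): turn VLAN stripping on
 * without disturbing the filter/extend settings, using the
 * read-modify-write pattern the two functions above are designed for.
 */
static int
example_enable_vlan_strip(uint8_t port_id)
{
        int mask = rte_eth_dev_get_vlan_offload(port_id);

        if (mask < 0)
                return mask;
        return rte_eth_dev_set_vlan_offload(port_id,
                                            mask | ETH_VLAN_STRIP_OFFLOAD);
}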
1787
1788 int
1789 rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
1790 {
1791         struct rte_eth_dev *dev;
1792
1793         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1794         dev = &rte_eth_devices[port_id];
1795         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
1796         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
1797
1798         return 0;
1799 }
1800
1801 int
1802 rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1803 {
1804         struct rte_eth_dev *dev;
1805
1806         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1807         dev = &rte_eth_devices[port_id];
1808         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
1809         memset(fc_conf, 0, sizeof(*fc_conf));
1810         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
1811 }
1812
1813 int
1814 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1815 {
1816         struct rte_eth_dev *dev;
1817
1818         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1819         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
1820                 PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
1821                 return -EINVAL;
1822         }
1823
1824         dev = &rte_eth_devices[port_id];
1825         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
1826         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
1827 }
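
/*
 * Illustrative sketch (not part of the library): read the current flow
 * control settings, then enable full (RX + TX) pause-frame handling
 * while leaving the water marks as reported by the driver.
 */
static int
example_enable_pause(uint8_t port_id)
{
        struct rte_eth_fc_conf fc_conf;
        int ret;

        ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
        if (ret < 0)
                return ret;
        fc_conf.mode = RTE_FC_FULL;
        return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}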
1828
1829 int
1830 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
1831 {
1832         struct rte_eth_dev *dev;
1833
1834         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1835         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
1836                 PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
1837                 return -EINVAL;
1838         }
1839
1840         dev = &rte_eth_devices[port_id];
1841         /* High water, low water validation are device specific */
1842         if (*dev->dev_ops->priority_flow_ctrl_set)
1843                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
1844         return -ENOTSUP;
1845 }
1846
1847 static int
1848 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
1849                         uint16_t reta_size)
1850 {
1851         uint16_t i, num;
1852
1853         if (!reta_conf)
1854                 return -EINVAL;
1855
1856         if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
1857                 PMD_DEBUG_TRACE("Invalid reta size, should be %u aligned\n",
1858                                                         RTE_RETA_GROUP_SIZE);
1859                 return -EINVAL;
1860         }
1861
1862         num = reta_size / RTE_RETA_GROUP_SIZE;
1863         for (i = 0; i < num; i++) {
1864                 if (reta_conf[i].mask)
1865                         return 0;
1866         }
1867
1868         return -EINVAL;
1869 }
1870
1871 static int
1872 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
1873                          uint16_t reta_size,
1874                          uint8_t max_rxq)
1875 {
1876         uint16_t i, idx, shift;
1877
1878         if (!reta_conf)
1879                 return -EINVAL;
1880
1881         if (max_rxq == 0) {
1882                 PMD_DEBUG_TRACE("No receive queue is available\n");
1883                 return -EINVAL;
1884         }
1885
1886         for (i = 0; i < reta_size; i++) {
1887                 idx = i / RTE_RETA_GROUP_SIZE;
1888                 shift = i % RTE_RETA_GROUP_SIZE;
1889                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
1890                         (reta_conf[idx].reta[shift] >= max_rxq)) {
1891                         PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
1892                                 "the maximum rxq index: %u\n", idx, shift,
1893                                 reta_conf[idx].reta[shift], max_rxq);
1894                         return -EINVAL;
1895                 }
1896         }
1897
1898         return 0;
1899 }
1900
1901 int
1902 rte_eth_dev_rss_reta_update(uint8_t port_id,
1903                             struct rte_eth_rss_reta_entry64 *reta_conf,
1904                             uint16_t reta_size)
1905 {
1906         struct rte_eth_dev *dev;
1907         int ret;
1908
1909         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1910         /* Check mask bits */
1911         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1912         if (ret < 0)
1913                 return ret;
1914
1915         dev = &rte_eth_devices[port_id];
1916
1917         /* Check entry value */
1918         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
1919                                 dev->data->nb_rx_queues);
1920         if (ret < 0)
1921                 return ret;
1922
1923         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
1924         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
1925 }
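
/*
 * Illustrative sketch (not part of the library): spread a
 * reta_size-entry redirection table round-robin across nb_queues RX
 * queues. reta_size must be a multiple of RTE_RETA_GROUP_SIZE, as
 * enforced by rte_eth_check_reta_mask() above; the fixed 8-group array
 * (512 entries) is an assumption of the example.
 */
static int
example_spread_reta(uint8_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
        struct rte_eth_rss_reta_entry64 reta_conf[8];
        uint16_t i;

        if (nb_queues == 0 || reta_size > 8 * RTE_RETA_GROUP_SIZE)
                return -EINVAL;
        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < reta_size; i++) {
                uint16_t idx = i / RTE_RETA_GROUP_SIZE;
                uint16_t shift = i % RTE_RETA_GROUP_SIZE;

                reta_conf[idx].mask |= 1ULL << shift;
                reta_conf[idx].reta[shift] = i % nb_queues;
        }
        return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}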
1926
1927 int
1928 rte_eth_dev_rss_reta_query(uint8_t port_id,
1929                            struct rte_eth_rss_reta_entry64 *reta_conf,
1930                            uint16_t reta_size)
1931 {
1932         struct rte_eth_dev *dev;
1933         int ret;
1934
1935         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1939
1940         /* Check mask bits */
1941         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1942         if (ret < 0)
1943                 return ret;
1944
1945         dev = &rte_eth_devices[port_id];
1946         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
1947         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
1948 }
1949
1950 int
1951 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
1952 {
1953         struct rte_eth_dev *dev;
1954         uint64_t rss_hash_protos;
1955
1956         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1957         rss_hash_protos = rss_conf->rss_hf;
1958         if ((rss_hash_protos != 0) &&
1959             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
1960                 PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%"PRIx64"\n",
1961                                 rss_hash_protos);
1962                 return -EINVAL;
1963         }
1964         dev = &rte_eth_devices[port_id];
1965         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
1966         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
1967 }
1968
1969 int
1970 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
1971                               struct rte_eth_rss_conf *rss_conf)
1972 {
1973         struct rte_eth_dev *dev;
1974
1975         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1976         dev = &rte_eth_devices[port_id];
1977         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
1978         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
1979 }
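
/*
 * Illustrative sketch (not part of the library): restrict the RSS hash
 * to IP-only inputs while keeping the key the driver already programmed
 * (rss_key == NULL means "leave the key unchanged" by PMD convention).
 */
static int
example_hash_on_ip_only(uint8_t port_id)
{
        struct rte_eth_rss_conf rss_conf = {
                .rss_key = NULL,
                .rss_hf = ETH_RSS_IP,
        };

        return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}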
1980
1981 int
1982 rte_eth_dev_udp_tunnel_add(uint8_t port_id,
1983                            struct rte_eth_udp_tunnel *udp_tunnel)
1984 {
1985         struct rte_eth_dev *dev;
1986
1987         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1988         if (udp_tunnel == NULL) {
1989                 PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
1990                 return -EINVAL;
1991         }
1992
1993         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
1994                 PMD_DEBUG_TRACE("Invalid tunnel type\n");
1995                 return -EINVAL;
1996         }
1997
1998         dev = &rte_eth_devices[port_id];
1999         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_add, -ENOTSUP);
2000         return (*dev->dev_ops->udp_tunnel_add)(dev, udp_tunnel);
2001 }
2002
2003 int
2004 rte_eth_dev_udp_tunnel_delete(uint8_t port_id,
2005                               struct rte_eth_udp_tunnel *udp_tunnel)
2006 {
2007         struct rte_eth_dev *dev;
2008
2009         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2010         dev = &rte_eth_devices[port_id];
2011
2012         if (udp_tunnel == NULL) {
2013                 PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2014                 return -EINVAL;
2015         }
2016
2017         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2018                 PMD_DEBUG_TRACE("Invalid tunnel type\n");
2019                 return -EINVAL;
2020         }
2021
2022         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_del, -ENOTSUP);
2023         return (*dev->dev_ops->udp_tunnel_del)(dev, udp_tunnel);
2024 }
2025
2026 int
2027 rte_eth_led_on(uint8_t port_id)
2028 {
2029         struct rte_eth_dev *dev;
2030
2031         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2032         dev = &rte_eth_devices[port_id];
2033         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2034         return (*dev->dev_ops->dev_led_on)(dev);
2035 }
2036
2037 int
2038 rte_eth_led_off(uint8_t port_id)
2039 {
2040         struct rte_eth_dev *dev;
2041
2042         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2043         dev = &rte_eth_devices[port_id];
2044         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2045         return (*dev->dev_ops->dev_led_off)(dev);
2046 }
2047
2048 /*
2049  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2050  * an empty spot.
2051  */
2052 static int
2053 get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2054 {
2055         struct rte_eth_dev_info dev_info;
2056         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2057         unsigned i;
2058
2059         rte_eth_dev_info_get(port_id, &dev_info);
2060
2061         for (i = 0; i < dev_info.max_mac_addrs; i++)
2062                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2063                         return i;
2064
2065         return -1;
2066 }
2067
2068 static const struct ether_addr null_mac_addr;
2069
2070 int
2071 rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
2072                         uint32_t pool)
2073 {
2074         struct rte_eth_dev *dev;
2075         int index;
2076         uint64_t pool_mask;
2077
2078         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2079         dev = &rte_eth_devices[port_id];
2080         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2081
2082         if (is_zero_ether_addr(addr)) {
2083                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2084                         port_id);
2085                 return -EINVAL;
2086         }
2087         if (pool >= ETH_64_POOLS) {
2088                 PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2089                 return -EINVAL;
2090         }
2091
2092         index = get_mac_addr_index(port_id, addr);
2093         if (index < 0) {
2094                 index = get_mac_addr_index(port_id, &null_mac_addr);
2095                 if (index < 0) {
2096                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2097                                 port_id);
2098                         return -ENOSPC;
2099                 }
2100         } else {
2101                 pool_mask = dev->data->mac_pool_sel[index];
2102
2103                 /* If both the MAC address and pool are already set, do nothing */
2104                 if (pool_mask & (1ULL << pool))
2105                         return 0;
2106         }
2107
2108         /* Update NIC */
2109         (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2110
2111         /* Update address in NIC data structure */
2112         ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2113
2114         /* Update pool bitmap in NIC data structure */
2115         dev->data->mac_pool_sel[index] |= (1ULL << pool);
2116
2117         return 0;
2118 }
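
/*
 * Illustrative sketch (not part of the library): add a locally
 * administered secondary MAC address to pool 0. The address bytes are
 * arbitrary sample data.
 */
static int
example_add_secondary_mac(uint8_t port_id)
{
        struct ether_addr addr = {
                .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
        };

        return rte_eth_dev_mac_addr_add(port_id, &addr, 0 /* pool */);
}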
2119
2120 int
2121 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
2122 {
2123         struct rte_eth_dev *dev;
2124         int index;
2125
2126         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2127         dev = &rte_eth_devices[port_id];
2128         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2129
2130         index = get_mac_addr_index(port_id, addr);
2131         if (index == 0) {
2132                 PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2133                 return -EADDRINUSE;
2134         } else if (index < 0)
2135                 return 0;  /* Do nothing if address wasn't found */
2136
2137         /* Update NIC */
2138         (*dev->dev_ops->mac_addr_remove)(dev, index);
2139
2140         /* Update address in NIC data structure */
2141         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2142
2143         /* reset pool bitmap */
2144         dev->data->mac_pool_sel[index] = 0;
2145
2146         return 0;
2147 }
2148
2149 int
2150 rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr)
2151 {
2152         struct rte_eth_dev *dev;
2153
2154         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2155
2156         if (!is_valid_assigned_ether_addr(addr))
2157                 return -EINVAL;
2158
2159         dev = &rte_eth_devices[port_id];
2160         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2161
2162         /* Update default address in NIC data structure */
2163         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
2164
2165         (*dev->dev_ops->mac_addr_set)(dev, addr);
2166
2167         return 0;
2168 }
2169
2170 int
2171 rte_eth_dev_set_vf_rxmode(uint8_t port_id, uint16_t vf,
2172                                 uint16_t rx_mode, uint8_t on)
2173 {
2174         uint16_t num_vfs;
2175         struct rte_eth_dev *dev;
2176         struct rte_eth_dev_info dev_info;
2177
2178         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2179
2180         dev = &rte_eth_devices[port_id];
2181         rte_eth_dev_info_get(port_id, &dev_info);
2182
2183         num_vfs = dev_info.max_vfs;
2184         if (vf >= num_vfs) {
2185                 PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
2186                 return -EINVAL;
2187         }
2188
2189         if (rx_mode == 0) {
2190                 PMD_DEBUG_TRACE("set VF RX mode:mode mask can not be zero\n");
2191                 return -EINVAL;
2192         }
2193         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
2194         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
2195 }
2196
2197 /*
2198  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2199  * an empty spot.
2200  */
2201 static int
2202 get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2203 {
2204         struct rte_eth_dev_info dev_info;
2205         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2206         unsigned i;
2207
2208         rte_eth_dev_info_get(port_id, &dev_info);
2209         if (!dev->data->hash_mac_addrs)
2210                 return -1;
2211
2212         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2213                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2214                         ETHER_ADDR_LEN) == 0)
2215                         return i;
2216
2217         return -1;
2218 }
2219
2220 int
2221 rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
2222                                 uint8_t on)
2223 {
2224         int index;
2225         int ret;
2226         struct rte_eth_dev *dev;
2227
2228         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2229
2230         dev = &rte_eth_devices[port_id];
2231         if (is_zero_ether_addr(addr)) {
2232                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2233                         port_id);
2234                 return -EINVAL;
2235         }
2236
2237         index = get_hash_mac_addr_index(port_id, addr);
2238         /* Check if it's already there, and do nothing */
2239         if ((index >= 0) && (on))
2240                 return 0;
2241
2242         if (index < 0) {
2243                 if (!on) {
2244                         PMD_DEBUG_TRACE("port %d: the MAC address was not "
2245                                 "set in UTA\n", port_id);
2246                         return -EINVAL;
2247                 }
2248
2249                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2250                 if (index < 0) {
2251                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2252                                         port_id);
2253                         return -ENOSPC;
2254                 }
2255         }
2256
2257         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2258         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2259         if (ret == 0) {
2260                 /* Update address in NIC data structure */
2261                 if (on)
2262                         ether_addr_copy(addr,
2263                                         &dev->data->hash_mac_addrs[index]);
2264                 else
2265                         ether_addr_copy(&null_mac_addr,
2266                                         &dev->data->hash_mac_addrs[index]);
2267         }
2268
2269         return ret;
2270 }
2271
2272 int
2273 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2274 {
2275         struct rte_eth_dev *dev;
2276
2277         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2278
2279         dev = &rte_eth_devices[port_id];
2280
2281         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2282         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2283 }
2284
2285 int
2286 rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on)
2287 {
2288         uint16_t num_vfs;
2289         struct rte_eth_dev *dev;
2290         struct rte_eth_dev_info dev_info;
2291
2292         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2293
2294         dev = &rte_eth_devices[port_id];
2295         rte_eth_dev_info_get(port_id, &dev_info);
2296
2297         num_vfs = dev_info.max_vfs;
2298         if (vf >= num_vfs) {
2299                 PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2300                 return -EINVAL;
2301         }
2302
2303         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2304         return (*dev->dev_ops->set_vf_rx)(dev, vf, on);
2305 }
2306
2307 int
2308 rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on)
2309 {
2310         uint16_t num_vfs;
2311         struct rte_eth_dev *dev;
2312         struct rte_eth_dev_info dev_info;
2313
2314         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2315
2316         dev = &rte_eth_devices[port_id];
2317         rte_eth_dev_info_get(port_id, &dev_info);
2318
2319         num_vfs = dev_info.max_vfs;
2320         if (vf >= num_vfs) {
2321                 PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
2322                 return -EINVAL;
2323         }
2324
2325         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2326         return (*dev->dev_ops->set_vf_tx)(dev, vf, on);
2327 }
2328
2329 int
2330 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2331                                uint64_t vf_mask, uint8_t vlan_on)
2332 {
2333         struct rte_eth_dev *dev;
2334
2335         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2336
2337         dev = &rte_eth_devices[port_id];
2338
2339         if (vlan_id > ETHER_MAX_VLAN_ID) {
2340                 PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
2341                         vlan_id);
2342                 return -EINVAL;
2343         }
2344
2345         if (vf_mask == 0) {
2346                 PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
2347                 return -EINVAL;
2348         }
2349
2350         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2351         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2352                                                    vf_mask, vlan_on);
2353 }
2354
2355 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
2356                                         uint16_t tx_rate)
2357 {
2358         struct rte_eth_dev *dev;
2359         struct rte_eth_dev_info dev_info;
2360         struct rte_eth_link link;
2361
2362         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2363
2364         dev = &rte_eth_devices[port_id];
2365         rte_eth_dev_info_get(port_id, &dev_info);
2366         link = dev->data->dev_link;
2367
2368         if (queue_idx >= dev_info.max_tx_queues) {
2369                 PMD_DEBUG_TRACE("set queue rate limit:port %d: "
2370                                 "invalid queue id=%d\n", port_id, queue_idx);
2371                 return -EINVAL;
2372         }
2373
2374         if (tx_rate > link.link_speed) {
2375                 PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
2376                                 "bigger than link speed = %d\n",
2377                         tx_rate, link.link_speed);
2378                 return -EINVAL;
2379         }
2380
2381         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2382         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2383 }
2384
2385 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
2386                                 uint64_t q_msk)
2387 {
2388         struct rte_eth_dev *dev;
2389         struct rte_eth_dev_info dev_info;
2390         struct rte_eth_link link;
2391
2392         if (q_msk == 0)
2393                 return 0;
2394
2395         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2396
2397         dev = &rte_eth_devices[port_id];
2398         rte_eth_dev_info_get(port_id, &dev_info);
2399         link = dev->data->dev_link;
2400
2401         if (vf >= dev_info.max_vfs) {
2402                 PMD_DEBUG_TRACE("set VF rate limit:port %d: "
2403                                 "invalid vf id=%d\n", port_id, vf);
2404                 return -EINVAL;
2405         }
2406
2407         if (tx_rate > link.link_speed) {
2408                 PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
2409                                 "bigger than link speed = %d\n",
2410                                 tx_rate, link.link_speed);
2411                 return -EINVAL;
2412         }
2413
2414         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
2415         return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
2416 }
2417
2418 int
2419 rte_eth_mirror_rule_set(uint8_t port_id,
2420                         struct rte_eth_mirror_conf *mirror_conf,
2421                         uint8_t rule_id, uint8_t on)
2422 {
2423         struct rte_eth_dev *dev;
2424
2425         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2426         if (mirror_conf->rule_type == 0) {
2427                 PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2428                 return -EINVAL;
2429         }
2430
2431         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2432                 PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
2433                                 ETH_64_POOLS - 1);
2434                 return -EINVAL;
2435         }
2436
2437         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
2438              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
2439             (mirror_conf->pool_mask == 0)) {
2440                 PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
2441                 return -EINVAL;
2442         }
2443
2444         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
2445             mirror_conf->vlan.vlan_mask == 0) {
2446                 PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
2447                 return -EINVAL;
2448         }
2449
2450         dev = &rte_eth_devices[port_id];
2451         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2452
2453         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2454 }
2455
2456 int
2457 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2458 {
2459         struct rte_eth_dev *dev;
2460
2461         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2462
2463         dev = &rte_eth_devices[port_id];
2464         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2465
2466         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2467 }
2468
2469 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2470 uint16_t
2471 rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
2472                  struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2473 {
2474         struct rte_eth_dev *dev;
2475
2476         VALID_PORTID_OR_ERR_RET(port_id, 0);
2477
2478         dev = &rte_eth_devices[port_id];
2479         FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
2480         if (queue_id >= dev->data->nb_rx_queues) {
2481                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
2482                 return 0;
2483         }
2484         return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
2485                                                 rx_pkts, nb_pkts);
2486 }
2487
2488 uint16_t
2489 rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
2490                  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2491 {
2492         struct rte_eth_dev *dev;
2493
2494         VALID_PORTID_OR_ERR_RET(port_id, 0);
2495
2496         dev = &rte_eth_devices[port_id];
2497
2498         FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
2499         if (queue_id >= dev->data->nb_tx_queues) {
2500                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
2501                 return 0;
2502         }
2503         return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
2504                                                 tx_pkts, nb_pkts);
2505 }
2506
2507 uint32_t
2508 rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
2509 {
2510         struct rte_eth_dev *dev;
2511
2512         VALID_PORTID_OR_ERR_RET(port_id, 0);
2513
2514         dev = &rte_eth_devices[port_id];
2515         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, 0);
2516         return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
2517 }
2518
2519 int
2520 rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
2521 {
2522         struct rte_eth_dev *dev;
2523
2524         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2525
2526         dev = &rte_eth_devices[port_id];
2527         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
2528         return (*dev->dev_ops->rx_descriptor_done)(dev->data->rx_queues[queue_id],
2529                                                    offset);
2530 }
2531 #endif
2532
2533 int
2534 rte_eth_dev_callback_register(uint8_t port_id,
2535                         enum rte_eth_event_type event,
2536                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2537 {
2538         struct rte_eth_dev *dev;
2539         struct rte_eth_dev_callback *user_cb;
2540
2541         if (!cb_fn)
2542                 return -EINVAL;
2543
2544         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2545
2546         dev = &rte_eth_devices[port_id];
2547         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2548
2549         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
2550                 if (user_cb->cb_fn == cb_fn &&
2551                         user_cb->cb_arg == cb_arg &&
2552                         user_cb->event == event) {
2553                         break;
2554                 }
2555         }
2556
2557         /* create a new callback. */
2558         if (user_cb == NULL)
2559                 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
2560                                       sizeof(struct rte_eth_dev_callback), 0);
2561         if (user_cb != NULL) {
2562                 user_cb->cb_fn = cb_fn;
2563                 user_cb->cb_arg = cb_arg;
2564                 user_cb->event = event;
2565                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
2566         }
2567
2568         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2569         return (user_cb == NULL) ? -ENOMEM : 0;
2570 }
2571
2572 int
2573 rte_eth_dev_callback_unregister(uint8_t port_id,
2574                         enum rte_eth_event_type event,
2575                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2576 {
2577         int ret;
2578         struct rte_eth_dev *dev;
2579         struct rte_eth_dev_callback *cb, *next;
2580
2581         if (!cb_fn)
2582                 return -EINVAL;
2583
2584         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2585
2586         dev = &rte_eth_devices[port_id];
2587         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2588
2589         ret = 0;
2590         for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
2591
2592                 next = TAILQ_NEXT(cb, next);
2593
2594                 if (cb->cb_fn != cb_fn || cb->event != event ||
2595                                 (cb->cb_arg != (void *)-1 &&
2596                                 cb->cb_arg != cb_arg))
2597                         continue;
2598
2599                 /*
2600                  * if this callback is not executing right now,
2601                  * then remove it.
2602                  */
2603                 if (cb->active == 0) {
2604                         TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
2605                         rte_free(cb);
2606                 } else {
2607                         ret = -EAGAIN;
2608                 }
2609         }
2610
2611         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2612         return ret;
2613 }
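
/*
 * Illustrative sketch (not part of the library): a link-state change
 * callback and its registration. The callback runs from the EAL
 * interrupt thread, so it only reads the link state and must not block.
 */
static void
example_lsc_cb(uint8_t port_id, enum rte_eth_event_type event, void *arg)
{
        struct rte_eth_link link;

        RTE_SET_USED(event);
        RTE_SET_USED(arg);
        rte_eth_link_get_nowait(port_id, &link);
        printf("port %u link %s\n", port_id,
               link.link_status ? "up" : "down");
}

static int
example_watch_link(uint8_t port_id)
{
        return rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
                                             example_lsc_cb, NULL);
}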
2614
2615 void
2616 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
2617         enum rte_eth_event_type event)
2618 {
2619         struct rte_eth_dev_callback *cb_lst;
2620         struct rte_eth_dev_callback dev_cb;
2621
2622         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2623         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
2624                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
2625                         continue;
2626                 dev_cb = *cb_lst;
2627                 cb_lst->active = 1;
2628                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2629                 dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
2630                                                 dev_cb.cb_arg);
2631                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
2632                 cb_lst->active = 0;
2633         }
2634         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2635 }
2636
2637 int
2638 rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
2639 {
2640         uint32_t vec;
2641         struct rte_eth_dev *dev;
2642         struct rte_intr_handle *intr_handle;
2643         uint16_t qid;
2644         int rc;
2645
2646         if (!rte_eth_dev_is_valid_port(port_id)) {
2647                 PMD_DEBUG_TRACE("Invalid port_id=%u\n", port_id);
2648                 return -ENODEV;
2649         }
2650
2651         dev = &rte_eth_devices[port_id];
2652         intr_handle = &dev->pci_dev->intr_handle;
2653         if (!intr_handle->intr_vec) {
2654                 PMD_DEBUG_TRACE("RX Intr vector unset\n");
2655                 return -EPERM;
2656         }
2657
2658         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
2659                 vec = intr_handle->intr_vec[qid];
2660                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2661                 if (rc && rc != -EEXIST) {
2662                         PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2663                                         " op %d epfd %d vec %u\n",
2664                                         port_id, qid, op, epfd, vec);
2665                 }
2666         }
2667
2668         return 0;
2669 }
2670
2671 int
2672 rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
2673                           int epfd, int op, void *data)
2674 {
2675         uint32_t vec;
2676         struct rte_eth_dev *dev;
2677         struct rte_intr_handle *intr_handle;
2678         int rc;
2679
2680         if (!rte_eth_dev_is_valid_port(port_id)) {
2681                 PMD_DEBUG_TRACE("Invalid port_id=%u\n", port_id);
2682                 return -ENODEV;
2683         }
2684
2685         dev = &rte_eth_devices[port_id];
2686         if (queue_id >= dev->data->nb_rx_queues) {
2687                 PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
2688                 return -EINVAL;
2689         }
2690
2691         intr_handle = &dev->pci_dev->intr_handle;
2692         if (!intr_handle->intr_vec) {
2693                 PMD_DEBUG_TRACE("RX Intr vector unset\n");
2694                 return -EPERM;
2695         }
2696
2697         vec = intr_handle->intr_vec[queue_id];
2698         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2699         if (rc && rc != -EEXIST) {
2700                 PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2701                                 " op %d epfd %d vec %u\n",
2702                                 port_id, queue_id, op, epfd, vec);
2703                 return rc;
2704         }
2705
2706         return 0;
2707 }
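
/*
 * Sketch (illustrative, and assuming the rte_epoll_* helpers and
 * RTE_EPOLL_PER_THREAD from the EAL interrupt API): arm one RX queue
 * for interrupt-driven polling on the per-thread epoll instance and
 * block until the queue interrupt fires.
 */
static int
example_wait_rx_intr(uint8_t port_id, uint16_t queue_id)
{
        struct rte_epoll_event ev;
        int ret;

        ret = rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
                                        RTE_EPOLL_PER_THREAD,
                                        RTE_INTR_EVENT_ADD, NULL);
        if (ret < 0)
                return ret;
        ret = rte_eth_dev_rx_intr_enable(port_id, queue_id);
        if (ret < 0)
                return ret;
        /* block until the NIC raises the queue interrupt */
        return rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
}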
2708
2709 int
2710 rte_eth_dev_rx_intr_enable(uint8_t port_id,
2711                            uint16_t queue_id)
2712 {
2713         struct rte_eth_dev *dev;
2714
2715         if (!rte_eth_dev_is_valid_port(port_id)) {
2716                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2717                 return -ENODEV;
2718         }
2719
2720         dev = &rte_eth_devices[port_id];
2721
2722         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
2723         return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
2724 }
2725
2726 int
2727 rte_eth_dev_rx_intr_disable(uint8_t port_id,
2728                             uint16_t queue_id)
2729 {
2730         struct rte_eth_dev *dev;
2731
2732         if (!rte_eth_dev_is_valid_port(port_id)) {
2733                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2734                 return -ENODEV;
2735         }
2736
2737         dev = &rte_eth_devices[port_id];
2738
2739         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
2740         return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
2741 }
2742
2743 #ifdef RTE_NIC_BYPASS
2744 int rte_eth_dev_bypass_init(uint8_t port_id)
2745 {
2746         struct rte_eth_dev *dev;
2747
2748         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2749
2750         dev = &rte_eth_devices[port_id];
2751         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
2752         (*dev->dev_ops->bypass_init)(dev);
2753         return 0;
2754 }
2755
2756 int
2757 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
2758 {
2759         struct rte_eth_dev *dev;
2760
2761         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2762
2763         dev = &rte_eth_devices[port_id];
2764         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2765         (*dev->dev_ops->bypass_state_show)(dev, state);
2766         return 0;
2767 }
2768
2769 int
2770 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
2771 {
2772         struct rte_eth_dev *dev;
2773
2774         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2775
2776         dev = &rte_eth_devices[port_id];
2777         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
2778         (*dev->dev_ops->bypass_state_set)(dev, new_state);
2779         return 0;
2780 }
2781
2782 int
2783 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
2784 {
2785         struct rte_eth_dev *dev;
2786
2787         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2788
2789         dev = &rte_eth_devices[port_id];
2790         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_show, -ENOTSUP);
2791         (*dev->dev_ops->bypass_event_show)(dev, event, state);
2792         return 0;
2793 }
2794
2795 int
2796 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
2797 {
2798         struct rte_eth_dev *dev;
2799
2800         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2801
2802         dev = &rte_eth_devices[port_id];
2803
2804         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
2805         (*dev->dev_ops->bypass_event_set)(dev, event, state);
2806         return 0;
2807 }
2808
2809 int
2810 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
2811 {
2812         struct rte_eth_dev *dev;
2813
2814         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2815
2816         dev = &rte_eth_devices[port_id];
2817
2818         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
2819         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
2820         return 0;
2821 }
2822
2823 int
2824 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
2825 {
2826         struct rte_eth_dev *dev;
2827
2828         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2829
2830         dev = &rte_eth_devices[port_id];
2831
2832         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
2833         (*dev->dev_ops->bypass_ver_show)(dev, ver);
2834         return 0;
2835 }
2836
2837 int
2838 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
2839 {
2840         struct rte_eth_dev *dev;
2841
2842         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2843
2844         dev = &rte_eth_devices[port_id];
2845
2846         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
2847         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
2848         return 0;
2849 }
2850
2851 int
2852 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
2853 {
2854         struct rte_eth_dev *dev;
2855
2856         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2857
2858         dev = &rte_eth_devices[port_id];
2859
2860         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
2861         (*dev->dev_ops->bypass_wd_reset)(dev);
2862         return 0;
2863 }
2864 #endif
2865
2866 int
2867 rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
2868 {
2869         struct rte_eth_dev *dev;
2870
2871         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2872
2873         dev = &rte_eth_devices[port_id];
2874         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2875         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
2876                                 RTE_ETH_FILTER_NOP, NULL);
2877 }
2878
2879 int
2880 rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
2881                        enum rte_filter_op filter_op, void *arg)
2882 {
2883         struct rte_eth_dev *dev;
2884
2885         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2886
2887         dev = &rte_eth_devices[port_id];
2888         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2889         return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
2890 }
2891
2892 void *
2893 rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
2894                 rte_rx_callback_fn fn, void *user_param)
2895 {
2896 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2897         rte_errno = ENOTSUP;
2898         return NULL;
2899 #endif
2900         /* check input parameters */
2901         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2902                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2903                 rte_errno = EINVAL;
2904                 return NULL;
2905         }
2906
2907         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2908
2909         if (cb == NULL) {
2910                 rte_errno = ENOMEM;
2911                 return NULL;
2912         }
2913
2914         cb->fn.rx = fn;
2915         cb->param = user_param;
2916
2917         /* Add the callbacks in fifo order. */
2918         struct rte_eth_rxtx_callback *tail =
2919                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
2920
2921         if (!tail) {
2922                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
2923
2924         } else {
2925                 while (tail->next)
2926                         tail = tail->next;
2927                 tail->next = cb;
2928         }
2929
2930         return cb;
2931 }
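
/*
 * Illustrative sketch (not part of the library): an RX callback that
 * counts delivered packets per queue. user_param is assumed to point at
 * a uint64_t counter owned by the caller; register it with
 * rte_eth_add_rx_callback(port_id, queue_id, example_count_rx_cb, &cnt).
 */
static uint16_t
example_count_rx_cb(uint8_t port_id, uint16_t queue_id,
                struct rte_mbuf **pkts, uint16_t nb_pkts,
                uint16_t max_pkts, void *user_param)
{
        uint64_t *counter = user_param;

        RTE_SET_USED(port_id);
        RTE_SET_USED(queue_id);
        RTE_SET_USED(pkts);
        RTE_SET_USED(max_pkts);
        *counter += nb_pkts;
        return nb_pkts;
}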
2932
2933 void *
2934 rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
2935                 rte_tx_callback_fn fn, void *user_param)
2936 {
2937 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2938         rte_errno = ENOTSUP;
2939         return NULL;
2940 #endif
2941         /* check input parameters */
2942         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2943                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
2944                 rte_errno = EINVAL;
2945                 return NULL;
2946         }
2947
2948         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2949
2950         if (cb == NULL) {
2951                 rte_errno = ENOMEM;
2952                 return NULL;
2953         }
2954
2955         cb->fn.tx = fn;
2956         cb->param = user_param;
2957
2958         /* Add the callbacks in fifo order. */
2959         struct rte_eth_rxtx_callback *tail =
2960                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
2961
2962         if (!tail) {
2963                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
2964
2965         } else {
2966                 while (tail->next)
2967                         tail = tail->next;
2968                 tail->next = cb;
2969         }
2970
2971         return cb;
2972 }
2973
2974 int
2975 rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
2976                 struct rte_eth_rxtx_callback *user_cb)
2977 {
2978 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2979         return -ENOTSUP;
2980 #endif
2981         /* Check input parameters. */
2982         if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
2983                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2984                 return -EINVAL;
2985         }
2986
2987         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2988         struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
2989         struct rte_eth_rxtx_callback *prev_cb;
2990
2991         /* Reset head pointer and remove user cb if first in the list. */
2992         if (cb == user_cb) {
2993                 dev->post_rx_burst_cbs[queue_id] = user_cb->next;
2994                 return 0;
2995         }
2996
2997         /* Remove the user cb from the callback list. */
2998         do {
2999                 prev_cb = cb;
3000                 cb = cb->next;
3001
3002                 if (cb == user_cb) {
3003                         prev_cb->next = user_cb->next;
3004                         return 0;
3005                 }
3006
3007         } while (cb != NULL);
3008
3009         /* Callback wasn't found. */
3010         return -EINVAL;
3011 }
3012
3013 int
3014 rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
3015                 struct rte_eth_rxtx_callback *user_cb)
3016 {
3017 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3018         return -ENOTSUP;
3019 #endif
3020         /* Check input parameters. */
3021         if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
3022                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3023                 return -EINVAL;
3024         }
3025
3026         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3027         struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
3028         struct rte_eth_rxtx_callback *prev_cb;
3029
3030         /* Reset head pointer and remove user cb if first in the list. */
3031         if (cb == user_cb) {
3032                 dev->pre_tx_burst_cbs[queue_id] = user_cb->next;
3033                 return 0;
3034         }
3035
3036         /* Remove the user cb from the callback list. */
3037         do {
3038                 prev_cb = cb;
3039                 cb = cb->next;
3040
3041                 if (cb == user_cb) {
3042                         prev_cb->next = user_cb->next;
3043                         return 0;
3044                 }
3045
3046         } while (cb != NULL);
3047
3048         /* Callback wasn't found. */
3049         return -EINVAL;
3050 }
3051
3052 int
3053 rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3054         struct rte_eth_rxq_info *qinfo)
3055 {
3056         struct rte_eth_dev *dev;
3057
3058         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3059
3060         if (qinfo == NULL)
3061                 return -EINVAL;
3062
3063         dev = &rte_eth_devices[port_id];
3064         if (queue_id >= dev->data->nb_rx_queues) {
3065                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3066                 return -EINVAL;
3067         }
3068
3069         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3070
3071         memset(qinfo, 0, sizeof(*qinfo));
3072         (*dev->dev_ops->rxq_info_get)(dev, queue_id, qinfo);
3073         return 0;
3074 }
3075
3076 int
3077 rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3078         struct rte_eth_txq_info *qinfo)
3079 {
3080         struct rte_eth_dev *dev;
3081
3082         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3083
3084         if (qinfo == NULL)
3085                 return -EINVAL;
3086
3087         dev = &rte_eth_devices[port_id];
3088         if (queue_id >= dev->data->nb_tx_queues) {
3089                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3090                 return -EINVAL;
3091         }
3092
3093         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3094
3095         memset(qinfo, 0, sizeof(*qinfo));
3096         (*dev->dev_ops->txq_info_get)(dev, queue_id, qinfo);
3097         return 0;
3098 }
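
/*
 * Illustrative sketch (not part of the library): print the descriptor
 * ring sizes of queue 0 using the two query functions above, assuming
 * the nb_desc fields declared for these structures in rte_ethdev.h.
 */
static void
example_print_queue_sizes(uint8_t port_id)
{
        struct rte_eth_rxq_info rxq;
        struct rte_eth_txq_info txq;

        if (rte_eth_rx_queue_info_get(port_id, 0, &rxq) == 0)
                printf("rxq0: %u descriptors\n", rxq.nb_desc);
        if (rte_eth_tx_queue_info_get(port_id, 0, &txq) == 0)
                printf("txq0: %u descriptors\n", txq.nb_desc);
}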
3099
3100 int
3101 rte_eth_dev_set_mc_addr_list(uint8_t port_id,
3102                              struct ether_addr *mc_addr_set,
3103                              uint32_t nb_mc_addr)
3104 {
3105         struct rte_eth_dev *dev;
3106
3107         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3108
3109         dev = &rte_eth_devices[port_id];
3110         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3111         return (*dev->dev_ops->set_mc_addr_list)(dev, mc_addr_set, nb_mc_addr);
3112 }
3113
3114 int
3115 rte_eth_timesync_enable(uint8_t port_id)
3116 {
3117         struct rte_eth_dev *dev;
3118
3119         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3120         dev = &rte_eth_devices[port_id];
3121
3122         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3123         return (*dev->dev_ops->timesync_enable)(dev);
3124 }
3125
3126 int
3127 rte_eth_timesync_disable(uint8_t port_id)
3128 {
3129         struct rte_eth_dev *dev;
3130
3131         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3132         dev = &rte_eth_devices[port_id];
3133
3134         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3135         return (*dev->dev_ops->timesync_disable)(dev);
3136 }
3137
3138 int
3139 rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp,
3140                                    uint32_t flags)
3141 {
3142         struct rte_eth_dev *dev;
3143
3144         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3145         dev = &rte_eth_devices[port_id];
3146
3147         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3148         return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
3149 }
3150
3151 int
3152 rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp)
3153 {
3154         struct rte_eth_dev *dev;
3155
3156         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3157         dev = &rte_eth_devices[port_id];
3158
3159         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3160         return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
3161 }
3162
3163 int
3164 rte_eth_dev_get_reg_length(uint8_t port_id)
3165 {
3166         struct rte_eth_dev *dev;
3167
3168         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3169
3170         dev = &rte_eth_devices[port_id];
3171         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg_length, -ENOTSUP);
3172         return (*dev->dev_ops->get_reg_length)(dev);
3173 }
3174
3175 int
3176 rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info)
3177 {
3178         struct rte_eth_dev *dev;
3179
3180         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3181
3182         dev = &rte_eth_devices[port_id];
3183         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
3184         return (*dev->dev_ops->get_reg)(dev, info);
3185 }
3186
3187 int
3188 rte_eth_dev_get_eeprom_length(uint8_t port_id)
3189 {
3190         struct rte_eth_dev *dev;
3191
3192         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3193
3194         dev = &rte_eth_devices[port_id];
3195         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
3196         return (*dev->dev_ops->get_eeprom_length)(dev);
3197 }
3198
3199 int
3200 rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
3201 {
3202         struct rte_eth_dev *dev;
3203
3204         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3205
3206         dev = &rte_eth_devices[port_id];
3207         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
3208         return (*dev->dev_ops->get_eeprom)(dev, info);
3209 }
3210
3211 int
3212 rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
3213 {
3214         struct rte_eth_dev *dev;
3215
3216         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3217
3218         dev = &rte_eth_devices[port_id];
3219         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
3220         return (*dev->dev_ops->set_eeprom)(dev, info);
3221 }
3222
3223 int
3224 rte_eth_dev_get_dcb_info(uint8_t port_id,
3225                              struct rte_eth_dcb_info *dcb_info)
3226 {
3227         struct rte_eth_dev *dev;
3228
3229         if (!rte_eth_dev_is_valid_port(port_id)) {
3230                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3231                 return -ENODEV;
3232         }
3233
3234         dev = &rte_eth_devices[port_id];
3235         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
3236
3237         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
3238         return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
3239 }