ethdev: remove some PCI specific handling
lib/librte_ether/rte_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_ether.h"
#include "rte_ethdev.h"

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
#define PMD_DEBUG_TRACE(fmt, args...) do {                        \
                RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
        } while (0)
#else
#define PMD_DEBUG_TRACE(fmt, args...)
#endif
/* Macros to restrict functions to the primary process only */
#define PROC_PRIMARY_OR_ERR_RET(retval) do { \
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
                PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
                return (retval); \
        } \
} while (0)

#define PROC_PRIMARY_OR_RET() do { \
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
                PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
                return; \
        } \
} while (0)

/* Macros to check for invalid function pointers in the dev_ops structure */
#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
        if ((func) == NULL) { \
                PMD_DEBUG_TRACE("Function not supported\n"); \
                return (retval); \
        } \
} while (0)

#define FUNC_PTR_OR_RET(func) do { \
        if ((func) == NULL) { \
                PMD_DEBUG_TRACE("Function not supported\n"); \
                return; \
        } \
} while (0)

/* Macros to check for a valid port */
#define VALID_PORTID_OR_ERR_RET(port_id, retval) do {           \
        if (!rte_eth_dev_is_valid_port(port_id)) {              \
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
                return retval;                                  \
        }                                                       \
} while (0)

#define VALID_PORTID_OR_RET(port_id) do {                       \
        if (!rte_eth_dev_is_valid_port(port_id)) {              \
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
                return;                                         \
        }                                                       \
} while (0)

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static struct rte_eth_dev_data *rte_eth_dev_data;
static uint8_t nb_ports;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
        {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
                rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
                sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
                sizeof(rte_txq_stats_strings[0]))

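/*
 * Note on naming: the tables above feed the default rte_eth_xstats_get()
 * path below, which prefixes the per-queue entries with "rx_qN_" and
 * "tx_qN_" (see the snprintf() calls in that function); e.g. queue 0 of
 * the rte_rxq_stats_strings table is reported as "rx_q0_packets".
 */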

/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user
 * application, the pointer to the parameters for the callback,
 * and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

enum {
        DEV_DETACHED = 0,
        DEV_ATTACHED
};

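/*
 * The per-port data lives in a shared memzone (MZ_RTE_ETH_DEV_DATA) so
 * that secondary processes can map the same rte_eth_dev_data array: the
 * primary process reserves and zeroes it, secondaries only look it up.
 */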
static void
rte_eth_dev_data_alloc(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
                                rte_socket_id(), flags);
        } else
                mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
        if (mz == NULL)
                rte_panic("Cannot allocate memzone for ethernet port data\n");

        rte_eth_dev_data = mz->addr;
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                memset(rte_eth_dev_data, 0,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

static uint8_t
rte_eth_dev_find_free_port(void)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].attached == DEV_DETACHED)
                        return i;
        }
        return RTE_MAX_ETHPORTS;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name, enum rte_eth_dev_type type)
{
        uint8_t port_id;
        struct rte_eth_dev *eth_dev;

        port_id = rte_eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
                return NULL;
        }

        if (rte_eth_dev_data == NULL)
                rte_eth_dev_data_alloc();

        if (rte_eth_dev_allocated(name) != NULL) {
                PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
                                name);
                return NULL;
        }

        eth_dev = &rte_eth_devices[port_id];
        eth_dev->data = &rte_eth_dev_data[port_id];
        snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
        eth_dev->data->port_id = port_id;
        eth_dev->attached = DEV_ATTACHED;
        eth_dev->dev_type = type;
        nb_ports++;
        return eth_dev;
}

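/*
 * For PCI devices the ethdev name is derived from the PCI address in
 * "bus:devid.function" form; for example, a device at bus 8, devid 0,
 * function 1 gets the name "8:0.1" (decimal fields, no zero padding).
 */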
static int
rte_eth_dev_create_unique_device_name(char *name, size_t size,
                struct rte_pci_device *pci_dev)
{
        int ret;

        if ((name == NULL) || (pci_dev == NULL))
                return -EINVAL;

        ret = snprintf(name, size, "%d:%d.%d",
                        pci_dev->addr.bus, pci_dev->addr.devid,
                        pci_dev->addr.function);
        if (ret < 0)
                return ret;
        return 0;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        eth_dev->attached = DEV_DETACHED;
        nb_ports--;
        return 0;
}

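/*
 * PCI probe hook: rte_eth_driver_register() below wires this function into
 * the driver's pci_drv.devinit, so the EAL calls it once for every matching
 * PCI device. It allocates the ethdev slot and the private data (in the
 * primary process only), then hands over to the PMD's eth_dev_init().
 */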
static int
rte_eth_dev_init(struct rte_pci_driver *pci_drv,
                 struct rte_pci_device *pci_dev)
{
        struct eth_driver    *eth_drv;
        struct rte_eth_dev *eth_dev;
        char ethdev_name[RTE_ETH_NAME_MAX_LEN];

        int diag;

        eth_drv = (struct eth_driver *)pci_drv;

        /* Create unique Ethernet device name using PCI address */
        rte_eth_dev_create_unique_device_name(ethdev_name,
                        sizeof(ethdev_name), pci_dev);

        eth_dev = rte_eth_dev_allocate(ethdev_name, RTE_ETH_DEV_PCI);
        if (eth_dev == NULL)
                return -ENOMEM;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
                                  eth_drv->dev_private_size,
                                  RTE_CACHE_LINE_SIZE);
                if (eth_dev->data->dev_private == NULL)
                        rte_panic("Cannot allocate memzone for private port data\n");
        }
        eth_dev->pci_dev = pci_dev;
        eth_dev->driver = eth_drv;
        eth_dev->data->rx_mbuf_alloc_failed = 0;

        /* init user callbacks */
        TAILQ_INIT(&(eth_dev->link_intr_cbs));

        /* Set the default MTU */
        eth_dev->data->mtu = ETHER_MTU;

        /* Invoke PMD device initialization function */
        diag = (*eth_drv->eth_dev_init)(eth_dev);
        if (diag == 0)
                return 0;

        PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%x device_id=0x%x) failed\n",
                        pci_drv->name,
                        (unsigned) pci_dev->id.vendor_id,
                        (unsigned) pci_dev->id.device_id);
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);
        rte_eth_dev_release_port(eth_dev);
        return diag;
}

static int
rte_eth_dev_uninit(struct rte_pci_device *pci_dev)
{
        const struct eth_driver *eth_drv;
        struct rte_eth_dev *eth_dev;
        char ethdev_name[RTE_ETH_NAME_MAX_LEN];
        int ret;

        if (pci_dev == NULL)
                return -EINVAL;

        /* Create unique Ethernet device name using PCI address */
        rte_eth_dev_create_unique_device_name(ethdev_name,
                        sizeof(ethdev_name), pci_dev);

        eth_dev = rte_eth_dev_allocated(ethdev_name);
        if (eth_dev == NULL)
                return -ENODEV;

        eth_drv = (const struct eth_driver *)pci_dev->driver;

        /* Invoke PMD device uninit function */
        if (*eth_drv->eth_dev_uninit) {
                ret = (*eth_drv->eth_dev_uninit)(eth_dev);
                if (ret)
                        return ret;
        }

        /* free ether device */
        rte_eth_dev_release_port(eth_dev);

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);

        eth_dev->pci_dev = NULL;
        eth_dev->driver = NULL;
        eth_dev->data = NULL;

        return 0;
}

/**
 * Register an Ethernet [Poll Mode] driver.
 *
 * Function invoked by the initialization function of an Ethernet driver
 * to simultaneously register itself as a PCI driver and as an Ethernet
 * Poll Mode Driver.
 * Invokes the rte_eal_pci_register() function to register the *pci_drv*
 * structure embedded in the *eth_drv* structure, after having stored the
 * address of the rte_eth_dev_init() function in the *devinit* field of
 * the *pci_drv* structure.
 * During the PCI probing phase, the rte_eth_dev_init() function is
 * invoked for each PCI [Ethernet device] matching the embedded PCI
 * identifiers provided by the driver.
 */
void
rte_eth_driver_register(struct eth_driver *eth_drv)
{
        eth_drv->pci_drv.devinit = rte_eth_dev_init;
        eth_drv->pci_drv.devuninit = rte_eth_dev_uninit;
        rte_eal_pci_register(&eth_drv->pci_drv);
}

int
rte_eth_dev_is_valid_port(uint8_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            rte_eth_devices[port_id].attached != DEV_ATTACHED)
                return 0;
        else
                return 1;
}

int
rte_eth_dev_socket_id(uint8_t port_id)
{
        if (!rte_eth_dev_is_valid_port(port_id))
                return -1;
        return rte_eth_devices[port_id].data->numa_node;
}

uint8_t
rte_eth_dev_count(void)
{
        return nb_ports;
}

static enum rte_eth_dev_type
rte_eth_dev_get_device_type(uint8_t port_id)
{
        if (!rte_eth_dev_is_valid_port(port_id))
                return RTE_ETH_DEV_UNKNOWN;
        return rte_eth_devices[port_id].dev_type;
}

static int
rte_eth_dev_get_addr_by_port(uint8_t port_id, struct rte_pci_addr *addr)
{
        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (addr == NULL) {
                PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        *addr = rte_eth_devices[port_id].pci_dev->addr;
        return 0;
}

static int
rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
{
        char *tmp;

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (name == NULL) {
                PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        /* don't check 'rte_eth_devices[i].data' here,
         * because it might be overwritten by a VDEV PMD */
        tmp = rte_eth_dev_data[port_id].name;
        strcpy(name, tmp);
        return 0;
}

static int
rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
{
        int i;

        if (name == NULL) {
                PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        *port_id = RTE_MAX_ETHPORTS;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                /* use an exact match so that a name that is a prefix of
                 * another port's name cannot match the wrong port */
                if (strcmp(name, rte_eth_dev_data[i].name) == 0) {
                        *port_id = i;
                        return 0;
                }
        }
        return -ENODEV;
}

static int
rte_eth_dev_get_port_by_addr(const struct rte_pci_addr *addr, uint8_t *port_id)
{
        int i;
        struct rte_pci_device *pci_dev = NULL;

        if (addr == NULL) {
                PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        *port_id = RTE_MAX_ETHPORTS;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                pci_dev = rte_eth_devices[i].pci_dev;
                if (pci_dev &&
                        !rte_eal_compare_pci_addr(&pci_dev->addr, addr)) {
                        *port_id = i;
                        return 0;
                }
        }
        return -ENODEV;
}

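/*
 * A port can be detached only if its kernel driver allows it (the UIO
 * variants, or no kernel driver at all; VFIO is refused with -ENOTSUP)
 * and the PMD advertises the RTE_ETH_DEV_DETACHABLE flag.
 */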
static int
rte_eth_dev_is_detachable(uint8_t port_id)
{
        uint32_t dev_flags;

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return -EINVAL;
        }

        switch (rte_eth_devices[port_id].data->kdrv) {
        case RTE_KDRV_IGB_UIO:
        case RTE_KDRV_UIO_GENERIC:
        case RTE_KDRV_NIC_UIO:
        case RTE_KDRV_NONE:
                break;
        case RTE_KDRV_VFIO:
        default:
                return -ENOTSUP;
        }
        dev_flags = rte_eth_devices[port_id].data->dev_flags;
        return !(dev_flags & RTE_ETH_DEV_DETACHABLE);
}

/* attach the new physical device, then store port_id of the device */
static int
rte_eth_dev_attach_pdev(struct rte_pci_addr *addr, uint8_t *port_id)
{
        if ((addr == NULL) || (port_id == NULL))
                goto err;

        /* re-construct pci_device_list */
        if (rte_eal_pci_scan())
                goto err;
        /* Invoke the probe function of a driver that can handle the device */
        if (rte_eal_pci_probe_one(addr))
                goto err;

        if (rte_eth_dev_get_port_by_addr(addr, port_id))
                goto err;

        return 0;
err:
        RTE_LOG(ERR, EAL, "Driver cannot attach the device\n");
        return -1;
}

/* detach the physical device, then store the pci_addr of the device */
static int
rte_eth_dev_detach_pdev(uint8_t port_id, struct rte_pci_addr *addr)
{
        struct rte_pci_addr freed_addr;
        struct rte_pci_addr vp;

        if (addr == NULL)
                goto err;

        /* check whether the driver supports the detach feature, or not */
        if (rte_eth_dev_is_detachable(port_id))
                goto err;

        /* get pci address by port id */
        if (rte_eth_dev_get_addr_by_port(port_id, &freed_addr))
                goto err;

        /* a zeroed pci addr means the port comes from a virtual device */
        vp.domain = vp.bus = vp.devid = vp.function = 0;
        if (rte_eal_compare_pci_addr(&vp, &freed_addr) == 0)
                goto err;

        /* invoke the devuninit func of the pci driver,
         * also remove the device from pci_device_list */
        if (rte_eal_pci_detach(&freed_addr))
                goto err;

        *addr = freed_addr;
        return 0;
err:
        RTE_LOG(ERR, EAL, "Driver cannot detach the device\n");
        return -1;
}

/* attach the new virtual device, then store port_id of the device */
static int
rte_eth_dev_attach_vdev(const char *vdevargs, uint8_t *port_id)
{
        char *name = NULL, *args = NULL;
        int ret = -1;

        if ((vdevargs == NULL) || (port_id == NULL))
                goto end;

        /* parse vdevargs, then retrieve the device name and args */
        if (rte_eal_parse_devargs_str(vdevargs, &name, &args))
                goto end;

        /* walk through the dev_driver_list to find the driver of the device,
         * then invoke the probe function of the driver.
         * rte_eal_vdev_init() updates port_id allocated after
         * initialization.
         */
        if (rte_eal_vdev_init(name, args))
                goto end;

        if (rte_eth_dev_get_port_by_name(name, port_id))
                goto end;

        ret = 0;
end:
        if (name)
                free(name);
        if (args)
                free(args);

        if (ret < 0)
                RTE_LOG(ERR, EAL, "Driver cannot attach the device\n");
        return ret;
}

/* detach the virtual device, then store the name of the device */
static int
rte_eth_dev_detach_vdev(uint8_t port_id, char *vdevname)
{
        char name[RTE_ETH_NAME_MAX_LEN];

        if (vdevname == NULL)
                goto err;

        /* check whether the driver supports the detach feature, or not */
        if (rte_eth_dev_is_detachable(port_id))
                goto err;

        /* get the device name by port id */
        if (rte_eth_dev_get_name_by_port(port_id, name))
                goto err;
        /* walk through the dev_driver_list to find the driver of the device,
         * then invoke the uninit function of the driver */
        if (rte_eal_vdev_uninit(name))
                goto err;

        strncpy(vdevname, name, sizeof(name));
        return 0;
err:
        RTE_LOG(ERR, EAL, "Driver cannot detach the device\n");
        return -1;
}

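/*
 * Illustrative usage (not part of this file; the device strings are
 * made-up examples): rte_eth_dev_attach() accepts either a PCI address,
 * which is routed to the physical-device path, or virtual device args:
 *
 *   uint8_t port_id;
 *
 *   if (rte_eth_dev_attach("0000:02:00.0", &port_id) == 0)
 *           printf("attached physical port %u\n", port_id);
 *   if (rte_eth_dev_attach("eth_pcap0,iface=eth0", &port_id) == 0)
 *           printf("attached virtual port %u\n", port_id);
 */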
/* attach the new device, then store port_id of the device */
int
rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
{
        struct rte_pci_addr addr;

        if ((devargs == NULL) || (port_id == NULL))
                return -EINVAL;

        if (eal_parse_pci_DomBDF(devargs, &addr) == 0)
                return rte_eth_dev_attach_pdev(&addr, port_id);
        else
                return rte_eth_dev_attach_vdev(devargs, port_id);
}

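/*
 * Illustrative detach (not part of this file): the caller provides a
 * buffer of at least RTE_ETH_NAME_MAX_LEN bytes that receives either the
 * "domain:bus:devid.function" string of the freed PCI device or the name
 * of the freed virtual device:
 *
 *   char name[RTE_ETH_NAME_MAX_LEN];
 *
 *   if (rte_eth_dev_detach(port_id, name) == 0)
 *           printf("detached %s\n", name);
 */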
/* detach the device, then store the name of the device */
int
rte_eth_dev_detach(uint8_t port_id, char *name)
{
        struct rte_pci_addr addr;
        int ret;

        if (name == NULL)
                return -EINVAL;

        if (rte_eth_dev_get_device_type(port_id) == RTE_ETH_DEV_PCI) {
                ret = rte_eth_dev_get_addr_by_port(port_id, &addr);
                if (ret < 0)
                        return ret;

                ret = rte_eth_dev_detach_pdev(port_id, &addr);
                if (ret == 0)
                        snprintf(name, RTE_ETH_NAME_MAX_LEN,
                                "%04x:%02x:%02x.%d",
                                addr.domain, addr.bus,
                                addr.devid, addr.function);

                return ret;
        } else
                return rte_eth_dev_detach_vdev(port_id, name);
}

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -ENOMEM;
                }
        } else { /* re-configure */
                FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

int
rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
}

int
rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
}

int
rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
}

int
rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -ENOMEM;
                }
        } else { /* re-configure */
                FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

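/*
 * Configuration sketch (illustrative, not part of this file; the values
 * are assumptions): a minimal single-queue setup that passes the checks
 * below looks like
 *
 *   struct rte_eth_conf conf;
 *
 *   memset(&conf, 0, sizeof(conf));
 *   conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN;
 *   if (rte_eth_dev_configure(port_id, 1, 1, &conf) < 0)
 *           rte_exit(EXIT_FAILURE, "cannot configure port\n");
 */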
int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        int diag;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
                PMD_DEBUG_TRACE(
                        "Number of RX queues requested (%u) is greater than max supported (%d)\n",
                        nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
                PMD_DEBUG_TRACE(
                        "Number of TX queues requested (%u) is greater than max supported (%d)\n",
                        nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
        if (nb_rx_q > dev_info.max_rx_queues) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
                                port_id, nb_rx_q, dev_info.max_rx_queues);
                return -EINVAL;
        }
        if (nb_rx_q == 0) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
                return -EINVAL;
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
                                port_id, nb_tx_q, dev_info.max_tx_queues);
                return -EINVAL;
        }
        if (nb_tx_q == 0) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
                return -EINVAL;
        }

        /* Copy the dev_conf parameter into the dev structure */
        memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

        /*
         * If link state interrupt is enabled, check that the
         * device supports it.
         */
        if ((dev_conf->intr_conf.lsc == 1) &&
                (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
                PMD_DEBUG_TRACE("driver %s does not support lsc\n",
                                dev->data->drv_name);
                return -EINVAL;
        }

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (dev_conf->rxmode.jumbo_frame == 1) {
                if (dev_conf->rxmode.max_rx_pkt_len >
                    dev_info.max_rx_pktlen) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " > max valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)dev_info.max_rx_pktlen);
                        return -EINVAL;
                } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " < min valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)ETHER_MIN_LEN);
                        return -EINVAL;
                }
        } else {
                if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
                        dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
                        /* Use default value */
                        dev->data->dev_conf.rxmode.max_rx_pkt_len =
                                                        ETHER_MAX_LEN;
        }

        /*
         * Setup new number of RX/TX queues and reconfigure device.
         */
        diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
                                port_id, diag);
                return diag;
        }

        diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                return diag;
        }

        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                return diag;
        }

        return 0;
}

static void
rte_eth_dev_config_restore(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct ether_addr addr;
        uint16_t i;
        uint32_t pool = 0;

        dev = &rte_eth_devices[port_id];

        rte_eth_dev_info_get(port_id, &dev_info);

        if (RTE_ETH_DEV_SRIOV(dev).active)
                pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

        /* replay MAC address configuration */
        for (i = 0; i < dev_info.max_mac_addrs; i++) {
                addr = dev->data->mac_addrs[i];

                /* skip zero address */
                if (is_zero_ether_addr(&addr))
                        continue;

                /* add address to the hardware */
                if (*dev->dev_ops->mac_addr_add &&
                        (dev->data->mac_pool_sel[i] & (1ULL << pool)))
                        (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
                else {
                        PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
                                        port_id);
                        /* exit the loop but do not return an error */
                        break;
                }
        }

        /* replay promiscuous configuration */
        if (rte_eth_promiscuous_get(port_id) == 1)
                rte_eth_promiscuous_enable(port_id);
        else if (rte_eth_promiscuous_get(port_id) == 0)
                rte_eth_promiscuous_disable(port_id);

        /* replay all multicast configuration */
        if (rte_eth_allmulticast_get(port_id) == 1)
                rte_eth_allmulticast_enable(port_id);
        else if (rte_eth_allmulticast_get(port_id) == 0)
                rte_eth_allmulticast_disable(port_id);
}

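/*
 * Bring-up order sketch (illustrative, not part of this file): the checks
 * in this file enforce configure -> queue setup -> start, since both
 * rte_eth_dev_configure() and the queue setup functions refuse to run on
 * a started port:
 *
 *   rte_eth_dev_configure(port_id, 1, 1, &conf);
 *   rte_eth_rx_queue_setup(port_id, 0, 128, socket_id, NULL, mp);
 *   rte_eth_tx_queue_setup(port_id, 0, 512, socket_id, NULL);
 *   rte_eth_dev_start(port_id);
 */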
int
rte_eth_dev_start(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        int diag;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

        if (dev->data->dev_started != 0) {
                PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
                        " already started\n",
                        port_id);
                return 0;
        }

        diag = (*dev->dev_ops->dev_start)(dev);
        if (diag == 0)
                dev->data->dev_started = 1;
        else
                return diag;

        rte_eth_dev_config_restore(port_id);

        if (dev->data->dev_conf.intr_conf.lsc != 0) {
                FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
                (*dev->dev_ops->link_update)(dev, 0);
        }
        return 0;
}

void
rte_eth_dev_stop(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_RET();

        VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

        if (dev->data->dev_started == 0) {
                PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
                        " already stopped\n",
                        port_id);
                return;
        }

        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_stop)(dev);
}

int
rte_eth_dev_set_link_up(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
        return (*dev->dev_ops->dev_set_link_up)(dev);
}

int
rte_eth_dev_set_link_down(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
        return (*dev->dev_ops->dev_set_link_down)(dev);
}

void
rte_eth_dev_close(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_RET();

        VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_close)(dev);

        rte_free(dev->data->rx_queues);
        dev->data->rx_queues = NULL;
        rte_free(dev->data->tx_queues);
        dev->data->tx_queues = NULL;
}

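/*
 * Mempool sizing sketch (illustrative; the pool name and counts are
 * made-up): the check below requires the pool's data room to cover
 * RTE_PKTMBUF_HEADROOM plus the device's min_rx_bufsize, which
 * RTE_MBUF_DEFAULT_BUF_SIZE normally satisfies:
 *
 *   struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 8192,
 *           256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *
 *   rte_eth_rx_queue_setup(port_id, 0, 128,
 *           rte_eth_dev_socket_id(port_id), NULL, mp);
 */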
int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
                       uint16_t nb_rx_desc, unsigned int socket_id,
                       const struct rte_eth_rxconf *rx_conf,
                       struct rte_mempool *mp)
{
        int ret;
        uint32_t mbp_buf_size;
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        if (dev->data->dev_started) {
                PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

        /*
         * Check the size of the mbuf data buffer.
         * This value must be provided in the private data of the memory pool.
         * First check that the memory pool has valid private data.
         */
        rte_eth_dev_info_get(port_id, &dev_info);
        if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
                PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
                                mp->name, (int) mp->private_data_size,
                                (int) sizeof(struct rte_pktmbuf_pool_private));
                return -ENOSPC;
        }
        mbp_buf_size = rte_pktmbuf_data_room_size(mp);

        if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
                PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
                                "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
                                "=%d)\n",
                                mp->name,
                                (int)mbp_buf_size,
                                (int)(RTE_PKTMBUF_HEADROOM +
                                      dev_info.min_rx_bufsize),
                                (int)RTE_PKTMBUF_HEADROOM,
                                (int)dev_info.min_rx_bufsize);
                return -EINVAL;
        }

        if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
                        nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
                        nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
                PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
                        "should be: <= %hu, >= %hu, and a multiple of %hu\n",
                        nb_rx_desc,
                        dev_info.rx_desc_lim.nb_max,
                        dev_info.rx_desc_lim.nb_min,
                        dev_info.rx_desc_lim.nb_align);
                return -EINVAL;
        }

        if (rx_conf == NULL)
                rx_conf = &dev_info.default_rxconf;

        ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
                                              socket_id, rx_conf, mp);
        if (!ret) {
                if (!dev->data->min_rx_buf_size ||
                    dev->data->min_rx_buf_size > mbp_buf_size)
                        dev->data->min_rx_buf_size = mbp_buf_size;
        }

        return ret;
}

int
rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
                       uint16_t nb_tx_desc, unsigned int socket_id,
                       const struct rte_eth_txconf *tx_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        if (dev->data->dev_started) {
                PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);

        rte_eth_dev_info_get(port_id, &dev_info);

        if (tx_conf == NULL)
                tx_conf = &dev_info.default_txconf;

        return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
                                               socket_id, tx_conf);
}

void
rte_eth_promiscuous_enable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
        (*dev->dev_ops->promiscuous_enable)(dev);
        dev->data->promiscuous = 1;
}

void
rte_eth_promiscuous_disable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
        dev->data->promiscuous = 0;
        (*dev->dev_ops->promiscuous_disable)(dev);
}

int
rte_eth_promiscuous_get(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        return dev->data->promiscuous;
}

void
rte_eth_allmulticast_enable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
        (*dev->dev_ops->allmulticast_enable)(dev);
        dev->data->all_multicast = 1;
}

void
rte_eth_allmulticast_disable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
        dev->data->all_multicast = 0;
        (*dev->dev_ops->allmulticast_disable)(dev);
}

int
rte_eth_allmulticast_get(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        return dev->data->all_multicast;
}

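/*
 * The link status is copied with a 64-bit compare-and-set because struct
 * rte_eth_link is sized/aligned to fit in 64 bits and may be updated
 * concurrently from the interrupt handler; the cmpset copies the 8-byte
 * link word atomically so readers never see a torn value.
 */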
static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
{
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &(dev->data->dev_link);

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

void
rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_conf.intr_conf.lsc != 0)
                rte_eth_dev_atomic_read_link_status(dev, eth_link);
        else {
                FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
                (*dev->dev_ops->link_update)(dev, 1);
                *eth_link = dev->data->dev_link;
        }
}

void
rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_conf.intr_conf.lsc != 0)
                rte_eth_dev_atomic_read_link_status(dev, eth_link);
        else {
                FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
                (*dev->dev_ops->link_update)(dev, 0);
                *eth_link = dev->data->dev_link;
        }
}

int
rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        memset(stats, 0, sizeof(*stats));

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
        (*dev->dev_ops->stats_get)(dev, stats);
        stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
        return 0;
}

void
rte_eth_stats_reset(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
        (*dev->dev_ops->stats_reset)(dev);
}

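/*
 * Illustrative two-step retrieval (not part of this file): callers usually
 * query the required count first, then allocate; passing n smaller than
 * the total makes the function return the total without filling entries.
 *
 *   int n = rte_eth_xstats_get(port_id, NULL, 0);
 *
 *   struct rte_eth_xstats *xs = malloc(n * sizeof(*xs));
 *   if (xs != NULL)
 *           n = rte_eth_xstats_get(port_id, xs, n);
 */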
/* retrieve ethdev extended statistics */
int
rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
        unsigned n)
{
        struct rte_eth_stats eth_stats;
        struct rte_eth_dev *dev;
        unsigned count = 0, i, q;
        signed xcount = 0;
        uint64_t val, *stats_ptr;

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        /* Return generic statistics */
        count = RTE_NB_STATS;

        /* implemented by the driver */
        if (dev->dev_ops->xstats_get != NULL) {
                /* Retrieve the xstats from the driver at the end of the
                 * xstats struct.
                 */
                xcount = (*dev->dev_ops->xstats_get)(dev, &xstats[count],
                         (n > count) ? n - count : 0);

                if (xcount < 0)
                        return xcount;
        } else {
                count += dev->data->nb_rx_queues * RTE_NB_RXQ_STATS;
                count += dev->data->nb_tx_queues * RTE_NB_TXQ_STATS;
        }

        if (n < count + xcount)
                return count + xcount;

        /* now fill the xstats structure */
        count = 0;
        rte_eth_stats_get(port_id, &eth_stats);

        /* global stats */
        for (i = 0; i < RTE_NB_STATS; i++) {
                stats_ptr = RTE_PTR_ADD(&eth_stats,
                                        rte_stats_strings[i].offset);
                val = *stats_ptr;
                snprintf(xstats[count].name, sizeof(xstats[count].name),
                        "%s", rte_stats_strings[i].name);
                xstats[count++].value = val;
        }

        /* if xstats_get() is implemented by the PMD, the Q stats are done */
        if (dev->dev_ops->xstats_get != NULL)
                return count + xcount;

        /* per-rxq stats */
        for (q = 0; q < dev->data->nb_rx_queues; q++) {
                for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
                        stats_ptr = RTE_PTR_ADD(&eth_stats,
                                        rte_rxq_stats_strings[i].offset +
                                        q * sizeof(uint64_t));
                        val = *stats_ptr;
                        snprintf(xstats[count].name, sizeof(xstats[count].name),
                                "rx_q%u_%s", q,
                                rte_rxq_stats_strings[i].name);
                        xstats[count++].value = val;
                }
        }

        /* per-txq stats */
        for (q = 0; q < dev->data->nb_tx_queues; q++) {
                for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
                        stats_ptr = RTE_PTR_ADD(&eth_stats,
                                        rte_txq_stats_strings[i].offset +
                                        q * sizeof(uint64_t));
                        val = *stats_ptr;
                        snprintf(xstats[count].name, sizeof(xstats[count].name),
                                "tx_q%u_%s", q,
                                rte_txq_stats_strings[i].name);
                        xstats[count++].value = val;
                }
        }

        return count + xcount;
}
1558
1559 /* reset ethdev extended statistics */
1560 void
1561 rte_eth_xstats_reset(uint8_t port_id)
1562 {
1563         struct rte_eth_dev *dev;
1564
1565         VALID_PORTID_OR_RET(port_id);
1566         dev = &rte_eth_devices[port_id];
1567
1568         /* implemented by the driver */
1569         if (dev->dev_ops->xstats_reset != NULL) {
1570                 (*dev->dev_ops->xstats_reset)(dev);
1571                 return;
1572         }
1573
1574         /* fallback to default */
1575         rte_eth_stats_reset(port_id);
1576 }
1577
1578 static int
1579 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1580                 uint8_t is_rx)
1581 {
1582         struct rte_eth_dev *dev;
1583
1584         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1585
1586         dev = &rte_eth_devices[port_id];
1587
1588         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1589         return (*dev->dev_ops->queue_stats_mapping_set)
1590                         (dev, queue_id, stat_idx, is_rx);
1591 }
1592
1593
1594 int
1595 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1596                 uint8_t stat_idx)
1597 {
1598         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1599                         STAT_QMAP_TX);
1600 }
1601
1602
1603 int
1604 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1605                 uint8_t stat_idx)
1606 {
1607         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1608                         STAT_QMAP_RX);
1609 }
1610
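/*
 * Usage sketch (illustrative only): on NICs that have fewer per-queue
 * statistics registers than queues (ixgbe, for instance), map TX and RX
 * queue 4 onto stats counter 1 so their counters are reported in
 * q_opackets[1]/q_ipackets[1] of struct rte_eth_stats. Valid index
 * ranges are PMD specific.
 *
 *   rte_eth_dev_set_tx_queue_stats_mapping(port_id, 4, 1);
 *   rte_eth_dev_set_rx_queue_stats_mapping(port_id, 4, 1);
 */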
1611
1612 void
1613 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1614 {
1615         struct rte_eth_dev *dev;
1616         const struct rte_eth_desc_lim lim = {
1617                 .nb_max = UINT16_MAX,
1618                 .nb_min = 0,
1619                 .nb_align = 1,
1620         };
1621
1622         VALID_PORTID_OR_RET(port_id);
1623         dev = &rte_eth_devices[port_id];
1624
1625         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
1626         dev_info->rx_desc_lim = lim;
1627         dev_info->tx_desc_lim = lim;
1628
1629         FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1630         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1631         dev_info->pci_dev = dev->pci_dev;
1632         dev_info->driver_name = dev->data->drv_name;
1633 }
1634
1635 void
1636 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1637 {
1638         struct rte_eth_dev *dev;
1639
1640         VALID_PORTID_OR_RET(port_id);
1641         dev = &rte_eth_devices[port_id];
1642         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1643 }
1644
1645
1646 int
1647 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1648 {
1649         struct rte_eth_dev *dev;
1650
1651         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1652
1653         dev = &rte_eth_devices[port_id];
1654         *mtu = dev->data->mtu;
1655         return 0;
1656 }
1657
1658 int
1659 rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
1660 {
1661         int ret;
1662         struct rte_eth_dev *dev;
1663
1664         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1665         dev = &rte_eth_devices[port_id];
1666         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
1667
1668         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
1669         if (!ret)
1670                 dev->data->mtu = mtu;
1671
1672         return ret;
1673 }
1674
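/*
 * Usage sketch (illustrative only): set a jumbo MTU and read back the
 * value cached above. mtu_set is delegated to the PMD, so -ENOTSUP is
 * returned when the driver does not implement it.
 *
 *   uint16_t mtu;
 *
 *   if (rte_eth_dev_set_mtu(port_id, 9000) == 0 &&
 *       rte_eth_dev_get_mtu(port_id, &mtu) == 0)
 *           printf("MTU is now %u\n", mtu);
 */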
1675 int
1676 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1677 {
1678         struct rte_eth_dev *dev;
1679
1680         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1681         dev = &rte_eth_devices[port_id];
1682         if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1683                 PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1684                 return -ENOSYS;
1685         }
1686
1687         if (vlan_id > 4095) {
1688                 PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1689                                 port_id, (unsigned) vlan_id);
1690                 return -EINVAL;
1691         }
1692         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1693
1694         return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1695 }
1696
1697 int
1698 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1699 {
1700         struct rte_eth_dev *dev;
1701
1702         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1703         dev = &rte_eth_devices[port_id];
1704         if (rx_queue_id >= dev->data->nb_rx_queues) {
1705                 PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
1706                 return -EINVAL;
1707         }
1708
1709         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1710         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1711
1712         return 0;
1713 }
1714
1715 int
1716 rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
1717 {
1718         struct rte_eth_dev *dev;
1719
1720         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1721         dev = &rte_eth_devices[port_id];
1722         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1723         (*dev->dev_ops->vlan_tpid_set)(dev, tpid);
1724
1725         return 0;
1726 }
1727
1728 int
1729 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
1730 {
1731         struct rte_eth_dev *dev;
1732         int ret = 0;
1733         int mask = 0;
1734         int cur, org = 0;
1735
1736         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1737         dev = &rte_eth_devices[port_id];
1738
1739         /*check which option changed by application*/
1740         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
1741         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
1742         if (cur != org) {
1743                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
1744                 mask |= ETH_VLAN_STRIP_MASK;
1745         }
1746
1747         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
1748         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
1749         if (cur != org) {
1750                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
1751                 mask |= ETH_VLAN_FILTER_MASK;
1752         }
1753
1754         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
1755         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
1756         if (cur != org) {
1757                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
1758                 mask |= ETH_VLAN_EXTEND_MASK;
1759         }
1760
1761         /*no change*/
1762         if (mask == 0)
1763                 return ret;
1764
1765         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
1766         (*dev->dev_ops->vlan_offload_set)(dev, mask);
1767
1768         return ret;
1769 }
1770
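/*
 * Usage sketch (illustrative only): enable VLAN stripping without
 * touching the other offload bits, using the read-modify-write pattern
 * this API expects; only the bits that actually changed are forwarded
 * to the PMD, as computed above.
 *
 *   int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *   mask |= ETH_VLAN_STRIP_OFFLOAD;
 *   rte_eth_dev_set_vlan_offload(port_id, mask);
 */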
1771 int
1772 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1773 {
1774         struct rte_eth_dev *dev;
1775         int ret = 0;
1776
1777         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1778         dev = &rte_eth_devices[port_id];
1779
1780         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1781                 ret |= ETH_VLAN_STRIP_OFFLOAD;
1782
1783         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1784                 ret |= ETH_VLAN_FILTER_OFFLOAD;
1785
1786         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1787                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
1788
1789         return ret;
1790 }
1791
1792 int
1793 rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
1794 {
1795         struct rte_eth_dev *dev;
1796
1797         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1798         dev = &rte_eth_devices[port_id];
1799         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
1800         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
1801
1802         return 0;
1803 }
1804
1805 int
1806 rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1807 {
1808         struct rte_eth_dev *dev;
1809
1810         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1811         dev = &rte_eth_devices[port_id];
1812         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
1813         memset(fc_conf, 0, sizeof(*fc_conf));
1814         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
1815 }
1816
1817 int
1818 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1819 {
1820         struct rte_eth_dev *dev;
1821
1822         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1823         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
1824                 PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
1825                 return -EINVAL;
1826         }
1827
1828         dev = &rte_eth_devices[port_id];
1829         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
1830         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
1831 }
1832
1833 int
1834 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
1835 {
1836         struct rte_eth_dev *dev;
1837
1838         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1839         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
1840                 PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
1841                 return -EINVAL;
1842         }
1843
1844         dev = &rte_eth_devices[port_id];
1845         /* High water, low water validation are device specific */
1846         if (*dev->dev_ops->priority_flow_ctrl_set)
1847                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
1848         return -ENOTSUP;
1849 }
1850
1851 static int
1852 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
1853                         uint16_t reta_size)
1854 {
1855         uint16_t i, num;
1856
1857         if (!reta_conf)
1858                 return -EINVAL;
1859
1860         if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
1861                 PMD_DEBUG_TRACE("Invalid reta size, must be a multiple of %u\n",
1862                                                         RTE_RETA_GROUP_SIZE);
1863                 return -EINVAL;
1864         }
1865
1866         num = reta_size / RTE_RETA_GROUP_SIZE;
1867         for (i = 0; i < num; i++) {
1868                 if (reta_conf[i].mask)
1869                         return 0;
1870         }
1871
1872         return -EINVAL;
1873 }
1874
1875 static int
1876 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
1877                          uint16_t reta_size,
1878                          uint8_t max_rxq)
1879 {
1880         uint16_t i, idx, shift;
1881
1882         if (!reta_conf)
1883                 return -EINVAL;
1884
1885         if (max_rxq == 0) {
1886                 PMD_DEBUG_TRACE("No receive queue is available\n");
1887                 return -EINVAL;
1888         }
1889
1890         for (i = 0; i < reta_size; i++) {
1891                 idx = i / RTE_RETA_GROUP_SIZE;
1892                 shift = i % RTE_RETA_GROUP_SIZE;
1893                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
1894                         (reta_conf[idx].reta[shift] >= max_rxq)) {
1895                         PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
1896                                 "the maximum rxq index: %u\n", idx, shift,
1897                                 reta_conf[idx].reta[shift], max_rxq);
1898                         return -EINVAL;
1899                 }
1900         }
1901
1902         return 0;
1903 }
1904
1905 int
1906 rte_eth_dev_rss_reta_update(uint8_t port_id,
1907                             struct rte_eth_rss_reta_entry64 *reta_conf,
1908                             uint16_t reta_size)
1909 {
1910         struct rte_eth_dev *dev;
1911         int ret;
1912
1913         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1914         /* Check mask bits */
1915         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1916         if (ret < 0)
1917                 return ret;
1918
1919         dev = &rte_eth_devices[port_id];
1920
1921         /* Check entry value */
1922         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
1923                                 dev->data->nb_rx_queues);
1924         if (ret < 0)
1925                 return ret;
1926
1927         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
1928         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
1929 }
1930
1931 int
1932 rte_eth_dev_rss_reta_query(uint8_t port_id,
1933                            struct rte_eth_rss_reta_entry64 *reta_conf,
1934                            uint16_t reta_size)
1935 {
1936         struct rte_eth_dev *dev;
1937         int ret;
1938
1939         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1943
1944         /* Check mask bits */
1945         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1946         if (ret < 0)
1947                 return ret;
1948
1949         dev = &rte_eth_devices[port_id];
1950         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
1951         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
1952 }
1953
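/*
 * Usage sketch (illustrative only): spread all redirection table
 * entries round-robin over the configured RX queues. It assumes
 * reta_size (a multiple of RTE_RETA_GROUP_SIZE, as reported in
 * dev_info.reta_size) and nb_queues are already known; each 64-entry
 * group carries its own mask and only masked entries are updated.
 *
 *   struct rte_eth_rss_reta_entry64 reta_conf[reta_size /
 *                                             RTE_RETA_GROUP_SIZE];
 *   uint16_t i, idx, shift;
 *
 *   memset(reta_conf, 0, sizeof(reta_conf));
 *   for (i = 0; i < reta_size; i++) {
 *           idx = i / RTE_RETA_GROUP_SIZE;
 *           shift = i % RTE_RETA_GROUP_SIZE;
 *           reta_conf[idx].mask |= 1ULL << shift;
 *           reta_conf[idx].reta[shift] = i % nb_queues;
 *   }
 *   rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
 */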
1954 int
1955 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
1956 {
1957         struct rte_eth_dev *dev;
1958         uint64_t rss_hash_protos;
1959 
1960         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1961         rss_hash_protos = rss_conf->rss_hf;
1962         if ((rss_hash_protos != 0) &&
1963             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
1964                 PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%"PRIx64"\n",
1965                                 rss_hash_protos);
1966                 return -EINVAL;
1967         }
1968         dev = &rte_eth_devices[port_id];
1969         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
1970         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
1971 }
1972
1973 int
1974 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
1975                               struct rte_eth_rss_conf *rss_conf)
1976 {
1977         struct rte_eth_dev *dev;
1978
1979         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1980         dev = &rte_eth_devices[port_id];
1981         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
1982         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
1983 }
1984
1985 int
1986 rte_eth_dev_udp_tunnel_add(uint8_t port_id,
1987                            struct rte_eth_udp_tunnel *udp_tunnel)
1988 {
1989         struct rte_eth_dev *dev;
1990
1991         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1992         if (udp_tunnel == NULL) {
1993                 PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
1994                 return -EINVAL;
1995         }
1996
1997         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
1998                 PMD_DEBUG_TRACE("Invalid tunnel type\n");
1999                 return -EINVAL;
2000         }
2001
2002         dev = &rte_eth_devices[port_id];
2003         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_add, -ENOTSUP);
2004         return (*dev->dev_ops->udp_tunnel_add)(dev, udp_tunnel);
2005 }
2006
2007 int
2008 rte_eth_dev_udp_tunnel_delete(uint8_t port_id,
2009                               struct rte_eth_udp_tunnel *udp_tunnel)
2010 {
2011         struct rte_eth_dev *dev;
2012
2013         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2014         dev = &rte_eth_devices[port_id];
2015
2016         if (udp_tunnel == NULL) {
2017                 PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2018                 return -EINVAL;
2019         }
2020
2021         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2022                 PMD_DEBUG_TRACE("Invalid tunnel type\n");
2023                 return -EINVAL;
2024         }
2025
2026         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_del, -ENOTSUP);
2027         return (*dev->dev_ops->udp_tunnel_del)(dev, udp_tunnel);
2028 }
2029
2030 int
2031 rte_eth_led_on(uint8_t port_id)
2032 {
2033         struct rte_eth_dev *dev;
2034
2035         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2036         dev = &rte_eth_devices[port_id];
2037         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2038         return (*dev->dev_ops->dev_led_on)(dev);
2039 }
2040
2041 int
2042 rte_eth_led_off(uint8_t port_id)
2043 {
2044         struct rte_eth_dev *dev;
2045
2046         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2047         dev = &rte_eth_devices[port_id];
2048         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2049         return (*dev->dev_ops->dev_led_off)(dev);
2050 }
2051
2052 /*
2053  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2054  * an empty spot.
2055  */
2056 static int
2057 get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2058 {
2059         struct rte_eth_dev_info dev_info;
2060         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2061         unsigned i;
2062
2063         rte_eth_dev_info_get(port_id, &dev_info);
2064
2065         for (i = 0; i < dev_info.max_mac_addrs; i++)
2066                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2067                         return i;
2068
2069         return -1;
2070 }
2071
2072 static const struct ether_addr null_mac_addr;
2073
2074 int
2075 rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
2076                         uint32_t pool)
2077 {
2078         struct rte_eth_dev *dev;
2079         int index;
2080         uint64_t pool_mask;
2081
2082         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2083         dev = &rte_eth_devices[port_id];
2084         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2085
2086         if (is_zero_ether_addr(addr)) {
2087                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2088                         port_id);
2089                 return -EINVAL;
2090         }
2091         if (pool >= ETH_64_POOLS) {
2092                 PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2093                 return -EINVAL;
2094         }
2095
2096         index = get_mac_addr_index(port_id, addr);
2097         if (index < 0) {
2098                 index = get_mac_addr_index(port_id, &null_mac_addr);
2099                 if (index < 0) {
2100                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2101                                 port_id);
2102                         return -ENOSPC;
2103                 }
2104         } else {
2105                 pool_mask = dev->data->mac_pool_sel[index];
2106
2107                 /* If both the MAC address and pool are already set, do nothing */
2108                 if (pool_mask & (1ULL << pool))
2109                         return 0;
2110         }
2111
2112         /* Update NIC */
2113         (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2114
2115         /* Update address in NIC data structure */
2116         ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2117
2118         /* Update pool bitmap in NIC data structure */
2119         dev->data->mac_pool_sel[index] |= (1ULL << pool);
2120
2121         return 0;
2122 }
2123
2124 int
2125 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
2126 {
2127         struct rte_eth_dev *dev;
2128         int index;
2129
2130         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2131         dev = &rte_eth_devices[port_id];
2132         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2133
2134         index = get_mac_addr_index(port_id, addr);
2135         if (index == 0) {
2136                 PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2137                 return -EADDRINUSE;
2138         } else if (index < 0)
2139                 return 0;  /* Do nothing if address wasn't found */
2140
2141         /* Update NIC */
2142         (*dev->dev_ops->mac_addr_remove)(dev, index);
2143
2144         /* Update address in NIC data structure */
2145         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2146
2147         /* reset pool bitmap */
2148         dev->data->mac_pool_sel[index] = 0;
2149
2150         return 0;
2151 }
2152
2153 int
2154 rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr)
2155 {
2156         struct rte_eth_dev *dev;
2157
2158         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2159
2160         if (!is_valid_assigned_ether_addr(addr))
2161                 return -EINVAL;
2162
2163         dev = &rte_eth_devices[port_id];
2164         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2165
2166         /* Update default address in NIC data structure */
2167         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
2168
2169         (*dev->dev_ops->mac_addr_set)(dev, addr);
2170
2171         return 0;
2172 }
2173
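/*
 * Usage sketch (illustrative only): add a locally administered unicast
 * address to pool 0 and remove it again later. The default address at
 * index 0 cannot be removed this way; replace it with
 * rte_eth_dev_default_mac_addr_set() instead.
 *
 *   struct ether_addr mac = {
 *           .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *   };
 *
 *   rte_eth_dev_mac_addr_add(port_id, &mac, 0);
 *   ...
 *   rte_eth_dev_mac_addr_remove(port_id, &mac);
 */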
2174 int
2175 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
2176                                 uint16_t rx_mode, uint8_t on)
2177 {
2178         uint16_t num_vfs;
2179         struct rte_eth_dev *dev;
2180         struct rte_eth_dev_info dev_info;
2181
2182         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2183
2184         dev = &rte_eth_devices[port_id];
2185         rte_eth_dev_info_get(port_id, &dev_info);
2186
2187         num_vfs = dev_info.max_vfs;
2188         if (vf >= num_vfs) {
2189                 PMD_DEBUG_TRACE("set VF RX mode: invalid VF id %d\n", vf);
2190                 return -EINVAL;
2191         }
2192
2193         if (rx_mode == 0) {
2194                 PMD_DEBUG_TRACE("set VF RX mode: mode mask cannot be zero\n");
2195                 return -EINVAL;
2196         }
2197         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
2198         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
2199 }
2200
2201 /*
2202  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2203  * an empty spot.
2204  */
2205 static int
2206 get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2207 {
2208         struct rte_eth_dev_info dev_info;
2209         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2210         unsigned i;
2211
2212         rte_eth_dev_info_get(port_id, &dev_info);
2213         if (!dev->data->hash_mac_addrs)
2214                 return -1;
2215
2216         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2217                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2218                         ETHER_ADDR_LEN) == 0)
2219                         return i;
2220
2221         return -1;
2222 }
2223
2224 int
2225 rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
2226                                 uint8_t on)
2227 {
2228         int index;
2229         int ret;
2230         struct rte_eth_dev *dev;
2231
2232         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2233
2234         dev = &rte_eth_devices[port_id];
2235         if (is_zero_ether_addr(addr)) {
2236                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2237                         port_id);
2238                 return -EINVAL;
2239         }
2240
2241         index = get_hash_mac_addr_index(port_id, addr);
2242         /* Check if it's already there, and do nothing */
2243         if ((index >= 0) && (on))
2244                 return 0;
2245
2246         if (index < 0) {
2247                 if (!on) {
2248                         PMD_DEBUG_TRACE("port %d: the MAC address was not "
2249                                 "set in UTA\n", port_id);
2250                         return -EINVAL;
2251                 }
2252
2253                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2254                 if (index < 0) {
2255                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2256                                         port_id);
2257                         return -ENOSPC;
2258                 }
2259         }
2260
2261         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2262         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2263         if (ret == 0) {
2264                 /* Update address in NIC data structure */
2265                 if (on)
2266                         ether_addr_copy(addr,
2267                                         &dev->data->hash_mac_addrs[index]);
2268                 else
2269                         ether_addr_copy(&null_mac_addr,
2270                                         &dev->data->hash_mac_addrs[index]);
2271         }
2272
2273         return ret;
2274 }
2275
2276 int
2277 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2278 {
2279         struct rte_eth_dev *dev;
2280
2281         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2282
2283         dev = &rte_eth_devices[port_id];
2284
2285         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2286         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2287 }
2288
2289 int
2290 rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on)
2291 {
2292         uint16_t num_vfs;
2293         struct rte_eth_dev *dev;
2294         struct rte_eth_dev_info dev_info;
2295
2296         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2297
2298         dev = &rte_eth_devices[port_id];
2299         rte_eth_dev_info_get(port_id, &dev_info);
2300
2301         num_vfs = dev_info.max_vfs;
2302         if (vf >= num_vfs) {
2303                 PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2304                 return -EINVAL;
2305         }
2306
2307         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2308         return (*dev->dev_ops->set_vf_rx)(dev, vf, on);
2309 }
2310
2311 int
2312 rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on)
2313 {
2314         uint16_t num_vfs;
2315         struct rte_eth_dev *dev;
2316         struct rte_eth_dev_info dev_info;
2317
2318         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2319
2320         dev = &rte_eth_devices[port_id];
2321         rte_eth_dev_info_get(port_id, &dev_info);
2322
2323         num_vfs = dev_info.max_vfs;
2324         if (vf >= num_vfs) {
2325                 PMD_DEBUG_TRACE("set VF TX: invalid VF id=%d\n", vf);
2326                 return -EINVAL;
2327         }
2328
2329         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2330         return (*dev->dev_ops->set_vf_tx)(dev, vf, on);
2331 }
2332
2333 int
2334 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2335                                uint64_t vf_mask, uint8_t vlan_on)
2336 {
2337         struct rte_eth_dev *dev;
2338
2339         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2340
2341         dev = &rte_eth_devices[port_id];
2342
2343         if (vlan_id > ETHER_MAX_VLAN_ID) {
2344                 PMD_DEBUG_TRACE("VF VLAN filter: invalid VLAN id=%d\n",
2345                         vlan_id);
2346                 return -EINVAL;
2347         }
2348
2349         if (vf_mask == 0) {
2350                 PMD_DEBUG_TRACE("VF VLAN filter: pool_mask cannot be 0\n");
2351                 return -EINVAL;
2352         }
2353
2354         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2355         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2356                                                    vf_mask, vlan_on);
2357 }
2358
2359 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
2360                                         uint16_t tx_rate)
2361 {
2362         struct rte_eth_dev *dev;
2363         struct rte_eth_dev_info dev_info;
2364         struct rte_eth_link link;
2365
2366         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2367
2368         dev = &rte_eth_devices[port_id];
2369         rte_eth_dev_info_get(port_id, &dev_info);
2370         link = dev->data->dev_link;
2371
2372         if (queue_idx >= dev_info.max_tx_queues) {
2373                 PMD_DEBUG_TRACE("set queue rate limit: port %d: "
2374                                 "invalid queue id=%d\n", port_id, queue_idx);
2375                 return -EINVAL;
2376         }
2377
2378         if (tx_rate > link.link_speed) {
2379                 PMD_DEBUG_TRACE("set queue rate limit: invalid tx_rate=%d, "
2380                                 "bigger than link speed=%d\n",
2381                                 tx_rate, link.link_speed);
2382                 return -EINVAL;
2383         }
2384
2385         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2386         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2387 }
2388
2389 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
2390                                 uint64_t q_msk)
2391 {
2392         struct rte_eth_dev *dev;
2393         struct rte_eth_dev_info dev_info;
2394         struct rte_eth_link link;
2395
2396         if (q_msk == 0)
2397                 return 0;
2398
2399         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2400
2401         dev = &rte_eth_devices[port_id];
2402         rte_eth_dev_info_get(port_id, &dev_info);
2403         link = dev->data->dev_link;
2404
2405         if (vf >= dev_info.max_vfs) {
2406                 PMD_DEBUG_TRACE("set VF rate limit: port %d: "
2407                                 "invalid vf id=%d\n", port_id, vf);
2408                 return -EINVAL;
2409         }
2410
2411         if (tx_rate > link.link_speed) {
2412                 PMD_DEBUG_TRACE("set VF rate limit: invalid tx_rate=%d, "
2413                                 "bigger than link speed=%d\n",
2414                                 tx_rate, link.link_speed);
2415                 return -EINVAL;
2416         }
2417
2418         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
2419         return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
2420 }
2421
2422 int
2423 rte_eth_mirror_rule_set(uint8_t port_id,
2424                         struct rte_eth_mirror_conf *mirror_conf,
2425                         uint8_t rule_id, uint8_t on)
2426 {
2427         struct rte_eth_dev *dev;
2428
2429         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2430         if (mirror_conf->rule_type == 0) {
2431                 PMD_DEBUG_TRACE("mirror rule type cannot be 0.\n");
2432                 return -EINVAL;
2433         }
2434
2435         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2436                 PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
2437                                 ETH_64_POOLS - 1);
2438                 return -EINVAL;
2439         }
2440
2441         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
2442              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
2443             (mirror_conf->pool_mask == 0)) {
2444                 PMD_DEBUG_TRACE("Invalid mirror pool, pool mask cannot be 0.\n");
2445                 return -EINVAL;
2446         }
2447
2448         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
2449             mirror_conf->vlan.vlan_mask == 0) {
2450                 PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask cannot be 0.\n");
2451                 return -EINVAL;
2452         }
2453
2454         dev = &rte_eth_devices[port_id];
2455         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2456
2457         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2458 }
2459
2460 int
2461 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2462 {
2463         struct rte_eth_dev *dev;
2464
2465         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2466
2467         dev = &rte_eth_devices[port_id];
2468         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2469
2470         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2471 }
2472
2473 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2474 uint16_t
2475 rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
2476                  struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2477 {
2478         struct rte_eth_dev *dev;
2479
2480         VALID_PORTID_OR_ERR_RET(port_id, 0);
2481
2482         dev = &rte_eth_devices[port_id];
2483         FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
2484         if (queue_id >= dev->data->nb_rx_queues) {
2485                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
2486                 return 0;
2487         }
2488         return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
2489                                                 rx_pkts, nb_pkts);
2490 }
2491
2492 uint16_t
2493 rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
2494                  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2495 {
2496         struct rte_eth_dev *dev;
2497
2498         VALID_PORTID_OR_ERR_RET(port_id, 0);
2499
2500         dev = &rte_eth_devices[port_id];
2501
2502         FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
2503         if (queue_id >= dev->data->nb_tx_queues) {
2504                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
2505                 return 0;
2506         }
2507         return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
2508                                                 tx_pkts, nb_pkts);
2509 }
2510
2511 uint32_t
2512 rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
2513 {
2514         struct rte_eth_dev *dev;
2515
2516         VALID_PORTID_OR_ERR_RET(port_id, 0);
2517
2518         dev = &rte_eth_devices[port_id];
2519         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, 0);
2520         return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
2521 }
2522
2523 int
2524 rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
2525 {
2526         struct rte_eth_dev *dev;
2527
2528         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2529
2530         dev = &rte_eth_devices[port_id];
2531         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
2532         return (*dev->dev_ops->rx_descriptor_done)(dev->data->rx_queues[queue_id],
2533                                                    offset);
2534 }
2535 #endif
2536
2537 int
2538 rte_eth_dev_callback_register(uint8_t port_id,
2539                         enum rte_eth_event_type event,
2540                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2541 {
2542         struct rte_eth_dev *dev;
2543         struct rte_eth_dev_callback *user_cb;
2544
2545         if (!cb_fn)
2546                 return -EINVAL;
2547
2548         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2549
2550         dev = &rte_eth_devices[port_id];
2551         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2552
2553         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
2554                 if (user_cb->cb_fn == cb_fn &&
2555                         user_cb->cb_arg == cb_arg &&
2556                         user_cb->event == event) {
2557                         break;
2558                 }
2559         }
2560
2561         /* allocate and insert a new callback only when none matched above */
2562         if (user_cb == NULL) {
2563                 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
2564                                       sizeof(struct rte_eth_dev_callback), 0);
2565                 if (user_cb != NULL) {
2566                         user_cb->cb_fn = cb_fn;
2567                         user_cb->cb_arg = cb_arg;
2568                         user_cb->event = event;
2569                         TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
2570                 }
2571         }
2572         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2573         return (user_cb == NULL) ? -ENOMEM : 0;
2574 }
2575
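/*
 * Usage sketch (illustrative only): be notified of link state changes.
 * The callback typically runs in the EAL interrupt thread, so it should
 * not block; cb_arg (NULL here) is handed back verbatim.
 *
 *   static void
 *   link_cb(uint8_t port_id, enum rte_eth_event_type event, void *arg)
 *   {
 *           struct rte_eth_link link;
 *
 *           rte_eth_link_get_nowait(port_id, &link);
 *           printf("port %u link %s\n", port_id,
 *                  link.link_status ? "up" : "down");
 *   }
 *
 *   rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                                 link_cb, NULL);
 */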
2576 int
2577 rte_eth_dev_callback_unregister(uint8_t port_id,
2578                         enum rte_eth_event_type event,
2579                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2580 {
2581         int ret;
2582         struct rte_eth_dev *dev;
2583         struct rte_eth_dev_callback *cb, *next;
2584
2585         if (!cb_fn)
2586                 return -EINVAL;
2587
2588         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2589
2590         dev = &rte_eth_devices[port_id];
2591         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2592
2593         ret = 0;
2594         for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
2595
2596                 next = TAILQ_NEXT(cb, next);
2597
2598                 if (cb->cb_fn != cb_fn || cb->event != event ||
2599                                 (cb->cb_arg != (void *)-1 &&
2600                                 cb->cb_arg != cb_arg))
2601                         continue;
2602
2603                 /*
2604                  * if this callback is not executing right now,
2605                  * then remove it.
2606                  */
2607                 if (cb->active == 0) {
2608                         TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
2609                         rte_free(cb);
2610                 } else {
2611                         ret = -EAGAIN;
2612                 }
2613         }
2614
2615         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2616         return ret;
2617 }
2618
2619 void
2620 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
2621         enum rte_eth_event_type event)
2622 {
2623         struct rte_eth_dev_callback *cb_lst;
2624         struct rte_eth_dev_callback dev_cb;
2625
2626         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2627         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
2628                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
2629                         continue;
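                /*
                 * Work on a local copy and mark the entry active so the
                 * lock can be dropped while the user callback runs; an
                 * unregister of an active callback returns -EAGAIN above
                 * instead of freeing it mid-call.
                 */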
2630                 dev_cb = *cb_lst;
2631                 cb_lst->active = 1;
2632                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2633                 dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
2634                                                 dev_cb.cb_arg);
2635                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
2636                 cb_lst->active = 0;
2637         }
2638         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2639 }
2640
2641 int
2642 rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
2643 {
2644         uint32_t vec;
2645         struct rte_eth_dev *dev;
2646         struct rte_intr_handle *intr_handle;
2647         uint16_t qid;
2648         int rc;
2649
2650         if (!rte_eth_dev_is_valid_port(port_id)) {
2651                 PMD_DEBUG_TRACE("Invalid port_id=%u\n", port_id);
2652                 return -ENODEV;
2653         }
2654
2655         dev = &rte_eth_devices[port_id];
2656         intr_handle = &dev->pci_dev->intr_handle;
2657         if (!intr_handle->intr_vec) {
2658                 PMD_DEBUG_TRACE("RX Intr vector unset\n");
2659                 return -EPERM;
2660         }
2661
2662         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
2663                 vec = intr_handle->intr_vec[qid];
2664                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2665                 if (rc && rc != -EEXIST) {
2666                         PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2667                                         " op %d epfd %d vec %u\n",
2668                                         port_id, qid, op, epfd, vec);
2669                 }
2670         }
2671
2672         return 0;
2673 }
2674
2675 int
2676 rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
2677                           int epfd, int op, void *data)
2678 {
2679         uint32_t vec;
2680         struct rte_eth_dev *dev;
2681         struct rte_intr_handle *intr_handle;
2682         int rc;
2683
2684         if (!rte_eth_dev_is_valid_port(port_id)) {
2685                 PMD_DEBUG_TRACE("Invalid port_id=%u\n", port_id);
2686                 return -ENODEV;
2687         }
2688
2689         dev = &rte_eth_devices[port_id];
2690         if (queue_id >= dev->data->nb_rx_queues) {
2691                 PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
2692                 return -EINVAL;
2693         }
2694
2695         intr_handle = &dev->pci_dev->intr_handle;
2696         if (!intr_handle->intr_vec) {
2697                 PMD_DEBUG_TRACE("RX Intr vector unset\n");
2698                 return -EPERM;
2699         }
2700
2701         vec = intr_handle->intr_vec[queue_id];
2702         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2703         if (rc && rc != -EEXIST) {
2704                 PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2705                                 " op %d epfd %d vec %u\n",
2706                                 port_id, queue_id, op, epfd, vec);
2707                 return rc;
2708         }
2709
2710         return 0;
2711 }
2712
2713 int
2714 rte_eth_dev_rx_intr_enable(uint8_t port_id,
2715                            uint16_t queue_id)
2716 {
2717         struct rte_eth_dev *dev;
2718
2719         if (!rte_eth_dev_is_valid_port(port_id)) {
2720                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2721                 return -ENODEV;
2722         }
2723
2724         dev = &rte_eth_devices[port_id];
2725
2726         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
2727         return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
2728 }
2729
2730 int
2731 rte_eth_dev_rx_intr_disable(uint8_t port_id,
2732                             uint16_t queue_id)
2733 {
2734         struct rte_eth_dev *dev;
2735
2736         if (!rte_eth_dev_is_valid_port(port_id)) {
2737                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2738                 return -ENODEV;
2739         }
2740
2741         dev = &rte_eth_devices[port_id];
2742
2743         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
2744         return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
2745 }
2746
2747 #ifdef RTE_NIC_BYPASS
2748 int rte_eth_dev_bypass_init(uint8_t port_id)
2749 {
2750         struct rte_eth_dev *dev;
2751
2752         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2753
2754         dev = &rte_eth_devices[port_id];
2755         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
2756         (*dev->dev_ops->bypass_init)(dev);
2757         return 0;
2758 }
2759
2760 int
2761 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
2762 {
2763         struct rte_eth_dev *dev;
2764
2765         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2766
2767         dev = &rte_eth_devices[port_id];
2768         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2769         (*dev->dev_ops->bypass_state_show)(dev, state);
2770         return 0;
2771 }
2772
2773 int
2774 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
2775 {
2776         struct rte_eth_dev *dev;
2777
2778         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2779
2780         dev = &rte_eth_devices[port_id];
2781         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
2782         (*dev->dev_ops->bypass_state_set)(dev, new_state);
2783         return 0;
2784 }
2785
2786 int
2787 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
2788 {
2789         struct rte_eth_dev *dev;
2790
2791         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2792
2793         dev = &rte_eth_devices[port_id];
2794         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_show, -ENOTSUP);
2795         (*dev->dev_ops->bypass_event_show)(dev, event, state);
2796         return 0;
2797 }
2798
2799 int
2800 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
2801 {
2802         struct rte_eth_dev *dev;
2803
2804         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2805
2806         dev = &rte_eth_devices[port_id];
2807
2808         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
2809         (*dev->dev_ops->bypass_event_set)(dev, event, state);
2810         return 0;
2811 }
2812
2813 int
2814 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
2815 {
2816         struct rte_eth_dev *dev;
2817
2818         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2819
2820         dev = &rte_eth_devices[port_id];
2821
2822         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
2823         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
2824         return 0;
2825 }
2826
2827 int
2828 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
2829 {
2830         struct rte_eth_dev *dev;
2831
2832         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2833
2834         dev = &rte_eth_devices[port_id];
2835
2836         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
2837         (*dev->dev_ops->bypass_ver_show)(dev, ver);
2838         return 0;
2839 }
2840
2841 int
2842 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
2843 {
2844         struct rte_eth_dev *dev;
2845
2846         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2847
2848         dev = &rte_eth_devices[port_id];
2849
2850         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
2851         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
2852         return 0;
2853 }
2854
2855 int
2856 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
2857 {
2858         struct rte_eth_dev *dev;
2859
2860         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2861
2862         dev = &rte_eth_devices[port_id];
2863
2864         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
2865         (*dev->dev_ops->bypass_wd_reset)(dev);
2866         return 0;
2867 }
2868 #endif
2869
2870 int
2871 rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
2872 {
2873         struct rte_eth_dev *dev;
2874
2875         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2876
2877         dev = &rte_eth_devices[port_id];
2878         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2879         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
2880                                 RTE_ETH_FILTER_NOP, NULL);
2881 }
2882
2883 int
2884 rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
2885                        enum rte_filter_op filter_op, void *arg)
2886 {
2887         struct rte_eth_dev *dev;
2888
2889         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2890
2891         dev = &rte_eth_devices[port_id];
2892         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2893         return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
2894 }
2895
2896 void *
2897 rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
2898                 rte_rx_callback_fn fn, void *user_param)
2899 {
2900 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2901         rte_errno = ENOTSUP;
2902         return NULL;
2903 #endif
2904         /* check input parameters */
2905         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2906                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2907                 rte_errno = EINVAL;
2908                 return NULL;
2909         }
2910
2911         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2912
2913         if (cb == NULL) {
2914                 rte_errno = ENOMEM;
2915                 return NULL;
2916         }
2917
2918         cb->fn.rx = fn;
2919         cb->param = user_param;
2920
2921         /* Add the callbacks in fifo order. */
2922         struct rte_eth_rxtx_callback *tail =
2923                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
2924
2925         if (!tail) {
2926                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
2927
2928         } else {
2929                 while (tail->next)
2930                         tail = tail->next;
2931                 tail->next = cb;
2932         }
2933
2934         return cb;
2935 }
2936
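/*
 * Usage sketch (illustrative only): a post-RX hook that counts packets
 * received on queue 0. It needs RTE_ETHDEV_RXTX_CALLBACKS and runs
 * inside rte_eth_rx_burst() on the calling lcore, so it must be cheap.
 *
 *   static uint16_t
 *   count_cb(uint8_t port, uint16_t queue, struct rte_mbuf **pkts,
 *            uint16_t nb_pkts, uint16_t max_pkts, void *arg)
 *   {
 *           *(uint64_t *)arg += nb_pkts;
 *           return nb_pkts;
 *   }
 *
 *   static uint64_t rx_count;
 *   void *cb = rte_eth_add_rx_callback(port_id, 0, count_cb, &rx_count);
 */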
2937 void *
2938 rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
2939                 rte_tx_callback_fn fn, void *user_param)
2940 {
2941 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2942         rte_errno = ENOTSUP;
2943         return NULL;
2944 #endif
2945         /* check input parameters */
2946         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2947                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
2948                 rte_errno = EINVAL;
2949                 return NULL;
2950         }
2951
2952         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2953
2954         if (cb == NULL) {
2955                 rte_errno = ENOMEM;
2956                 return NULL;
2957         }
2958
2959         cb->fn.tx = fn;
2960         cb->param = user_param;
2961
2962         /* Add the callbacks in fifo order. */
2963         struct rte_eth_rxtx_callback *tail =
2964                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
2965
2966         if (!tail) {
2967                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
2968
2969         } else {
2970                 while (tail->next)
2971                         tail = tail->next;
2972                 tail->next = cb;
2973         }
2974
2975         return cb;
2976 }
2977
2978 int
2979 rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
2980                 struct rte_eth_rxtx_callback *user_cb)
2981 {
2982 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2983         return -ENOTSUP;
2984 #endif
2985         /* Check input parameters. */
2986         if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
2987                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2988                 return -EINVAL;
2989         }
2990
2991         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2992         struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
2993         struct rte_eth_rxtx_callback *prev_cb;
2994
2995         /* Reset head pointer and remove user cb if first in the list. */
2996         if (cb == user_cb) {
2997                 dev->post_rx_burst_cbs[queue_id] = user_cb->next;
2998                 return 0;
2999         }
3000
3001         /* Remove the user cb from the callback list. */
3002         do {
3003                 prev_cb = cb;
3004                 cb = cb->next;
3005
3006                 if (cb == user_cb) {
3007                         prev_cb->next = user_cb->next;
3008                         return 0;
3009                 }
3010
3011         } while (cb != NULL);
3012
3013         /* Callback wasn't found. */
3014         return -EINVAL;
3015 }
3016
3017 int
3018 rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
3019                 struct rte_eth_rxtx_callback *user_cb)
3020 {
3021 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3022         return -ENOTSUP;
3023 #endif
3024         /* Check input parameters. */
3025         if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
3026                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3027                 return -EINVAL;
3028         }
3029
3030         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3031         struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
3032         struct rte_eth_rxtx_callback *prev_cb;
3033
3034         /* Reset head pointer and remove user cb if first in the list. */
3035         if (cb == user_cb) {
3036                 dev->pre_tx_burst_cbs[queue_id] = user_cb->next;
3037                 return 0;
3038         }
3039
3040         /* Remove the user cb from the callback list. */
3041         do {
3042                 prev_cb = cb;
3043                 cb = cb->next;
3044
3045                 if (cb == user_cb) {
3046                         prev_cb->next = user_cb->next;
3047                         return 0;
3048                 }
3049
3050         } while (cb != NULL);
3051
3052         /* Callback wasn't found. */
3053         return -EINVAL;
3054 }
3055
3056 int
3057 rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3058         struct rte_eth_rxq_info *qinfo)
3059 {
3060         struct rte_eth_dev *dev;
3061
3062         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3063
3064         if (qinfo == NULL)
3065                 return -EINVAL;
3066
3067         dev = &rte_eth_devices[port_id];
3068         if (queue_id >= dev->data->nb_rx_queues) {
3069                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3070                 return -EINVAL;
3071         }
3072
3073         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3074
3075         memset(qinfo, 0, sizeof(*qinfo));
3076         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3077         return 0;
3078 }
3079
3080 int
3081 rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3082         struct rte_eth_txq_info *qinfo)
3083 {
3084         struct rte_eth_dev *dev;
3085
3086         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3087
3088         if (qinfo == NULL)
3089                 return -EINVAL;
3090
3091         dev = &rte_eth_devices[port_id];
3092         if (queue_id >= dev->data->nb_tx_queues) {
3093                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3094                 return -EINVAL;
3095         }
3096
3097         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3098
3099         memset(qinfo, 0, sizeof(*qinfo));
3100         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3101         return 0;
3102 }
3103
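/*
 * Usage sketch (illustrative only): query the descriptor ring size the
 * PMD actually configured for RX queue 0. -ENOTSUP is returned when the
 * PMD does not implement rxq_info_get.
 *
 *   struct rte_eth_rxq_info qinfo;
 *
 *   if (rte_eth_rx_queue_info_get(port_id, 0, &qinfo) == 0)
 *           printf("RX ring: %u descriptors\n", qinfo.nb_desc);
 */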
3104 int
3105 rte_eth_dev_set_mc_addr_list(uint8_t port_id,
3106                              struct ether_addr *mc_addr_set,
3107                              uint32_t nb_mc_addr)
3108 {
3109         struct rte_eth_dev *dev;
3110
3111         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3112
3113         dev = &rte_eth_devices[port_id];
3114         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3115         return dev->dev_ops->set_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
3116 }
3117
3118 int
3119 rte_eth_timesync_enable(uint8_t port_id)
3120 {
3121         struct rte_eth_dev *dev;
3122
3123         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3124         dev = &rte_eth_devices[port_id];
3125
3126         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3127         return (*dev->dev_ops->timesync_enable)(dev);
3128 }
3129
3130 int
3131 rte_eth_timesync_disable(uint8_t port_id)
3132 {
3133         struct rte_eth_dev *dev;
3134
3135         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3136         dev = &rte_eth_devices[port_id];
3137
3138         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3139         return (*dev->dev_ops->timesync_disable)(dev);
3140 }
3141
3142 int
3143 rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp,
3144                                    uint32_t flags)
3145 {
3146         struct rte_eth_dev *dev;
3147
3148         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3149         dev = &rte_eth_devices[port_id];
3150
3151         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3152         return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
3153 }
3154
3155 int
3156 rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp)
3157 {
3158         struct rte_eth_dev *dev;
3159
3160         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3161         dev = &rte_eth_devices[port_id];
3162
3163         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3164         return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
3165 }
3166
3167 int
3168 rte_eth_dev_get_reg_length(uint8_t port_id)
3169 {
3170         struct rte_eth_dev *dev;
3171
3172         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3173
3174         dev = &rte_eth_devices[port_id];
3175         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg_length, -ENOTSUP);
3176         return (*dev->dev_ops->get_reg_length)(dev);
3177 }
3178
3179 int
3180 rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info)
3181 {
3182         struct rte_eth_dev *dev;
3183
3184         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3185
3186         dev = &rte_eth_devices[port_id];
3187         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
3188         return (*dev->dev_ops->get_reg)(dev, info);
3189 }
3190
3191 int
3192 rte_eth_dev_get_eeprom_length(uint8_t port_id)
3193 {
3194         struct rte_eth_dev *dev;
3195
3196         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3197
3198         dev = &rte_eth_devices[port_id];
3199         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
3200         return (*dev->dev_ops->get_eeprom_length)(dev);
3201 }
3202
3203 int
3204 rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
3205 {
3206         struct rte_eth_dev *dev;
3207
3208         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3209
3210         dev = &rte_eth_devices[port_id];
3211         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
3212         return (*dev->dev_ops->get_eeprom)(dev, info);
3213 }
3214
3215 int
3216 rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
3217 {
3218         struct rte_eth_dev *dev;
3219
3220         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3221
3222         dev = &rte_eth_devices[port_id];
3223         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
3224         return (*dev->dev_ops->set_eeprom)(dev, info);
3225 }
3226
3227 int
3228 rte_eth_dev_get_dcb_info(uint8_t port_id,
3229                              struct rte_eth_dcb_info *dcb_info)
3230 {
3231         struct rte_eth_dev *dev;
3232
3233         if (!rte_eth_dev_is_valid_port(port_id)) {
3234                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3235                 return -ENODEV;
3236         }
3237
3238         dev = &rte_eth_devices[port_id];
3239         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
3240
3241         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
3242         return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
3243 }
3244
3245 void
3246 rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev, struct rte_pci_device *pci_dev)
3247 {
3248         if ((eth_dev == NULL) || (pci_dev == NULL)) {
3249                 PMD_DEBUG_TRACE("NULL pointer eth_dev=%p pci_dev=%p\n",
3250                                 eth_dev, pci_dev);
3251                 return;
3252         }
3253         eth_dev->data->dev_flags = 0;
3254         if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
3255                 eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
3256         if (pci_dev->driver->drv_flags & RTE_PCI_DRV_DETACHABLE)
3257                 eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
3258
3259         eth_dev->data->kdrv = pci_dev->kdrv;
3260         eth_dev->data->numa_node = pci_dev->numa_node;
3261         eth_dev->data->drv_name = pci_dev->driver->name;
3262 }