ethdev: fix error handling in PCI fields copy
[dpdk.git] / lib / librte_ether / rte_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/types.h>
35 #include <sys/queue.h>
36 #include <ctype.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include <stdarg.h>
41 #include <errno.h>
42 #include <stdint.h>
43 #include <inttypes.h>
44 #include <netinet/in.h>
45
46 #include <rte_byteorder.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_interrupts.h>
50 #include <rte_pci.h>
51 #include <rte_memory.h>
52 #include <rte_memcpy.h>
53 #include <rte_memzone.h>
54 #include <rte_launch.h>
55 #include <rte_eal.h>
56 #include <rte_per_lcore.h>
57 #include <rte_lcore.h>
58 #include <rte_atomic.h>
59 #include <rte_branch_prediction.h>
60 #include <rte_common.h>
61 #include <rte_ring.h>
62 #include <rte_mempool.h>
63 #include <rte_malloc.h>
64 #include <rte_mbuf.h>
65 #include <rte_errno.h>
66 #include <rte_spinlock.h>
67 #include <rte_string_fns.h>
68
69 #include "rte_ether.h"
70 #include "rte_ethdev.h"
71
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
/* Debug-only trace macro: prefixes the message with the calling function's
 * name via __func__; compiles to nothing unless RTE_LIBRTE_ETHDEV_DEBUG
 * is enabled. */
#define PMD_DEBUG_TRACE(fmt, args...) do {                        \
		RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
	} while (0)
#else
#define PMD_DEBUG_TRACE(fmt, args...)
#endif

/* Macros for checking for restricting functions to primary instance only */
#define PROC_PRIMARY_OR_ERR_RET(retval) do { \
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
		PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
		return (retval); \
	} \
} while (0)

/* Same check as PROC_PRIMARY_OR_ERR_RET, for void functions */
#define PROC_PRIMARY_OR_RET() do { \
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
		PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
		return; \
	} \
} while (0)

/* Macros to check for invalid function pointers in dev_ops structure */
#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
	if ((func) == NULL) { \
		PMD_DEBUG_TRACE("Function not supported\n"); \
		return (retval); \
	} \
} while (0)

/* Same check as FUNC_PTR_OR_ERR_RET, for void functions */
#define FUNC_PTR_OR_RET(func) do { \
	if ((func) == NULL) { \
		PMD_DEBUG_TRACE("Function not supported\n"); \
		return; \
	} \
} while (0)

/* Macros to check for valid port */
#define VALID_PORTID_OR_ERR_RET(port_id, retval) do {           \
	if (!rte_eth_dev_is_valid_port(port_id)) {              \
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
		return retval;                                  \
	}                                                       \
} while (0)

/* Same check as VALID_PORTID_OR_ERR_RET, for void functions */
#define VALID_PORTID_OR_RET(port_id) do {                       \
	if (!rte_eth_dev_is_valid_port(port_id)) {              \
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
		return;                                         \
	}                                                       \
} while (0)
124
/* Name of the shared memzone that backs the rte_eth_dev_data array */
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
/* Per-process table of ethdev instances, indexed by port id */
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
/* Memzone-backed per-port data shared across processes; set up lazily by
 * rte_eth_dev_data_alloc() */
static struct rte_eth_dev_data *rte_eth_dev_data;
/* Count of currently attached ports (maintained by allocate/release) */
static uint8_t nb_ports;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and its offset in stats structure  */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

/* Device-level basic stats exposed through xstats: each entry maps a
 * display name to its byte offset inside struct rte_eth_stats */
static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

/* Per-RX-queue stats names; offsets point at the per-queue q_* counters */
static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
		sizeof(rte_rxq_stats_strings[0]))

/* Per-TX-queue stats names; same offset scheme as the RX table */
static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
		sizeof(rte_txq_stats_strings[0]))
167
168
/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

/* Direction selector used by the queue-stats mapping helpers */
enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

/* Attachment state of a slot in rte_eth_devices[] */
enum {
	DEV_DETACHED = 0,
	DEV_ATTACHED
};
192
193 static void
194 rte_eth_dev_data_alloc(void)
195 {
196         const unsigned flags = 0;
197         const struct rte_memzone *mz;
198
199         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
200                 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
201                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
202                                 rte_socket_id(), flags);
203         } else
204                 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
205         if (mz == NULL)
206                 rte_panic("Cannot allocate memzone for ethernet port data\n");
207
208         rte_eth_dev_data = mz->addr;
209         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
210                 memset(rte_eth_dev_data, 0,
211                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
212 }
213
214 struct rte_eth_dev *
215 rte_eth_dev_allocated(const char *name)
216 {
217         unsigned i;
218
219         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
220                 if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
221                     strcmp(rte_eth_devices[i].data->name, name) == 0)
222                         return &rte_eth_devices[i];
223         }
224         return NULL;
225 }
226
227 static uint8_t
228 rte_eth_dev_find_free_port(void)
229 {
230         unsigned i;
231
232         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
233                 if (rte_eth_devices[i].attached == DEV_DETACHED)
234                         return i;
235         }
236         return RTE_MAX_ETHPORTS;
237 }
238
239 struct rte_eth_dev *
240 rte_eth_dev_allocate(const char *name, enum rte_eth_dev_type type)
241 {
242         uint8_t port_id;
243         struct rte_eth_dev *eth_dev;
244
245         port_id = rte_eth_dev_find_free_port();
246         if (port_id == RTE_MAX_ETHPORTS) {
247                 PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
248                 return NULL;
249         }
250
251         if (rte_eth_dev_data == NULL)
252                 rte_eth_dev_data_alloc();
253
254         if (rte_eth_dev_allocated(name) != NULL) {
255                 PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
256                                 name);
257                 return NULL;
258         }
259
260         eth_dev = &rte_eth_devices[port_id];
261         eth_dev->data = &rte_eth_dev_data[port_id];
262         snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
263         eth_dev->data->port_id = port_id;
264         eth_dev->attached = DEV_ATTACHED;
265         eth_dev->dev_type = type;
266         nb_ports++;
267         return eth_dev;
268 }
269
270 static int
271 rte_eth_dev_create_unique_device_name(char *name, size_t size,
272                 struct rte_pci_device *pci_dev)
273 {
274         int ret;
275
276         if ((name == NULL) || (pci_dev == NULL))
277                 return -EINVAL;
278
279         ret = snprintf(name, size, "%d:%d.%d",
280                         pci_dev->addr.bus, pci_dev->addr.devid,
281                         pci_dev->addr.function);
282         if (ret < 0)
283                 return ret;
284         return 0;
285 }
286
287 int
288 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
289 {
290         if (eth_dev == NULL)
291                 return -EINVAL;
292
293         eth_dev->attached = DEV_DETACHED;
294         nb_ports--;
295         return 0;
296 }
297
298 static int
299 rte_eth_dev_init(struct rte_pci_driver *pci_drv,
300                  struct rte_pci_device *pci_dev)
301 {
302         struct eth_driver    *eth_drv;
303         struct rte_eth_dev *eth_dev;
304         char ethdev_name[RTE_ETH_NAME_MAX_LEN];
305
306         int diag;
307
308         eth_drv = (struct eth_driver *)pci_drv;
309
310         /* Create unique Ethernet device name using PCI address */
311         rte_eth_dev_create_unique_device_name(ethdev_name,
312                         sizeof(ethdev_name), pci_dev);
313
314         eth_dev = rte_eth_dev_allocate(ethdev_name, RTE_ETH_DEV_PCI);
315         if (eth_dev == NULL)
316                 return -ENOMEM;
317
318         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
319                 eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
320                                   eth_drv->dev_private_size,
321                                   RTE_CACHE_LINE_SIZE);
322                 if (eth_dev->data->dev_private == NULL)
323                         rte_panic("Cannot allocate memzone for private port data\n");
324         }
325         eth_dev->pci_dev = pci_dev;
326         eth_dev->driver = eth_drv;
327         eth_dev->data->rx_mbuf_alloc_failed = 0;
328
329         /* init user callbacks */
330         TAILQ_INIT(&(eth_dev->link_intr_cbs));
331
332         /*
333          * Set the default MTU.
334          */
335         eth_dev->data->mtu = ETHER_MTU;
336
337         /* Invoke PMD device initialization function */
338         diag = (*eth_drv->eth_dev_init)(eth_dev);
339         if (diag == 0)
340                 return 0;
341
342         PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%u device_id=0x%x) failed\n",
343                         pci_drv->name,
344                         (unsigned) pci_dev->id.vendor_id,
345                         (unsigned) pci_dev->id.device_id);
346         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
347                 rte_free(eth_dev->data->dev_private);
348         rte_eth_dev_release_port(eth_dev);
349         return diag;
350 }
351
352 static int
353 rte_eth_dev_uninit(struct rte_pci_device *pci_dev)
354 {
355         const struct eth_driver *eth_drv;
356         struct rte_eth_dev *eth_dev;
357         char ethdev_name[RTE_ETH_NAME_MAX_LEN];
358         int ret;
359
360         if (pci_dev == NULL)
361                 return -EINVAL;
362
363         /* Create unique Ethernet device name using PCI address */
364         rte_eth_dev_create_unique_device_name(ethdev_name,
365                         sizeof(ethdev_name), pci_dev);
366
367         eth_dev = rte_eth_dev_allocated(ethdev_name);
368         if (eth_dev == NULL)
369                 return -ENODEV;
370
371         eth_drv = (const struct eth_driver *)pci_dev->driver;
372
373         /* Invoke PMD device uninit function */
374         if (*eth_drv->eth_dev_uninit) {
375                 ret = (*eth_drv->eth_dev_uninit)(eth_dev);
376                 if (ret)
377                         return ret;
378         }
379
380         /* free ether device */
381         rte_eth_dev_release_port(eth_dev);
382
383         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
384                 rte_free(eth_dev->data->dev_private);
385
386         eth_dev->pci_dev = NULL;
387         eth_dev->driver = NULL;
388         eth_dev->data = NULL;
389
390         return 0;
391 }
392
393 /**
394  * Register an Ethernet [Poll Mode] driver.
395  *
396  * Function invoked by the initialization function of an Ethernet driver
397  * to simultaneously register itself as a PCI driver and as an Ethernet
398  * Poll Mode Driver.
399  * Invokes the rte_eal_pci_register() function to register the *pci_drv*
400  * structure embedded in the *eth_drv* structure, after having stored the
401  * address of the rte_eth_dev_init() function in the *devinit* field of
402  * the *pci_drv* structure.
403  * During the PCI probing phase, the rte_eth_dev_init() function is
404  * invoked for each PCI [Ethernet device] matching the embedded PCI
405  * identifiers provided by the driver.
406  */
void
rte_eth_driver_register(struct eth_driver *eth_drv)
{
	/* hook the generic ethdev init/uninit handlers into the embedded
	 * PCI driver so EAL invokes them at probe/remove time */
	eth_drv->pci_drv.devinit = rte_eth_dev_init;
	eth_drv->pci_drv.devuninit = rte_eth_dev_uninit;
	rte_eal_pci_register(&eth_drv->pci_drv);
}
414
415 int
416 rte_eth_dev_is_valid_port(uint8_t port_id)
417 {
418         if (port_id >= RTE_MAX_ETHPORTS ||
419             rte_eth_devices[port_id].attached != DEV_ATTACHED)
420                 return 0;
421         else
422                 return 1;
423 }
424
425 int
426 rte_eth_dev_socket_id(uint8_t port_id)
427 {
428         if (!rte_eth_dev_is_valid_port(port_id))
429                 return -1;
430         return rte_eth_devices[port_id].data->numa_node;
431 }
432
/* Return the number of currently attached Ethernet ports. */
uint8_t
rte_eth_dev_count(void)
{
	/* nb_ports is maintained by rte_eth_dev_allocate()/release_port() */
	return nb_ports;
}
438
439 static enum rte_eth_dev_type
440 rte_eth_dev_get_device_type(uint8_t port_id)
441 {
442         if (!rte_eth_dev_is_valid_port(port_id))
443                 return RTE_ETH_DEV_UNKNOWN;
444         return rte_eth_devices[port_id].dev_type;
445 }
446
447 static int
448 rte_eth_dev_get_addr_by_port(uint8_t port_id, struct rte_pci_addr *addr)
449 {
450         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
451
452         if (addr == NULL) {
453                 PMD_DEBUG_TRACE("Null pointer is specified\n");
454                 return -EINVAL;
455         }
456
457         *addr = rte_eth_devices[port_id].pci_dev->addr;
458         return 0;
459 }
460
461 static int
462 rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
463 {
464         char *tmp;
465
466         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
467
468         if (name == NULL) {
469                 PMD_DEBUG_TRACE("Null pointer is specified\n");
470                 return -EINVAL;
471         }
472
473         /* shouldn't check 'rte_eth_devices[i].data',
474          * because it might be overwritten by VDEV PMD */
475         tmp = rte_eth_dev_data[port_id].name;
476         strcpy(name, tmp);
477         return 0;
478 }
479
480 static int
481 rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
482 {
483         int i;
484
485         if (name == NULL) {
486                 PMD_DEBUG_TRACE("Null pointer is specified\n");
487                 return -EINVAL;
488         }
489
490         *port_id = RTE_MAX_ETHPORTS;
491
492         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
493
494                 if (!strncmp(name,
495                         rte_eth_dev_data[i].name, strlen(name))) {
496
497                         *port_id = i;
498
499                         return 0;
500                 }
501         }
502         return -ENODEV;
503 }
504
505 static int
506 rte_eth_dev_get_port_by_addr(const struct rte_pci_addr *addr, uint8_t *port_id)
507 {
508         int i;
509         struct rte_pci_device *pci_dev = NULL;
510
511         if (addr == NULL) {
512                 PMD_DEBUG_TRACE("Null pointer is specified\n");
513                 return -EINVAL;
514         }
515
516         *port_id = RTE_MAX_ETHPORTS;
517
518         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
519
520                 pci_dev = rte_eth_devices[i].pci_dev;
521
522                 if (pci_dev &&
523                         !rte_eal_compare_pci_addr(&pci_dev->addr, addr)) {
524
525                         *port_id = i;
526
527                         return 0;
528                 }
529         }
530         return -ENODEV;
531 }
532
533 static int
534 rte_eth_dev_is_detachable(uint8_t port_id)
535 {
536         uint32_t dev_flags;
537
538         if (!rte_eth_dev_is_valid_port(port_id)) {
539                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
540                 return -EINVAL;
541         }
542
543         switch (rte_eth_devices[port_id].data->kdrv) {
544         case RTE_KDRV_IGB_UIO:
545         case RTE_KDRV_UIO_GENERIC:
546         case RTE_KDRV_NIC_UIO:
547         case RTE_KDRV_NONE:
548                 break;
549         case RTE_KDRV_VFIO:
550         default:
551                 return -ENOTSUP;
552         }
553         dev_flags = rte_eth_devices[port_id].data->dev_flags;
554         return !(dev_flags & RTE_ETH_DEV_DETACHABLE);
555 }
556
557 /* attach the new physical device, then store port_id of the device */
558 static int
559 rte_eth_dev_attach_pdev(struct rte_pci_addr *addr, uint8_t *port_id)
560 {
561         if ((addr == NULL) || (port_id == NULL))
562                 goto err;
563
564         /* re-construct pci_device_list */
565         if (rte_eal_pci_scan())
566                 goto err;
567         /* Invoke probe func of the driver can handle the new device. */
568         if (rte_eal_pci_probe_one(addr))
569                 goto err;
570
571         if (rte_eth_dev_get_port_by_addr(addr, port_id))
572                 goto err;
573
574         return 0;
575 err:
576         RTE_LOG(ERR, EAL, "Driver, cannot attach the device\n");
577         return -1;
578 }
579
580 /* detach the new physical device, then store pci_addr of the device */
/* Returns 0 and stores the freed PCI address in *addr on success,
 * -1 (with an error log) on any failure. */
static int
rte_eth_dev_detach_pdev(uint8_t port_id, struct rte_pci_addr *addr)
{
	struct rte_pci_addr freed_addr;
	struct rte_pci_addr vp;

	if (addr == NULL)
		goto err;

	/* check whether the driver supports detach feature, or not */
	if (rte_eth_dev_is_detachable(port_id))
		goto err;

	/* get pci address by port id */
	if (rte_eth_dev_get_addr_by_port(port_id, &freed_addr))
		goto err;

	/* Zeroed pci addr means the port comes from virtual device */
	vp.domain = vp.bus = vp.devid = vp.function = 0;
	if (rte_eal_compare_pci_addr(&vp, &freed_addr) == 0)
		goto err;

	/* invoke devuninit func of the pci driver,
	 * also remove the device from pci_device_list */
	if (rte_eal_pci_detach(&freed_addr))
		goto err;

	/* report the address that was freed so the caller can format it */
	*addr = freed_addr;
	return 0;
err:
	RTE_LOG(ERR, EAL, "Driver, cannot detach the device\n");
	return -1;
}
614
615 /* attach the new virtual device, then store port_id of the device */
616 static int
617 rte_eth_dev_attach_vdev(const char *vdevargs, uint8_t *port_id)
618 {
619         char *name = NULL, *args = NULL;
620         int ret = -1;
621
622         if ((vdevargs == NULL) || (port_id == NULL))
623                 goto end;
624
625         /* parse vdevargs, then retrieve device name and args */
626         if (rte_eal_parse_devargs_str(vdevargs, &name, &args))
627                 goto end;
628
629         /* walk around dev_driver_list to find the driver of the device,
630          * then invoke probe function of the driver.
631          * rte_eal_vdev_init() updates port_id allocated after
632          * initialization.
633          */
634         if (rte_eal_vdev_init(name, args))
635                 goto end;
636
637         if (rte_eth_dev_get_port_by_name(name, port_id))
638                 goto end;
639
640         ret = 0;
641 end:
642         if (name)
643                 free(name);
644         if (args)
645                 free(args);
646
647         if (ret < 0)
648                 RTE_LOG(ERR, EAL, "Driver, cannot attach the device\n");
649         return ret;
650 }
651
652 /* detach the new virtual device, then store the name of the device */
653 static int
654 rte_eth_dev_detach_vdev(uint8_t port_id, char *vdevname)
655 {
656         char name[RTE_ETH_NAME_MAX_LEN];
657
658         if (vdevname == NULL)
659                 goto err;
660
661         /* check whether the driver supports detach feature, or not */
662         if (rte_eth_dev_is_detachable(port_id))
663                 goto err;
664
665         /* get device name by port id */
666         if (rte_eth_dev_get_name_by_port(port_id, name))
667                 goto err;
668         /* walk around dev_driver_list to find the driver of the device,
669          * then invoke uninit function of the driver */
670         if (rte_eal_vdev_uninit(name))
671                 goto err;
672
673         strncpy(vdevname, name, sizeof(name));
674         return 0;
675 err:
676         RTE_LOG(ERR, EAL, "Driver, cannot detach the device\n");
677         return -1;
678 }
679
680 /* attach the new device, then store port_id of the device */
681 int
682 rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
683 {
684         struct rte_pci_addr addr;
685
686         if ((devargs == NULL) || (port_id == NULL))
687                 return -EINVAL;
688
689         if (eal_parse_pci_DomBDF(devargs, &addr) == 0)
690                 return rte_eth_dev_attach_pdev(&addr, port_id);
691         else
692                 return rte_eth_dev_attach_vdev(devargs, port_id);
693 }
694
695 /* detach the device, then store the name of the device */
696 int
697 rte_eth_dev_detach(uint8_t port_id, char *name)
698 {
699         struct rte_pci_addr addr;
700         int ret;
701
702         if (name == NULL)
703                 return -EINVAL;
704
705         if (rte_eth_dev_get_device_type(port_id) == RTE_ETH_DEV_PCI) {
706                 ret = rte_eth_dev_get_addr_by_port(port_id, &addr);
707                 if (ret < 0)
708                         return ret;
709
710                 ret = rte_eth_dev_detach_pdev(port_id, &addr);
711                 if (ret == 0)
712                         snprintf(name, RTE_ETH_NAME_MAX_LEN,
713                                 "%04x:%02x:%02x.%d",
714                                 addr.domain, addr.bus,
715                                 addr.devid, addr.function);
716
717                 return ret;
718         } else
719                 return rte_eth_dev_detach_vdev(port_id, name);
720 }
721
/* Grow or shrink the per-device RX queue pointer array to @nb_queues.
 * First call allocates it; later calls release queues beyond the new
 * count, realloc the array, and zero any newly added slots.
 * Returns 0 on success, -ENOMEM/-ENOTSUP on failure. */
static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	void **rxq;
	unsigned i;

	if (dev->data->rx_queues == NULL) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else { /* re-configure */
		FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		/* shrinking: release the queues past the new count first */
		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);
		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (rxq == NULL)
			return -(ENOMEM);
		/* NOTE(review): on realloc failure dev->data->rx_queues still
		 * holds the old array with already-released queue pointers —
		 * confirm callers treat -ENOMEM as fatal for the port. */
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			/* growing: new slots must start out NULL */
			memset(rxq + old_nb_queues, 0,
				sizeof(rxq[0]) * new_qs);
		}

		dev->data->rx_queues = rxq;

	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}
761
762 int
763 rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
764 {
765         struct rte_eth_dev *dev;
766
767         /* This function is only safe when called from the primary process
768          * in a multi-process setup*/
769         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
770
771         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
772
773         dev = &rte_eth_devices[port_id];
774         if (rx_queue_id >= dev->data->nb_rx_queues) {
775                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
776                 return -EINVAL;
777         }
778
779         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
780
781         if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
782                 PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
783                         " already started\n",
784                         rx_queue_id, port_id);
785                 return 0;
786         }
787
788         return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
789
790 }
791
792 int
793 rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
794 {
795         struct rte_eth_dev *dev;
796
797         /* This function is only safe when called from the primary process
798          * in a multi-process setup*/
799         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
800
801         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
802
803         dev = &rte_eth_devices[port_id];
804         if (rx_queue_id >= dev->data->nb_rx_queues) {
805                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
806                 return -EINVAL;
807         }
808
809         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
810
811         if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
812                 PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
813                         " already stopped\n",
814                         rx_queue_id, port_id);
815                 return 0;
816         }
817
818         return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
819
820 }
821
822 int
823 rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
824 {
825         struct rte_eth_dev *dev;
826
827         /* This function is only safe when called from the primary process
828          * in a multi-process setup*/
829         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
830
831         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
832
833         dev = &rte_eth_devices[port_id];
834         if (tx_queue_id >= dev->data->nb_tx_queues) {
835                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
836                 return -EINVAL;
837         }
838
839         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
840
841         if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
842                 PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
843                         " already started\n",
844                         tx_queue_id, port_id);
845                 return 0;
846         }
847
848         return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
849
850 }
851
852 int
853 rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
854 {
855         struct rte_eth_dev *dev;
856
857         /* This function is only safe when called from the primary process
858          * in a multi-process setup*/
859         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
860
861         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
862
863         dev = &rte_eth_devices[port_id];
864         if (tx_queue_id >= dev->data->nb_tx_queues) {
865                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
866                 return -EINVAL;
867         }
868
869         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
870
871         if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
872                 PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
873                         " already stopped\n",
874                         tx_queue_id, port_id);
875                 return 0;
876         }
877
878         return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
879
880 }
881
/* Grow or shrink the per-device TX queue pointer array to @nb_queues;
 * mirrors rte_eth_dev_rx_queue_config().
 * Returns 0 on success, -ENOMEM/-ENOTSUP on failure. */
static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	void **txq;
	unsigned i;

	if (dev->data->tx_queues == NULL) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
						   sizeof(dev->data->tx_queues[0]) * nb_queues,
						   RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else { /* re-configure */
		FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		/* shrinking: release the queues past the new count first */
		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);
		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
				  RTE_CACHE_LINE_SIZE);
		if (txq == NULL)
			return -ENOMEM;
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			/* growing: new slots must start out NULL */
			memset(txq + old_nb_queues, 0,
			       sizeof(txq[0]) * new_qs);
		}

		dev->data->tx_queues = txq;

	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}
921
/*
 * Configure an Ethernet device: validate the requested RX/TX queue counts
 * against the build-time maximum and the PMD-reported limits, check the
 * link-interrupt and jumbo-frame settings, (re)allocate the queue pointer
 * arrays, and finally invoke the PMD's dev_configure callback.
 *
 * The device must be stopped.  Returns 0 on success or a negative errno;
 * on failure after queue allocation, the queue arrays are freed again.
 */
int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	/* Build-time cap, checked before touching the device. */
	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		PMD_DEBUG_TRACE(
			"Number of RX queues requested (%u) is greater than max supported(%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		return -EINVAL;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		PMD_DEBUG_TRACE(
			"Number of TX queues requested (%u) is greater than max supported(%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
	if (nb_rx_q > dev_info.max_rx_queues) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
				port_id, nb_rx_q, dev_info.max_rx_queues);
		return -EINVAL;
	}
	/* Zero queues in either direction is rejected. */
	if (nb_rx_q == 0) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
		return -EINVAL;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
				port_id, nb_tx_q, dev_info.max_tx_queues);
		return -EINVAL;
	}
	if (nb_tx_q == 0) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
		return -EINVAL;
	}

	/* Copy the dev_conf parameter into the dev structure */
	/* NOTE(review): the copy happens before the lsc / jumbo-frame
	 * validation below, so a failed configure leaves a partially
	 * updated dev_conf behind — confirm this is intentional. */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * If link state interrupt is enabled, check that the
	 * device supports it.
	 */
	if ((dev_conf->intr_conf.lsc == 1) &&
		(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
			PMD_DEBUG_TRACE("driver %s does not support lsc\n",
					dev->data->drv_name);
			return -EINVAL;
	}

	/*
	 * If jumbo frames are enabled, check that the maximum RX packet
	 * length is supported by the configured device.
	 */
	if (dev_conf->rxmode.jumbo_frame == 1) {
		if (dev_conf->rxmode.max_rx_pkt_len >
		    dev_info.max_rx_pktlen) {
			PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" > max valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)dev_info.max_rx_pktlen);
			return -EINVAL;
		} else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
			PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" < min valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)ETHER_MIN_LEN);
			return -EINVAL;
		}
	} else {
		/* No jumbo frames: clamp an out-of-range max_rx_pkt_len
		 * to the standard Ethernet maximum. */
		if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
			dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
			/* Use default value */
			dev->data->dev_conf.rxmode.max_rx_pkt_len =
							ETHER_MAX_LEN;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
				port_id, diag);
		return diag;
	}

	diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
				port_id, diag);
		/* Roll back the RX queue array allocated above. */
		rte_eth_dev_rx_queue_config(dev, 0);
		return diag;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
				port_id, diag);
		/* Roll back both queue arrays on PMD failure. */
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		return diag;
	}

	return 0;
}
1059
/*
 * Re-apply software-recorded configuration (MAC addresses, promiscuous and
 * all-multicast modes) to the hardware, used after a device (re)start.
 * Errors are logged but never propagated: this is best-effort by design.
 */
static void
rte_eth_dev_config_restore(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct ether_addr addr;
	uint16_t i;
	uint32_t pool = 0;

	dev = &rte_eth_devices[port_id];

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Under SR-IOV, addresses are replayed into the default VMDq pool. */
	if (RTE_ETH_DEV_SRIOV(dev).active)
		pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

	/* replay MAC address configuration */
	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = dev->data->mac_addrs[i];

		/* skip zero address */
		if (is_zero_ether_addr(&addr))
			continue;

		/* add address to the hardware */
		if  (*dev->dev_ops->mac_addr_add &&
			(dev->data->mac_pool_sel[i] & (1ULL << pool)))
			(*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
		else {
			PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
					port_id);
			/* exit the loop but not return an error */
			break;
		}
	}

	/* replay promiscuous configuration */
	if (rte_eth_promiscuous_get(port_id) == 1)
		rte_eth_promiscuous_enable(port_id);
	else if (rte_eth_promiscuous_get(port_id) == 0)
		rte_eth_promiscuous_disable(port_id);

	/* replay all multicast configuration */
	if (rte_eth_allmulticast_get(port_id) == 1)
		rte_eth_allmulticast_enable(port_id);
	else if (rte_eth_allmulticast_get(port_id) == 0)
		rte_eth_allmulticast_disable(port_id);
}
1108
1109 int
1110 rte_eth_dev_start(uint8_t port_id)
1111 {
1112         struct rte_eth_dev *dev;
1113         int diag;
1114
1115         /* This function is only safe when called from the primary process
1116          * in a multi-process setup*/
1117         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1118
1119         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1120
1121         dev = &rte_eth_devices[port_id];
1122
1123         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1124
1125         if (dev->data->dev_started != 0) {
1126                 PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
1127                         " already started\n",
1128                         port_id);
1129                 return 0;
1130         }
1131
1132         diag = (*dev->dev_ops->dev_start)(dev);
1133         if (diag == 0)
1134                 dev->data->dev_started = 1;
1135         else
1136                 return diag;
1137
1138         rte_eth_dev_config_restore(port_id);
1139
1140         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1141                 FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1142                 (*dev->dev_ops->link_update)(dev, 0);
1143         }
1144         return 0;
1145 }
1146
1147 void
1148 rte_eth_dev_stop(uint8_t port_id)
1149 {
1150         struct rte_eth_dev *dev;
1151
1152         /* This function is only safe when called from the primary process
1153          * in a multi-process setup*/
1154         PROC_PRIMARY_OR_RET();
1155
1156         VALID_PORTID_OR_RET(port_id);
1157         dev = &rte_eth_devices[port_id];
1158
1159         FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1160
1161         if (dev->data->dev_started == 0) {
1162                 PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
1163                         " already stopped\n",
1164                         port_id);
1165                 return;
1166         }
1167
1168         dev->data->dev_started = 0;
1169         (*dev->dev_ops->dev_stop)(dev);
1170 }
1171
1172 int
1173 rte_eth_dev_set_link_up(uint8_t port_id)
1174 {
1175         struct rte_eth_dev *dev;
1176
1177         /* This function is only safe when called from the primary process
1178          * in a multi-process setup*/
1179         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1180
1181         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1182
1183         dev = &rte_eth_devices[port_id];
1184
1185         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1186         return (*dev->dev_ops->dev_set_link_up)(dev);
1187 }
1188
1189 int
1190 rte_eth_dev_set_link_down(uint8_t port_id)
1191 {
1192         struct rte_eth_dev *dev;
1193
1194         /* This function is only safe when called from the primary process
1195          * in a multi-process setup*/
1196         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1197
1198         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1199
1200         dev = &rte_eth_devices[port_id];
1201
1202         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1203         return (*dev->dev_ops->dev_set_link_down)(dev);
1204 }
1205
1206 void
1207 rte_eth_dev_close(uint8_t port_id)
1208 {
1209         struct rte_eth_dev *dev;
1210
1211         /* This function is only safe when called from the primary process
1212          * in a multi-process setup*/
1213         PROC_PRIMARY_OR_RET();
1214
1215         VALID_PORTID_OR_RET(port_id);
1216         dev = &rte_eth_devices[port_id];
1217
1218         FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1219         dev->data->dev_started = 0;
1220         (*dev->dev_ops->dev_close)(dev);
1221
1222         rte_free(dev->data->rx_queues);
1223         dev->data->rx_queues = NULL;
1224         rte_free(dev->data->tx_queues);
1225         dev->data->tx_queues = NULL;
1226 }
1227
/*
 * Set up an RX queue: validate the queue index, the mbuf pool's buffer
 * sizing against the PMD's minimum, and the descriptor count against the
 * PMD-reported limits, then call the PMD's rx_queue_setup.
 *
 * The device must be stopped.  A NULL rx_conf selects the PMD default
 * configuration.  Returns 0 on success or a negative errno.
 */
int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint32_t mbp_buf_size;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

	/*
	 * Check the size of the mbuf data buffer.
	 * This value must be provided in the private data of the memory pool.
	 * First check that the memory pool has a valid private data.
	 */
	rte_eth_dev_info_get(port_id, &dev_info);
	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
				mp->name, (int) mp->private_data_size,
				(int) sizeof(struct rte_pktmbuf_pool_private));
		return -ENOSPC;
	}
	mbp_buf_size = rte_pktmbuf_data_room_size(mp);

	/* The usable part of each buffer (after headroom) must meet the
	 * PMD's minimum RX buffer size. */
	if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
		PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
				"(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
				"=%d)\n",
				mp->name,
				(int)mbp_buf_size,
				(int)(RTE_PKTMBUF_HEADROOM +
				      dev_info.min_rx_bufsize),
				(int)RTE_PKTMBUF_HEADROOM,
				(int)dev_info.min_rx_bufsize);
		return -EINVAL;
	}

	/* Descriptor count must sit inside the PMD's min/max limits and
	 * be a multiple of its alignment requirement. */
	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
			nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
			nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {

		PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
			"should be: <= %hu, = %hu, and a product of %hu\n",
			nb_rx_desc,
			dev_info.rx_desc_lim.nb_max,
			dev_info.rx_desc_lim.nb_min,
			dev_info.rx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (rx_conf == NULL)
		rx_conf = &dev_info.default_rxconf;

	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
					      socket_id, rx_conf, mp);
	if (!ret) {
		/* Track the smallest RX buffer across all queues. */
		if (!dev->data->min_rx_buf_size ||
		    dev->data->min_rx_buf_size > mbp_buf_size)
			dev->data->min_rx_buf_size = mbp_buf_size;
	}

	return ret;
}
1313
1314 int
1315 rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
1316                        uint16_t nb_tx_desc, unsigned int socket_id,
1317                        const struct rte_eth_txconf *tx_conf)
1318 {
1319         struct rte_eth_dev *dev;
1320         struct rte_eth_dev_info dev_info;
1321
1322         /* This function is only safe when called from the primary process
1323          * in a multi-process setup*/
1324         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1325
1326         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1327
1328         dev = &rte_eth_devices[port_id];
1329         if (tx_queue_id >= dev->data->nb_tx_queues) {
1330                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1331                 return -EINVAL;
1332         }
1333
1334         if (dev->data->dev_started) {
1335                 PMD_DEBUG_TRACE(
1336                     "port %d must be stopped to allow configuration\n", port_id);
1337                 return -EBUSY;
1338         }
1339
1340         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1341         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1342
1343         rte_eth_dev_info_get(port_id, &dev_info);
1344
1345         if (tx_conf == NULL)
1346                 tx_conf = &dev_info.default_txconf;
1347
1348         return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
1349                                                socket_id, tx_conf);
1350 }
1351
1352 void
1353 rte_eth_promiscuous_enable(uint8_t port_id)
1354 {
1355         struct rte_eth_dev *dev;
1356
1357         VALID_PORTID_OR_RET(port_id);
1358         dev = &rte_eth_devices[port_id];
1359
1360         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1361         (*dev->dev_ops->promiscuous_enable)(dev);
1362         dev->data->promiscuous = 1;
1363 }
1364
1365 void
1366 rte_eth_promiscuous_disable(uint8_t port_id)
1367 {
1368         struct rte_eth_dev *dev;
1369
1370         VALID_PORTID_OR_RET(port_id);
1371         dev = &rte_eth_devices[port_id];
1372
1373         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1374         dev->data->promiscuous = 0;
1375         (*dev->dev_ops->promiscuous_disable)(dev);
1376 }
1377
1378 int
1379 rte_eth_promiscuous_get(uint8_t port_id)
1380 {
1381         struct rte_eth_dev *dev;
1382
1383         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1384
1385         dev = &rte_eth_devices[port_id];
1386         return dev->data->promiscuous;
1387 }
1388
1389 void
1390 rte_eth_allmulticast_enable(uint8_t port_id)
1391 {
1392         struct rte_eth_dev *dev;
1393
1394         VALID_PORTID_OR_RET(port_id);
1395         dev = &rte_eth_devices[port_id];
1396
1397         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1398         (*dev->dev_ops->allmulticast_enable)(dev);
1399         dev->data->all_multicast = 1;
1400 }
1401
1402 void
1403 rte_eth_allmulticast_disable(uint8_t port_id)
1404 {
1405         struct rte_eth_dev *dev;
1406
1407         VALID_PORTID_OR_RET(port_id);
1408         dev = &rte_eth_devices[port_id];
1409
1410         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1411         dev->data->all_multicast = 0;
1412         (*dev->dev_ops->allmulticast_disable)(dev);
1413 }
1414
1415 int
1416 rte_eth_allmulticast_get(uint8_t port_id)
1417 {
1418         struct rte_eth_dev *dev;
1419
1420         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1421
1422         dev = &rte_eth_devices[port_id];
1423         return dev->data->all_multicast;
1424 }
1425
/*
 * Atomically snapshot the 64-bit-wide link status of a device into *link.
 * The cmpset writes *src into *dst only if *dst still equals the value
 * read when the arguments were evaluated; a concurrent writer makes it
 * fail.  Returns 0 on success, -1 if the update raced.
 *
 * NOTE(review): relies on struct rte_eth_link fitting in exactly 64 bits
 * and on the uint64_t casts being acceptable aliasing — long-standing
 * DPDK convention, but worth keeping in mind.
 */
static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
1439
1440 void
1441 rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
1442 {
1443         struct rte_eth_dev *dev;
1444
1445         VALID_PORTID_OR_RET(port_id);
1446         dev = &rte_eth_devices[port_id];
1447
1448         if (dev->data->dev_conf.intr_conf.lsc != 0)
1449                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1450         else {
1451                 FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1452                 (*dev->dev_ops->link_update)(dev, 1);
1453                 *eth_link = dev->data->dev_link;
1454         }
1455 }
1456
1457 void
1458 rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
1459 {
1460         struct rte_eth_dev *dev;
1461
1462         VALID_PORTID_OR_RET(port_id);
1463         dev = &rte_eth_devices[port_id];
1464
1465         if (dev->data->dev_conf.intr_conf.lsc != 0)
1466                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1467         else {
1468                 FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1469                 (*dev->dev_ops->link_update)(dev, 0);
1470                 *eth_link = dev->data->dev_link;
1471         }
1472 }
1473
1474 int
1475 rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
1476 {
1477         struct rte_eth_dev *dev;
1478
1479         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1480
1481         dev = &rte_eth_devices[port_id];
1482         memset(stats, 0, sizeof(*stats));
1483
1484         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1485         (*dev->dev_ops->stats_get)(dev, stats);
1486         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1487         return 0;
1488 }
1489
1490 void
1491 rte_eth_stats_reset(uint8_t port_id)
1492 {
1493         struct rte_eth_dev *dev;
1494
1495         VALID_PORTID_OR_RET(port_id);
1496         dev = &rte_eth_devices[port_id];
1497
1498         FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1499         (*dev->dev_ops->stats_reset)(dev);
1500 }
1501
/* retrieve ethdev extended statistics */
/*
 * Fill up to n entries of xstats with extended statistics: the generic
 * stats first, then either the PMD's own xstats (if it implements
 * xstats_get) or per-queue breakdowns of the generic stats.
 *
 * Returns the total number of entries available; if that exceeds n, the
 * caller's array was too small and should be retried with the returned
 * size.  A negative value is a PMD error passed through.
 */
int
rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
	unsigned n)
{
	struct rte_eth_stats eth_stats;
	struct rte_eth_dev *dev;
	unsigned count = 0, i, q;
	signed xcount = 0;
	uint64_t val, *stats_ptr;

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	/* Return generic statistics */
	count = RTE_NB_STATS;

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL) {
		/* Retrieve the xstats from the driver at the end of the
		 * xstats struct.
		 */
		xcount = (*dev->dev_ops->xstats_get)(dev, &xstats[count],
			 (n > count) ? n - count : 0);

		if (xcount < 0)
			return xcount;
	} else {
		/* No driver xstats: expose per-queue generic stats instead. */
		count += dev->data->nb_rx_queues * RTE_NB_RXQ_STATS;
		count += dev->data->nb_tx_queues * RTE_NB_TXQ_STATS;
	}

	/* Array too small: report the required size without filling. */
	if (n < count + xcount)
		return count + xcount;

	/* now fill the xstats structure */
	count = 0;
	rte_eth_stats_get(port_id, &eth_stats);

	/* global stats */
	for (i = 0; i < RTE_NB_STATS; i++) {
		/* rte_stats_strings maps each entry to a byte offset
		 * inside struct rte_eth_stats. */
		stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_stats_strings[i].offset);
		val = *stats_ptr;
		snprintf(xstats[count].name, sizeof(xstats[count].name),
			"%s", rte_stats_strings[i].name);
		xstats[count++].value = val;
	}

	/* if xstats_get() is implemented by the PMD, the Q stats are done */
	if (dev->dev_ops->xstats_get != NULL)
		return count + xcount;

	/* per-rxq stats */
	for (q = 0; q < dev->data->nb_rx_queues; q++) {
		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_rxq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			snprintf(xstats[count].name, sizeof(xstats[count].name),
				"rx_q%u_%s", q,
				rte_rxq_stats_strings[i].name);
			xstats[count++].value = val;
		}
	}

	/* per-txq stats */
	for (q = 0; q < dev->data->nb_tx_queues; q++) {
		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_txq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			snprintf(xstats[count].name, sizeof(xstats[count].name),
				"tx_q%u_%s", q,
				rte_txq_stats_strings[i].name);
			xstats[count++].value = val;
		}
	}

	return count + xcount;
}
1586
1587 /* reset ethdev extended statistics */
1588 void
1589 rte_eth_xstats_reset(uint8_t port_id)
1590 {
1591         struct rte_eth_dev *dev;
1592
1593         VALID_PORTID_OR_RET(port_id);
1594         dev = &rte_eth_devices[port_id];
1595
1596         /* implemented by the driver */
1597         if (dev->dev_ops->xstats_reset != NULL) {
1598                 (*dev->dev_ops->xstats_reset)(dev);
1599                 return;
1600         }
1601
1602         /* fallback to default */
1603         rte_eth_stats_reset(port_id);
1604 }
1605
1606 static int
1607 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1608                 uint8_t is_rx)
1609 {
1610         struct rte_eth_dev *dev;
1611
1612         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1613
1614         dev = &rte_eth_devices[port_id];
1615
1616         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1617         return (*dev->dev_ops->queue_stats_mapping_set)
1618                         (dev, queue_id, stat_idx, is_rx);
1619 }
1620
1621
/* Map a TX queue's counters to statistics slot stat_idx; see
 * set_queue_stats_mapping() for the error codes. */
int
rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
		uint8_t stat_idx)
{
	return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
			STAT_QMAP_TX);
}
1629
1630
/* Map an RX queue's counters to statistics slot stat_idx; see
 * set_queue_stats_mapping() for the error codes. */
int
rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
		uint8_t stat_idx)
{
	return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
			STAT_QMAP_RX);
}
1638
1639
1640 void
1641 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1642 {
1643         struct rte_eth_dev *dev;
1644         const struct rte_eth_desc_lim lim = {
1645                 .nb_max = UINT16_MAX,
1646                 .nb_min = 0,
1647                 .nb_align = 1,
1648         };
1649
1650         VALID_PORTID_OR_RET(port_id);
1651         dev = &rte_eth_devices[port_id];
1652
1653         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
1654         dev_info->rx_desc_lim = lim;
1655         dev_info->tx_desc_lim = lim;
1656
1657         FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1658         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1659         dev_info->pci_dev = dev->pci_dev;
1660         dev_info->driver_name = dev->data->drv_name;
1661 }
1662
1663 void
1664 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1665 {
1666         struct rte_eth_dev *dev;
1667
1668         VALID_PORTID_OR_RET(port_id);
1669         dev = &rte_eth_devices[port_id];
1670         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1671 }
1672
1673
1674 int
1675 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1676 {
1677         struct rte_eth_dev *dev;
1678
1679         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1680
1681         dev = &rte_eth_devices[port_id];
1682         *mtu = dev->data->mtu;
1683         return 0;
1684 }
1685
1686 int
1687 rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
1688 {
1689         int ret;
1690         struct rte_eth_dev *dev;
1691
1692         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1693         dev = &rte_eth_devices[port_id];
1694         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
1695
1696         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
1697         if (!ret)
1698                 dev->data->mtu = mtu;
1699
1700         return ret;
1701 }
1702
1703 int
1704 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1705 {
1706         struct rte_eth_dev *dev;
1707
1708         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1709         dev = &rte_eth_devices[port_id];
1710         if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1711                 PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1712                 return -ENOSYS;
1713         }
1714
1715         if (vlan_id > 4095) {
1716                 PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1717                                 port_id, (unsigned) vlan_id);
1718                 return -EINVAL;
1719         }
1720         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1721
1722         return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1723 }
1724
1725 int
1726 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1727 {
1728         struct rte_eth_dev *dev;
1729
1730         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1731         dev = &rte_eth_devices[port_id];
1732         if (rx_queue_id >= dev->data->nb_rx_queues) {
1733                 PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", port_id);
1734                 return -EINVAL;
1735         }
1736
1737         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1738         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1739
1740         return 0;
1741 }
1742
1743 int
1744 rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
1745 {
1746         struct rte_eth_dev *dev;
1747
1748         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1749         dev = &rte_eth_devices[port_id];
1750         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1751         (*dev->dev_ops->vlan_tpid_set)(dev, tpid);
1752
1753         return 0;
1754 }
1755
/*
 * Apply the VLAN offload flags in offload_mask (ETH_VLAN_*_OFFLOAD bits).
 * The cached rxmode flags are updated first and a change mask is built;
 * the PMD callback is invoked only when at least one setting changed.
 * Returns 0 on success (including no-op), -ENODEV for a bad port, or
 * -ENOTSUP if a change is needed but the PMD lacks vlan_offload_set.
 */
int
rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
{
	struct rte_eth_dev *dev;
	int ret = 0;
	int mask = 0;
	int cur, org = 0;

	VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	/*check which option changed by application*/
	cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
	if (cur != org) {
		dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
		mask |= ETH_VLAN_STRIP_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
	if (cur != org) {
		dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
		mask |= ETH_VLAN_FILTER_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
	if (cur != org) {
		dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
		mask |= ETH_VLAN_EXTEND_MASK;
	}

	/*no change*/
	if (mask == 0)
		return ret;

	/* NOTE(review): the cached rxmode flags above are already updated
	 * by the time this can fail with -ENOTSUP — confirm callers do not
	 * rely on the cache matching hardware after a failure. */
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
	(*dev->dev_ops->vlan_offload_set)(dev, mask);

	return ret;
}
1798
1799 int
1800 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1801 {
1802         struct rte_eth_dev *dev;
1803         int ret = 0;
1804
1805         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1806         dev = &rte_eth_devices[port_id];
1807
1808         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1809                 ret |= ETH_VLAN_STRIP_OFFLOAD;
1810
1811         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1812                 ret |= ETH_VLAN_FILTER_OFFLOAD;
1813
1814         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1815                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
1816
1817         return ret;
1818 }
1819
1820 int
1821 rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
1822 {
1823         struct rte_eth_dev *dev;
1824
1825         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1826         dev = &rte_eth_devices[port_id];
1827         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
1828         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
1829
1830         return 0;
1831 }
1832
1833 int
1834 rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1835 {
1836         struct rte_eth_dev *dev;
1837
1838         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1839         dev = &rte_eth_devices[port_id];
1840         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
1841         memset(fc_conf, 0, sizeof(*fc_conf));
1842         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
1843 }
1844
1845 int
1846 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1847 {
1848         struct rte_eth_dev *dev;
1849
1850         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1851         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
1852                 PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
1853                 return -EINVAL;
1854         }
1855
1856         dev = &rte_eth_devices[port_id];
1857         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
1858         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
1859 }
1860
1861 int
1862 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
1863 {
1864         struct rte_eth_dev *dev;
1865
1866         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1867         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
1868                 PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
1869                 return -EINVAL;
1870         }
1871
1872         dev = &rte_eth_devices[port_id];
1873         /* High water, low water validation are device specific */
1874         if  (*dev->dev_ops->priority_flow_ctrl_set)
1875                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
1876         return -ENOTSUP;
1877 }
1878
1879 static int
1880 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
1881                         uint16_t reta_size)
1882 {
1883         uint16_t i, num;
1884
1885         if (!reta_conf)
1886                 return -EINVAL;
1887
1888         if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
1889                 PMD_DEBUG_TRACE("Invalid reta size, should be %u aligned\n",
1890                                                         RTE_RETA_GROUP_SIZE);
1891                 return -EINVAL;
1892         }
1893
1894         num = reta_size / RTE_RETA_GROUP_SIZE;
1895         for (i = 0; i < num; i++) {
1896                 if (reta_conf[i].mask)
1897                         return 0;
1898         }
1899
1900         return -EINVAL;
1901 }
1902
1903 static int
1904 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
1905                          uint16_t reta_size,
1906                          uint8_t max_rxq)
1907 {
1908         uint16_t i, idx, shift;
1909
1910         if (!reta_conf)
1911                 return -EINVAL;
1912
1913         if (max_rxq == 0) {
1914                 PMD_DEBUG_TRACE("No receive queue is available\n");
1915                 return -EINVAL;
1916         }
1917
1918         for (i = 0; i < reta_size; i++) {
1919                 idx = i / RTE_RETA_GROUP_SIZE;
1920                 shift = i % RTE_RETA_GROUP_SIZE;
1921                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
1922                         (reta_conf[idx].reta[shift] >= max_rxq)) {
1923                         PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
1924                                 "the maximum rxq index: %u\n", idx, shift,
1925                                 reta_conf[idx].reta[shift], max_rxq);
1926                         return -EINVAL;
1927                 }
1928         }
1929
1930         return 0;
1931 }
1932
1933 int
1934 rte_eth_dev_rss_reta_update(uint8_t port_id,
1935                             struct rte_eth_rss_reta_entry64 *reta_conf,
1936                             uint16_t reta_size)
1937 {
1938         struct rte_eth_dev *dev;
1939         int ret;
1940
1941         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1942         /* Check mask bits */
1943         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1944         if (ret < 0)
1945                 return ret;
1946
1947         dev = &rte_eth_devices[port_id];
1948
1949         /* Check entry value */
1950         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
1951                                 dev->data->nb_rx_queues);
1952         if (ret < 0)
1953                 return ret;
1954
1955         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
1956         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
1957 }
1958
1959 int
1960 rte_eth_dev_rss_reta_query(uint8_t port_id,
1961                            struct rte_eth_rss_reta_entry64 *reta_conf,
1962                            uint16_t reta_size)
1963 {
1964         struct rte_eth_dev *dev;
1965         int ret;
1966
1967         if (port_id >= nb_ports) {
1968                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1969                 return -ENODEV;
1970         }
1971
1972         /* Check mask bits */
1973         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1974         if (ret < 0)
1975                 return ret;
1976
1977         dev = &rte_eth_devices[port_id];
1978         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
1979         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
1980 }
1981
/*
 * Update the RSS hash configuration (key and/or enabled protocols) of a
 * port.  Rejects a non-zero rss_hf that selects none of the protocols in
 * ETH_RSS_PROTO_MASK; rss_hf == 0 (disable RSS) is allowed through.
 *
 * NOTE(review): rss_hash_protos is uint16_t; if rss_conf->rss_hf is a
 * wider type, bits above 15 are silently dropped from this validation —
 * confirm against the rte_eth_rss_conf definition.
 */
int
rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev *dev;
	uint16_t rss_hash_protos;

	VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	rss_hash_protos = rss_conf->rss_hf;
	if ((rss_hash_protos != 0) &&
	    ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
		PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
				rss_hash_protos);
		return -EINVAL;
	}
	dev = &rte_eth_devices[port_id];
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
	return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
}
2000
2001 int
2002 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
2003                               struct rte_eth_rss_conf *rss_conf)
2004 {
2005         struct rte_eth_dev *dev;
2006
2007         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2008         dev = &rte_eth_devices[port_id];
2009         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2010         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
2011 }
2012
2013 int
2014 rte_eth_dev_udp_tunnel_add(uint8_t port_id,
2015                            struct rte_eth_udp_tunnel *udp_tunnel)
2016 {
2017         struct rte_eth_dev *dev;
2018
2019         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2020         if (udp_tunnel == NULL) {
2021                 PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2022                 return -EINVAL;
2023         }
2024
2025         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2026                 PMD_DEBUG_TRACE("Invalid tunnel type\n");
2027                 return -EINVAL;
2028         }
2029
2030         dev = &rte_eth_devices[port_id];
2031         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_add, -ENOTSUP);
2032         return (*dev->dev_ops->udp_tunnel_add)(dev, udp_tunnel);
2033 }
2034
2035 int
2036 rte_eth_dev_udp_tunnel_delete(uint8_t port_id,
2037                               struct rte_eth_udp_tunnel *udp_tunnel)
2038 {
2039         struct rte_eth_dev *dev;
2040
2041         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2042         dev = &rte_eth_devices[port_id];
2043
2044         if (udp_tunnel == NULL) {
2045                 PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2046                 return -EINVAL;
2047         }
2048
2049         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2050                 PMD_DEBUG_TRACE("Invalid tunnel type\n");
2051                 return -EINVAL;
2052         }
2053
2054         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_del, -ENOTSUP);
2055         return (*dev->dev_ops->udp_tunnel_del)(dev, udp_tunnel);
2056 }
2057
2058 int
2059 rte_eth_led_on(uint8_t port_id)
2060 {
2061         struct rte_eth_dev *dev;
2062
2063         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2064         dev = &rte_eth_devices[port_id];
2065         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2066         return (*dev->dev_ops->dev_led_on)(dev);
2067 }
2068
2069 int
2070 rte_eth_led_off(uint8_t port_id)
2071 {
2072         struct rte_eth_dev *dev;
2073
2074         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2075         dev = &rte_eth_devices[port_id];
2076         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2077         return (*dev->dev_ops->dev_led_off)(dev);
2078 }
2079
2080 /*
2081  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2082  * an empty spot.
2083  */
/*
 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
 * an empty spot.
 *
 * Returns -1 when addr is not present in the port's mac_addrs array.
 * port_id is assumed already validated by the caller.
 */
static int
get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	unsigned i;

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Linear scan over all slots the device advertises. */
	for (i = 0; i < dev_info.max_mac_addrs; i++)
		if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
			return i;

	return -1;
}
2099
/* All-zero MAC address; marks free slots in the MAC/hash address arrays. */
static const struct ether_addr null_mac_addr;
2101
/*
 * Add a MAC address to a port, associated with VMDq pool `pool`.
 *
 * If the address already exists only the pool bitmap is extended (a
 * duplicate address+pool pair is a silent no-op); otherwise the first
 * free (all-zero) slot is used.  Returns 0 on success, -ENODEV/-ENOTSUP/
 * -EINVAL/-ENOSPC on the corresponding failure.
 *
 * NOTE(review): the PMD mac_addr_add callback's result is not checked
 * here; the software shadow (mac_addrs, mac_pool_sel) is updated
 * unconditionally — confirm the callback cannot fail in this version.
 */
int
rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
			uint32_t pool)
{
	struct rte_eth_dev *dev;
	int index;
	uint64_t pool_mask;

	VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);

	if (is_zero_ether_addr(addr)) {
		PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
			port_id);
		return -EINVAL;
	}
	if (pool >= ETH_64_POOLS) {
		PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
		return -EINVAL;
	}

	/* Reuse the slot of an identical address, else claim a free one. */
	index = get_mac_addr_index(port_id, addr);
	if (index < 0) {
		index = get_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			PMD_DEBUG_TRACE("port %d: MAC address array full\n",
				port_id);
			return -ENOSPC;
		}
	} else {
		pool_mask = dev->data->mac_pool_sel[index];

		/* Check if both MAC address and pool is already there, and do nothing */
		if (pool_mask & (1ULL << pool))
			return 0;
	}

	/* Update NIC */
	(*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);

	/* Update address in NIC data structure */
	ether_addr_copy(addr, &dev->data->mac_addrs[index]);

	/* Update pool bitmap in NIC data structure */
	dev->data->mac_pool_sel[index] |= (1ULL << pool);

	return 0;
}
2151
/*
 * Remove a MAC address from a port.
 *
 * Slot 0 holds the default MAC address and may not be removed
 * (-EADDRINUSE).  Removing an address that is not present is a silent
 * success.  On removal the slot is zeroed and its pool bitmap cleared.
 */
int
rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
{
	struct rte_eth_dev *dev;
	int index;

	VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);

	index = get_mac_addr_index(port_id, addr);
	if (index == 0) {
		PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
		return -EADDRINUSE;
	} else if (index < 0)
		return 0;  /* Do nothing if address wasn't found */

	/* Update NIC */
	(*dev->dev_ops->mac_addr_remove)(dev, index);

	/* Update address in NIC data structure */
	ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);

	/* reset pool bitmap */
	dev->data->mac_pool_sel[index] = 0;

	return 0;
}
2180
2181 int
2182 rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr)
2183 {
2184         struct rte_eth_dev *dev;
2185
2186         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2187
2188         if (!is_valid_assigned_ether_addr(addr))
2189                 return -EINVAL;
2190
2191         dev = &rte_eth_devices[port_id];
2192         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2193
2194         /* Update default address in NIC data structure */
2195         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
2196
2197         (*dev->dev_ops->mac_addr_set)(dev, addr);
2198
2199         return 0;
2200 }
2201
2202 int
2203 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
2204                                 uint16_t rx_mode, uint8_t on)
2205 {
2206         uint16_t num_vfs;
2207         struct rte_eth_dev *dev;
2208         struct rte_eth_dev_info dev_info;
2209
2210         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2211
2212         dev = &rte_eth_devices[port_id];
2213         rte_eth_dev_info_get(port_id, &dev_info);
2214
2215         num_vfs = dev_info.max_vfs;
2216         if (vf > num_vfs) {
2217                 PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
2218                 return -EINVAL;
2219         }
2220
2221         if (rx_mode == 0) {
2222                 PMD_DEBUG_TRACE("set VF RX mode:mode mask ca not be zero\n");
2223                 return -EINVAL;
2224         }
2225         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
2226         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
2227 }
2228
2229 /*
2230  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2231  * an empty spot.
2232  */
/*
 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
 * an empty spot.
 *
 * Same contract as get_mac_addr_index() but scans the unicast hash table
 * (hash_mac_addrs).  Returns -1 when the table is absent or addr is not
 * found.  port_id is assumed already validated by the caller.
 */
static int
get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	unsigned i;

	rte_eth_dev_info_get(port_id, &dev_info);
	/* Device may not support a unicast hash table at all. */
	if (!dev->data->hash_mac_addrs)
		return -1;

	for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
		if (memcmp(addr, &dev->data->hash_mac_addrs[i],
			ETHER_ADDR_LEN) == 0)
			return i;

	return -1;
}
2251
/*
 * Add (on != 0) or remove (on == 0) a MAC address in the port's unicast
 * hash (UTA) table.
 *
 * Adding an address already present is a silent no-op; removing one that
 * was never added fails with -EINVAL.  The software shadow
 * (hash_mac_addrs) is updated only when the PMD callback succeeds.
 */
int
rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
				uint8_t on)
{
	int index;
	int ret;
	struct rte_eth_dev *dev;

	VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (is_zero_ether_addr(addr)) {
		PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
			port_id);
		return -EINVAL;
	}

	index = get_hash_mac_addr_index(port_id, addr);
	/* Check if it's already there, and do nothing */
	if ((index >= 0) && (on))
		return 0;

	if (index < 0) {
		if (!on) {
			PMD_DEBUG_TRACE("port %d: the MAC address was not "
				"set in UTA\n", port_id);
			return -EINVAL;
		}

		/* Claim the first free (all-zero) slot for the new entry. */
		index = get_hash_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			PMD_DEBUG_TRACE("port %d: MAC address array full\n",
					port_id);
			return -ENOSPC;
		}
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
	ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
	if (ret == 0) {
		/* Update address in NIC data structure */
		if (on)
			ether_addr_copy(addr,
					&dev->data->hash_mac_addrs[index]);
		else
			ether_addr_copy(&null_mac_addr,
					&dev->data->hash_mac_addrs[index]);
	}

	return ret;
}
2303
2304 int
2305 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2306 {
2307         struct rte_eth_dev *dev;
2308
2309         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2310
2311         dev = &rte_eth_devices[port_id];
2312
2313         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2314         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2315 }
2316
2317 int
2318 rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on)
2319 {
2320         uint16_t num_vfs;
2321         struct rte_eth_dev *dev;
2322         struct rte_eth_dev_info dev_info;
2323
2324         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2325
2326         dev = &rte_eth_devices[port_id];
2327         rte_eth_dev_info_get(port_id, &dev_info);
2328
2329         num_vfs = dev_info.max_vfs;
2330         if (vf > num_vfs) {
2331                 PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2332                 return -EINVAL;
2333         }
2334
2335         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2336         return (*dev->dev_ops->set_vf_rx)(dev, vf, on);
2337 }
2338
2339 int
2340 rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on)
2341 {
2342         uint16_t num_vfs;
2343         struct rte_eth_dev *dev;
2344         struct rte_eth_dev_info dev_info;
2345
2346         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2347
2348         dev = &rte_eth_devices[port_id];
2349         rte_eth_dev_info_get(port_id, &dev_info);
2350
2351         num_vfs = dev_info.max_vfs;
2352         if (vf > num_vfs) {
2353                 PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
2354                 return -EINVAL;
2355         }
2356
2357         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2358         return (*dev->dev_ops->set_vf_tx)(dev, vf, on);
2359 }
2360
2361 int
2362 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2363                                uint64_t vf_mask, uint8_t vlan_on)
2364 {
2365         struct rte_eth_dev *dev;
2366
2367         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2368
2369         dev = &rte_eth_devices[port_id];
2370
2371         if (vlan_id > ETHER_MAX_VLAN_ID) {
2372                 PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
2373                         vlan_id);
2374                 return -EINVAL;
2375         }
2376
2377         if (vf_mask == 0) {
2378                 PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
2379                 return -EINVAL;
2380         }
2381
2382         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2383         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2384                                                    vf_mask, vlan_on);
2385 }
2386
/*
 * Limit the transmit rate (in Mbps, per the link_speed comparison below)
 * of one TX queue of a port.
 *
 * NOTE(review): the bound check is `queue_idx > max_tx_queues`, which
 * accepts queue_idx == max_tx_queues; confirm whether `>=` was intended
 * for 0-based queue ids.
 */
int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
					uint16_t tx_rate)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_link link;

	VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	rte_eth_dev_info_get(port_id, &dev_info);
	/* Snapshot of the last known link state; used to cap tx_rate. */
	link = dev->data->dev_link;

	if (queue_idx > dev_info.max_tx_queues) {
		PMD_DEBUG_TRACE("set queue rate limit:port %d: "
				"invalid queue id=%d\n", port_id, queue_idx);
		return -EINVAL;
	}

	if (tx_rate > link.link_speed) {
		PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
				"bigger than link speed= %d\n",
			tx_rate, link.link_speed);
		return -EINVAL;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
	return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
}
2416
/*
 * Limit the transmit rate of a VF on the queues selected by q_msk.
 * A zero queue mask is a silent no-op (returns 0 without touching HW).
 *
 * NOTE(review): as with the queue variant, `vf > max_vfs` accepts
 * vf == max_vfs — confirm whether `>=` was intended.
 */
int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
				uint64_t q_msk)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_link link;

	/* Nothing selected: nothing to do. */
	if (q_msk == 0)
		return 0;

	VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	rte_eth_dev_info_get(port_id, &dev_info);
	/* Snapshot of the last known link state; used to cap tx_rate. */
	link = dev->data->dev_link;

	if (vf > dev_info.max_vfs) {
		PMD_DEBUG_TRACE("set VF rate limit:port %d: "
				"invalid vf id=%d\n", port_id, vf);
		return -EINVAL;
	}

	if (tx_rate > link.link_speed) {
		PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
				"bigger than link speed= %d\n",
				tx_rate, link.link_speed);
		return -EINVAL;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
	return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
}
2449
2450 int
2451 rte_eth_mirror_rule_set(uint8_t port_id,
2452                         struct rte_eth_mirror_conf *mirror_conf,
2453                         uint8_t rule_id, uint8_t on)
2454 {
2455         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2456
2457         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2458         if (mirror_conf->rule_type == 0) {
2459                 PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2460                 return -EINVAL;
2461         }
2462
2463         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2464                 PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
2465                                 ETH_64_POOLS - 1);
2466                 return -EINVAL;
2467         }
2468
2469         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
2470              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
2471             (mirror_conf->pool_mask == 0)) {
2472                 PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
2473                 return -EINVAL;
2474         }
2475
2476         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
2477             mirror_conf->vlan.vlan_mask == 0) {
2478                 PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
2479                 return -EINVAL;
2480         }
2481
2482         dev = &rte_eth_devices[port_id];
2483         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2484
2485         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2486 }
2487
2488 int
2489 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2490 {
2491         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2492
2493         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2494
2495         dev = &rte_eth_devices[port_id];
2496         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2497
2498         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2499 }
2500
2501 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
/*
 * Debug build of rte_eth_rx_burst(): validates port, burst function and
 * queue id before dispatching to the PMD RX handler.  Returns the number
 * of packets retrieved, 0 on any validation failure.
 */
uint16_t
rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
		 struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_dev *dev;

	VALID_PORTID_OR_ERR_RET(port_id, 0);

	dev = &rte_eth_devices[port_id];
	FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
	if (queue_id >= dev->data->nb_rx_queues) {
		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
		return 0;
	}
	return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
						rx_pkts, nb_pkts);
}
2519
/*
 * Debug build of rte_eth_tx_burst(): validates port, burst function and
 * queue id before dispatching to the PMD TX handler.  Returns the number
 * of packets actually sent, 0 on any validation failure.
 */
uint16_t
rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_dev *dev;

	VALID_PORTID_OR_ERR_RET(port_id, 0);

	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
	if (queue_id >= dev->data->nb_tx_queues) {
		PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
		return 0;
	}
	return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
						tx_pkts, nb_pkts);
}
2538
2539 uint32_t
2540 rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
2541 {
2542         struct rte_eth_dev *dev;
2543
2544         VALID_PORTID_OR_ERR_RET(port_id, 0);
2545
2546         dev = &rte_eth_devices[port_id];
2547         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, 0);
2548         return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
2549 }
2550
/*
 * Debug build of rte_eth_rx_descriptor_done(): check whether the RX
 * descriptor at `offset` in the given queue has been filled by hardware.
 *
 * NOTE(review): unlike rte_eth_rx_burst() above, queue_id is not bounds
 * checked here before indexing rx_queues[] — confirm callers guarantee a
 * valid queue id.
 */
int
rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
{
	struct rte_eth_dev *dev;

	VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
	return (*dev->dev_ops->rx_descriptor_done)(dev->data->rx_queues[queue_id],
						   offset);
}
2563 #endif
2564
/*
 * Register a user callback for a device event on a port.
 *
 * Registering the same (cb_fn, cb_arg, event) triple twice reuses the
 * existing entry (idempotent).  The callback list is protected by
 * rte_eth_dev_cb_lock.  Returns 0, -EINVAL for bad arguments, or -ENOMEM
 * when a new entry cannot be allocated.
 */
int
rte_eth_dev_callback_register(uint8_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	/* Look for an identical registration; user_cb is NULL if none. */
	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL)
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				      sizeof(struct rte_eth_dev_callback), 0);
	if (user_cb != NULL) {
		user_cb->cb_fn = cb_fn;
		user_cb->cb_arg = cb_arg;
		user_cb->event = event;
		TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
	}

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}
2603
/*
 * Unregister user callback(s) for a device event on a port.
 *
 * A cb_arg of (void *)-1 acts as a wildcard matching any argument.  A
 * callback that is currently executing (active != 0, see
 * _rte_eth_dev_callback_process) is left in place and -EAGAIN is
 * returned so the caller can retry.  Returns 0 otherwise.
 */
int
rte_eth_dev_callback_unregister(uint8_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	ret = 0;
	/* Safe traversal: `next` is captured before a possible removal. */
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	return ret;
}
2646
/*
 * Invoke every registered callback matching `event` on a device.
 *
 * The callback entry is copied and marked active, and the lock is
 * RELEASED while the user callback runs — so callbacks may call back
 * into the ethdev API, and unregister sees active==1 and defers removal
 * (-EAGAIN) for entries currently executing.
 */
void
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;

	rte_spinlock_lock(&rte_eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		/* Work on a copy so the list entry can't change under us. */
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_eth_dev_cb_lock);
		dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
}
2668
2669 int
2670 rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
2671 {
2672         uint32_t vec;
2673         struct rte_eth_dev *dev;
2674         struct rte_intr_handle *intr_handle;
2675         uint16_t qid;
2676         int rc;
2677
2678         if (!rte_eth_dev_is_valid_port(port_id)) {
2679                 PMD_DEBUG_TRACE("Invalid port_id=%u\n", port_id);
2680                 return -ENODEV;
2681         }
2682
2683         dev = &rte_eth_devices[port_id];
2684         intr_handle = &dev->pci_dev->intr_handle;
2685         if (!intr_handle->intr_vec) {
2686                 PMD_DEBUG_TRACE("RX Intr vector unset\n");
2687                 return -EPERM;
2688         }
2689
2690         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
2691                 vec = intr_handle->intr_vec[qid];
2692                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2693                 if (rc && rc != -EEXIST) {
2694                         PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2695                                         " op %d epfd %d vec %u\n",
2696                                         port_id, qid, op, epfd, vec);
2697                 }
2698         }
2699
2700         return 0;
2701 }
2702
2703 int
2704 rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
2705                           int epfd, int op, void *data)
2706 {
2707         uint32_t vec;
2708         struct rte_eth_dev *dev;
2709         struct rte_intr_handle *intr_handle;
2710         int rc;
2711
2712         if (!rte_eth_dev_is_valid_port(port_id)) {
2713                 PMD_DEBUG_TRACE("Invalid port_id=%u\n", port_id);
2714                 return -ENODEV;
2715         }
2716
2717         dev = &rte_eth_devices[port_id];
2718         if (queue_id >= dev->data->nb_rx_queues) {
2719                 PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
2720                 return -EINVAL;
2721         }
2722
2723         intr_handle = &dev->pci_dev->intr_handle;
2724         if (!intr_handle->intr_vec) {
2725                 PMD_DEBUG_TRACE("RX Intr vector unset\n");
2726                 return -EPERM;
2727         }
2728
2729         vec = intr_handle->intr_vec[queue_id];
2730         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2731         if (rc && rc != -EEXIST) {
2732                 PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2733                                 " op %d epfd %d vec %u\n",
2734                                 port_id, queue_id, op, epfd, vec);
2735                 return rc;
2736         }
2737
2738         return 0;
2739 }
2740
2741 int
2742 rte_eth_dev_rx_intr_enable(uint8_t port_id,
2743                            uint16_t queue_id)
2744 {
2745         struct rte_eth_dev *dev;
2746
2747         if (!rte_eth_dev_is_valid_port(port_id)) {
2748                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2749                 return -ENODEV;
2750         }
2751
2752         dev = &rte_eth_devices[port_id];
2753
2754         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
2755         return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
2756 }
2757
2758 int
2759 rte_eth_dev_rx_intr_disable(uint8_t port_id,
2760                             uint16_t queue_id)
2761 {
2762         struct rte_eth_dev *dev;
2763
2764         if (!rte_eth_dev_is_valid_port(port_id)) {
2765                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2766                 return -ENODEV;
2767         }
2768
2769         dev = &rte_eth_devices[port_id];
2770
2771         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
2772         return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
2773 }
2774
2775 #ifdef RTE_NIC_BYPASS
2776 int rte_eth_dev_bypass_init(uint8_t port_id)
2777 {
2778         struct rte_eth_dev *dev;
2779
2780         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2781
2782         dev = &rte_eth_devices[port_id];
2783         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
2784         (*dev->dev_ops->bypass_init)(dev);
2785         return 0;
2786 }
2787
2788 int
2789 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
2790 {
2791         struct rte_eth_dev *dev;
2792
2793         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2794
2795         dev = &rte_eth_devices[port_id];
2796         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2797         (*dev->dev_ops->bypass_state_show)(dev, state);
2798         return 0;
2799 }
2800
2801 int
2802 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
2803 {
2804         struct rte_eth_dev *dev;
2805
2806         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2807
2808         dev = &rte_eth_devices[port_id];
2809         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
2810         (*dev->dev_ops->bypass_state_set)(dev, new_state);
2811         return 0;
2812 }
2813
2814 int
2815 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
2816 {
2817         struct rte_eth_dev *dev;
2818
2819         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2820
2821         dev = &rte_eth_devices[port_id];
2822         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2823         (*dev->dev_ops->bypass_event_show)(dev, event, state);
2824         return 0;
2825 }
2826
2827 int
2828 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
2829 {
2830         struct rte_eth_dev *dev;
2831
2832         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2833
2834         dev = &rte_eth_devices[port_id];
2835
2836         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
2837         (*dev->dev_ops->bypass_event_set)(dev, event, state);
2838         return 0;
2839 }
2840
2841 int
2842 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
2843 {
2844         struct rte_eth_dev *dev;
2845
2846         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2847
2848         dev = &rte_eth_devices[port_id];
2849
2850         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
2851         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
2852         return 0;
2853 }
2854
2855 int
2856 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
2857 {
2858         struct rte_eth_dev *dev;
2859
2860         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2861
2862         dev = &rte_eth_devices[port_id];
2863
2864         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
2865         (*dev->dev_ops->bypass_ver_show)(dev, ver);
2866         return 0;
2867 }
2868
2869 int
2870 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
2871 {
2872         struct rte_eth_dev *dev;
2873
2874         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2875
2876         dev = &rte_eth_devices[port_id];
2877
2878         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
2879         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
2880         return 0;
2881 }
2882
2883 int
2884 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
2885 {
2886         struct rte_eth_dev *dev;
2887
2888         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2889
2890         dev = &rte_eth_devices[port_id];
2891
2892         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
2893         (*dev->dev_ops->bypass_wd_reset)(dev);
2894         return 0;
2895 }
2896 #endif
2897
2898 int
2899 rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
2900 {
2901         struct rte_eth_dev *dev;
2902
2903         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2904
2905         dev = &rte_eth_devices[port_id];
2906         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2907         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
2908                                 RTE_ETH_FILTER_NOP, NULL);
2909 }
2910
2911 int
2912 rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
2913                        enum rte_filter_op filter_op, void *arg)
2914 {
2915         struct rte_eth_dev *dev;
2916
2917         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2918
2919         dev = &rte_eth_devices[port_id];
2920         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2921         return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
2922 }
2923
/*
 * Append a user callback to run after each RX burst on one queue.
 * Callbacks are chained in FIFO order.  Returns an opaque handle for
 * rte_eth_remove_rx_callback(), or NULL with rte_errno set to ENOTSUP
 * (feature compiled out), EINVAL (bad port/queue/fn) or ENOMEM.
 */
void *
rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	/* Fill in the node completely before linking it: the RX burst path
	 * may be walking this list concurrently. */
	cb->fn.rx = fn;
	cb->param = user_param;

	/* Add the callbacks in fifo order. */
	struct rte_eth_rxtx_callback *tail =
		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];

	if (!tail) {
		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;

	} else {
		while (tail->next)
			tail = tail->next;
		tail->next = cb;
	}

	/* NOTE(review): no lock protects this list; concurrent add/remove
	 * from several control threads looks unsafe — confirm callers
	 * serialize these calls. */
	return cb;
}
2964
/*
 * Append a user callback to run before each TX burst on one queue.
 * Callbacks are chained in FIFO order.  Returns an opaque handle for
 * rte_eth_remove_tx_callback(), or NULL with rte_errno set to ENOTSUP
 * (feature compiled out), EINVAL (bad port/queue/fn) or ENOMEM.
 */
void *
rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
		rte_tx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	/* Fill in the node completely before linking it: the TX burst path
	 * may be walking this list concurrently. */
	cb->fn.tx = fn;
	cb->param = user_param;

	/* Add the callbacks in fifo order. */
	struct rte_eth_rxtx_callback *tail =
		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];

	if (!tail) {
		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;

	} else {
		while (tail->next)
			tail = tail->next;
		tail->next = cb;
	}

	/* NOTE(review): no lock protects this list; concurrent add/remove
	 * from several control threads looks unsafe — confirm callers
	 * serialize these calls. */
	return cb;
}
3005
/*
 * Unlink a callback previously installed with rte_eth_add_rx_callback().
 * Only the list linkage is updated here: the callback memory itself is
 * NOT freed (the data path may still be dereferencing it), so disposal
 * is the caller's responsibility.  Returns 0 when found and unlinked,
 * -EINVAL when not found or on bad parameters, -ENOTSUP when the
 * feature is compiled out.
 */
int
rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
		struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return -ENOTSUP;
#endif
	/* Check input parameters. */
	if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
		return -EINVAL;
	}

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
	struct rte_eth_rxtx_callback *prev_cb;

	/* Reset head pointer and remove user cb if first in the list. */
	if (cb == user_cb) {
		dev->post_rx_burst_cbs[queue_id] = user_cb->next;
		return 0;
	}

	/* Remove the user cb from the callback list. */
	do {
		prev_cb = cb;
		cb = cb->next;

		/* Splice the node out by a single pointer store so a
		 * concurrent reader never sees a broken chain. */
		if (cb == user_cb) {
			prev_cb->next = user_cb->next;
			return 0;
		}

	} while (cb != NULL);

	/* Callback wasn't found. */
	return -EINVAL;
}
3044
/*
 * Unlink a callback previously installed with rte_eth_add_tx_callback().
 * Only the list linkage is updated here: the callback memory itself is
 * NOT freed (the data path may still be dereferencing it), so disposal
 * is the caller's responsibility.  Returns 0 when found and unlinked,
 * -EINVAL when not found or on bad parameters, -ENOTSUP when the
 * feature is compiled out.
 */
int
rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
		struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return -ENOTSUP;
#endif
	/* Check input parameters. */
	if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
		return -EINVAL;
	}

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
	struct rte_eth_rxtx_callback *prev_cb;

	/* Reset head pointer and remove user cb if first in the list. */
	if (cb == user_cb) {
		dev->pre_tx_burst_cbs[queue_id] = user_cb->next;
		return 0;
	}

	/* Remove the user cb from the callback list. */
	do {
		prev_cb = cb;
		cb = cb->next;

		/* Splice the node out by a single pointer store so a
		 * concurrent reader never sees a broken chain. */
		if (cb == user_cb) {
			prev_cb->next = user_cb->next;
			return 0;
		}

	} while (cb != NULL);

	/* Callback wasn't found. */
	return -EINVAL;
}
3083
3084 int
3085 rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3086         struct rte_eth_rxq_info *qinfo)
3087 {
3088         struct rte_eth_dev *dev;
3089
3090         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3091
3092         if (qinfo == NULL)
3093                 return -EINVAL;
3094
3095         dev = &rte_eth_devices[port_id];
3096         if (queue_id >= dev->data->nb_rx_queues) {
3097                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3098                 return -EINVAL;
3099         }
3100
3101         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3102
3103         memset(qinfo, 0, sizeof(*qinfo));
3104         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3105         return 0;
3106 }
3107
3108 int
3109 rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3110         struct rte_eth_txq_info *qinfo)
3111 {
3112         struct rte_eth_dev *dev;
3113
3114         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3115
3116         if (qinfo == NULL)
3117                 return -EINVAL;
3118
3119         dev = &rte_eth_devices[port_id];
3120         if (queue_id >= dev->data->nb_tx_queues) {
3121                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3122                 return -EINVAL;
3123         }
3124
3125         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3126
3127         memset(qinfo, 0, sizeof(*qinfo));
3128         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3129         return 0;
3130 }
3131
3132 int
3133 rte_eth_dev_set_mc_addr_list(uint8_t port_id,
3134                              struct ether_addr *mc_addr_set,
3135                              uint32_t nb_mc_addr)
3136 {
3137         struct rte_eth_dev *dev;
3138
3139         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3140
3141         dev = &rte_eth_devices[port_id];
3142         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3143         return dev->dev_ops->set_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
3144 }
3145
3146 int
3147 rte_eth_timesync_enable(uint8_t port_id)
3148 {
3149         struct rte_eth_dev *dev;
3150
3151         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3152         dev = &rte_eth_devices[port_id];
3153
3154         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3155         return (*dev->dev_ops->timesync_enable)(dev);
3156 }
3157
3158 int
3159 rte_eth_timesync_disable(uint8_t port_id)
3160 {
3161         struct rte_eth_dev *dev;
3162
3163         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3164         dev = &rte_eth_devices[port_id];
3165
3166         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3167         return (*dev->dev_ops->timesync_disable)(dev);
3168 }
3169
3170 int
3171 rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp,
3172                                    uint32_t flags)
3173 {
3174         struct rte_eth_dev *dev;
3175
3176         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3177         dev = &rte_eth_devices[port_id];
3178
3179         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3180         return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
3181 }
3182
3183 int
3184 rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp)
3185 {
3186         struct rte_eth_dev *dev;
3187
3188         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3189         dev = &rte_eth_devices[port_id];
3190
3191         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3192         return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
3193 }
3194
3195 int
3196 rte_eth_dev_get_reg_length(uint8_t port_id)
3197 {
3198         struct rte_eth_dev *dev;
3199
3200         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3201
3202         dev = &rte_eth_devices[port_id];
3203         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg_length, -ENOTSUP);
3204         return (*dev->dev_ops->get_reg_length)(dev);
3205 }
3206
3207 int
3208 rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info)
3209 {
3210         struct rte_eth_dev *dev;
3211
3212         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3213
3214         dev = &rte_eth_devices[port_id];
3215         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
3216         return (*dev->dev_ops->get_reg)(dev, info);
3217 }
3218
3219 int
3220 rte_eth_dev_get_eeprom_length(uint8_t port_id)
3221 {
3222         struct rte_eth_dev *dev;
3223
3224         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3225
3226         dev = &rte_eth_devices[port_id];
3227         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
3228         return (*dev->dev_ops->get_eeprom_length)(dev);
3229 }
3230
3231 int
3232 rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
3233 {
3234         struct rte_eth_dev *dev;
3235
3236         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3237
3238         dev = &rte_eth_devices[port_id];
3239         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
3240         return (*dev->dev_ops->get_eeprom)(dev, info);
3241 }
3242
3243 int
3244 rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
3245 {
3246         struct rte_eth_dev *dev;
3247
3248         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3249
3250         dev = &rte_eth_devices[port_id];
3251         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
3252         return (*dev->dev_ops->set_eeprom)(dev, info);
3253 }
3254
3255 int
3256 rte_eth_dev_get_dcb_info(uint8_t port_id,
3257                              struct rte_eth_dcb_info *dcb_info)
3258 {
3259         struct rte_eth_dev *dev;
3260
3261         if (!rte_eth_dev_is_valid_port(port_id)) {
3262                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3263                 return -ENODEV;
3264         }
3265
3266         dev = &rte_eth_devices[port_id];
3267         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
3268
3269         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
3270         return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
3271 }
3272
/*
 * Copy the identification fields of a PCI device into the generic
 * ethdev data: device flags derived from the driver's drv_flags,
 * kernel driver binding, NUMA node and driver name.
 * Invalid (NULL) arguments are logged and silently ignored since the
 * function returns void.
 */
void
rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev, struct rte_pci_device *pci_dev)
{
	if ((eth_dev == NULL) || (pci_dev == NULL)) {
		PMD_DEBUG_TRACE("NULL pointer eth_dev=%p pci_dev=%p\n",
				eth_dev, pci_dev);
		return;
	}

	/* Rebuild the flag set from scratch on every copy. */
	eth_dev->data->dev_flags = 0;
	if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
	if (pci_dev->driver->drv_flags & RTE_PCI_DRV_DETACHABLE)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

	/* NOTE(review): eth_dev->data and pci_dev->driver are dereferenced
	 * without NULL checks — callers must guarantee both are populated
	 * before invoking this helper; confirm at the call sites. */
	eth_dev->data->kdrv = pci_dev->kdrv;
	eth_dev->data->numa_node = pci_dev->numa_node;
	eth_dev->data->drv_name = pci_dev->driver->name;
}