pci: fix detach and uninit naming
[dpdk.git] lib/librte_ether/rte_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_ether.h"
#include "rte_ethdev.h"

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
#define PMD_DEBUG_TRACE(fmt, args...) do {                        \
                RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
        } while (0)
#else
#define PMD_DEBUG_TRACE(fmt, args...)
#endif

/* Macros restricting functions to the primary process only */
#define PROC_PRIMARY_OR_ERR_RET(retval) do { \
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
                PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
                return (retval); \
        } \
} while (0)

#define PROC_PRIMARY_OR_RET() do { \
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
                PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
                return; \
        } \
} while (0)

/* Macros to check for invalid function pointers in a dev_ops structure */
#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
        if ((func) == NULL) { \
                PMD_DEBUG_TRACE("Function not supported\n"); \
                return (retval); \
        } \
} while (0)

#define FUNC_PTR_OR_RET(func) do { \
        if ((func) == NULL) { \
                PMD_DEBUG_TRACE("Function not supported\n"); \
                return; \
        } \
} while (0)

/* Macros to check for a valid port id */
#define VALID_PORTID_OR_ERR_RET(port_id, retval) do {           \
        if (!rte_eth_dev_is_valid_port(port_id)) {              \
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
                return retval;                                  \
        }                                                       \
} while (0)

#define VALID_PORTID_OR_RET(port_id) do {                       \
        if (!rte_eth_dev_is_valid_port(port_id)) {              \
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
                return;                                         \
        }                                                       \
} while (0)

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static struct rte_eth_dev_data *rte_eth_dev_data;
static uint8_t nb_ports;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* statistics name strings and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
        {"rx_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"alloc_rx_buff_failed", offsetof(struct rte_eth_stats, rx_nombuf)},
};
#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
        {"rx_packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"rx_bytes", offsetof(struct rte_eth_stats, q_ibytes)},
};
#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
                sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
        {"tx_packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"tx_bytes", offsetof(struct rte_eth_stats, q_obytes)},
        {"tx_errors", offsetof(struct rte_eth_stats, q_errors)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
                sizeof(rte_txq_stats_strings[0]))


/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * a pointer to the parameters for the callback, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

enum {
        DEV_DETACHED = 0,
        DEV_ATTACHED
};

static void
rte_eth_dev_data_alloc(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
                                rte_socket_id(), flags);
        } else
                mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
        if (mz == NULL)
                rte_panic("Cannot allocate memzone for ethernet port data\n");

        rte_eth_dev_data = mz->addr;
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                memset(rte_eth_dev_data, 0,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

static uint8_t
rte_eth_dev_find_free_port(void)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].attached == DEV_DETACHED)
                        return i;
        }
        return RTE_MAX_ETHPORTS;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name, enum rte_eth_dev_type type)
{
        uint8_t port_id;
        struct rte_eth_dev *eth_dev;

        port_id = rte_eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
                return NULL;
        }

        if (rte_eth_dev_data == NULL)
                rte_eth_dev_data_alloc();

        if (rte_eth_dev_allocated(name) != NULL) {
                PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
                                name);
                return NULL;
        }

        eth_dev = &rte_eth_devices[port_id];
        eth_dev->data = &rte_eth_dev_data[port_id];
        snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
        eth_dev->data->port_id = port_id;
        eth_dev->attached = DEV_ATTACHED;
        eth_dev->dev_type = type;
        nb_ports++;
        return eth_dev;
}

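/* Note: the unique name generated below is "<bus>:<devid>.<function>" in
 * decimal, e.g. "2:0.1" for bus 2, device 0, function 1 (the address in
 * this example is illustrative only); the PCI domain is not part of the
 * name. */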
static int
rte_eth_dev_create_unique_device_name(char *name, size_t size,
                struct rte_pci_device *pci_dev)
{
        int ret;

        if ((name == NULL) || (pci_dev == NULL))
                return -EINVAL;

        ret = snprintf(name, size, "%d:%d.%d",
                        pci_dev->addr.bus, pci_dev->addr.devid,
                        pci_dev->addr.function);
        if (ret < 0)
                return ret;
        return 0;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        eth_dev->attached = DEV_DETACHED;
        nb_ports--;
        return 0;
}

static int
rte_eth_dev_init(struct rte_pci_driver *pci_drv,
                 struct rte_pci_device *pci_dev)
{
        struct eth_driver    *eth_drv;
        struct rte_eth_dev *eth_dev;
        char ethdev_name[RTE_ETH_NAME_MAX_LEN];

        int diag;

        eth_drv = (struct eth_driver *)pci_drv;

        /* Create unique Ethernet device name using PCI address */
        rte_eth_dev_create_unique_device_name(ethdev_name,
                        sizeof(ethdev_name), pci_dev);

        eth_dev = rte_eth_dev_allocate(ethdev_name, RTE_ETH_DEV_PCI);
        if (eth_dev == NULL)
                return -ENOMEM;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
                                  eth_drv->dev_private_size,
                                  RTE_CACHE_LINE_SIZE);
                if (eth_dev->data->dev_private == NULL)
                        rte_panic("Cannot allocate memory for private port data\n");
        }
        eth_dev->pci_dev = pci_dev;
        eth_dev->driver = eth_drv;
        eth_dev->data->rx_mbuf_alloc_failed = 0;

        /* init user callbacks */
        TAILQ_INIT(&(eth_dev->link_intr_cbs));

        /*
         * Set the default MTU.
         */
        eth_dev->data->mtu = ETHER_MTU;

        /* Invoke PMD device initialization function */
        diag = (*eth_drv->eth_dev_init)(eth_dev);
        if (diag == 0)
                return 0;

        PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%x device_id=0x%x) failed\n",
                        pci_drv->name,
                        (unsigned) pci_dev->id.vendor_id,
                        (unsigned) pci_dev->id.device_id);
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);
        eth_dev->attached = DEV_DETACHED;
        nb_ports--;
        return diag;
}

static int
rte_eth_dev_uninit(struct rte_pci_device *pci_dev)
{
        const struct eth_driver *eth_drv;
        struct rte_eth_dev *eth_dev;
        char ethdev_name[RTE_ETH_NAME_MAX_LEN];
        int ret;

        if (pci_dev == NULL)
                return -EINVAL;

        /* Create unique Ethernet device name using PCI address */
        rte_eth_dev_create_unique_device_name(ethdev_name,
                        sizeof(ethdev_name), pci_dev);

        eth_dev = rte_eth_dev_allocated(ethdev_name);
        if (eth_dev == NULL)
                return -ENODEV;

        eth_drv = (const struct eth_driver *)pci_dev->driver;

        /* Invoke PMD device uninit function */
        if (*eth_drv->eth_dev_uninit) {
                ret = (*eth_drv->eth_dev_uninit)(eth_dev);
                if (ret)
                        return ret;
        }

        /* free ether device */
        rte_eth_dev_release_port(eth_dev);

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);

        eth_dev->pci_dev = NULL;
        eth_dev->driver = NULL;
        eth_dev->data = NULL;

        return 0;
}


/**
 * Register an Ethernet [Poll Mode] driver.
 *
 * Function invoked by the initialization function of an Ethernet driver
 * to simultaneously register itself as a PCI driver and as an Ethernet
 * Poll Mode Driver.
 * Invokes the rte_eal_pci_register() function to register the *pci_drv*
 * structure embedded in the *eth_drv* structure, after having stored the
 * address of the rte_eth_dev_init() function in the *devinit* field of
 * the *pci_drv* structure.
 * During the PCI probing phase, the rte_eth_dev_init() function is
 * invoked for each PCI [Ethernet device] matching the embedded PCI
 * identifiers provided by the driver.
 */
void
rte_eth_driver_register(struct eth_driver *eth_drv)
{
        eth_drv->pci_drv.devinit = rte_eth_dev_init;
        eth_drv->pci_drv.devuninit = rte_eth_dev_uninit;
        rte_eal_pci_register(&eth_drv->pci_drv);
}
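
/*
 * Registration sketch, illustrative only: the driver name, id table,
 * private structure and init/uninit callbacks below are hypothetical,
 * not part of this library. A PMD fills in a static struct eth_driver
 * and hands it to rte_eth_driver_register() from its own init function:
 *
 *   static struct eth_driver rte_foo_pmd = {
 *           .pci_drv = {
 *                   .name = "rte_foo_pmd",
 *                   .id_table = pci_id_foo_map,
 *                   .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
 *                                RTE_PCI_DRV_DETACHABLE,
 *           },
 *           .eth_dev_init = eth_foo_dev_init,
 *           .eth_dev_uninit = eth_foo_dev_uninit,
 *           .dev_private_size = sizeof(struct foo_adapter),
 *   };
 *
 *   rte_eth_driver_register(&rte_foo_pmd);
 *
 * rte_eth_driver_register() wires rte_eth_dev_init()/rte_eth_dev_uninit()
 * into the embedded pci_drv, so PCI probe and detach end up in the PMD's
 * eth_dev_init/eth_dev_uninit callbacks.
 */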

static int
rte_eth_dev_is_valid_port(uint8_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            rte_eth_devices[port_id].attached != DEV_ATTACHED)
                return 0;
        else
                return 1;
}

int
rte_eth_dev_socket_id(uint8_t port_id)
{
        if (!rte_eth_dev_is_valid_port(port_id))
                return -1;
        return rte_eth_devices[port_id].pci_dev->numa_node;
}

uint8_t
rte_eth_dev_count(void)
{
        return nb_ports;
}

static enum rte_eth_dev_type
rte_eth_dev_get_device_type(uint8_t port_id)
{
        if (!rte_eth_dev_is_valid_port(port_id))
                return RTE_ETH_DEV_UNKNOWN;
        return rte_eth_devices[port_id].dev_type;
}

static int
rte_eth_dev_save(struct rte_eth_dev *devs, size_t size)
{
        if ((devs == NULL) ||
            (size != sizeof(struct rte_eth_dev) * RTE_MAX_ETHPORTS))
                return -EINVAL;

        /* save current rte_eth_devices */
        memcpy(devs, rte_eth_devices, size);
        return 0;
}

static int
rte_eth_dev_get_changed_port(struct rte_eth_dev *devs, uint8_t *port_id)
{
        if ((devs == NULL) || (port_id == NULL))
                return -EINVAL;

        /* check which port was attached or detached */
        for (*port_id = 0; *port_id < RTE_MAX_ETHPORTS; (*port_id)++, devs++) {
                if (rte_eth_devices[*port_id].attached ^ devs->attached)
                        return 0;
        }
        return -ENODEV;
}

static int
rte_eth_dev_get_addr_by_port(uint8_t port_id, struct rte_pci_addr *addr)
{
        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (addr == NULL) {
                PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        *addr = rte_eth_devices[port_id].pci_dev->addr;
        return 0;
}

static int
rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
{
        char *tmp;

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (name == NULL) {
                PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        /* shouldn't check 'rte_eth_devices[i].data' here,
         * because it might be overwritten by a VDEV PMD */
        tmp = rte_eth_dev_data[port_id].name;
        snprintf(name, RTE_ETH_NAME_MAX_LEN, "%s", tmp);
        return 0;
}

static int
rte_eth_dev_is_detachable(uint8_t port_id)
{
        uint32_t drv_flags;

        if (port_id >= RTE_MAX_ETHPORTS) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return -EINVAL;
        }

        if (rte_eth_devices[port_id].dev_type == RTE_ETH_DEV_PCI) {
                switch (rte_eth_devices[port_id].pci_dev->kdrv) {
                case RTE_KDRV_IGB_UIO:
                case RTE_KDRV_UIO_GENERIC:
                case RTE_KDRV_NIC_UIO:
                        break;
                case RTE_KDRV_VFIO:
                default:
                        return -ENOTSUP;
                }
        }

        drv_flags = rte_eth_devices[port_id].driver->pci_drv.drv_flags;
        return !(drv_flags & RTE_PCI_DRV_DETACHABLE);
}

/* attach a new physical device, then store the port_id of the device */
static int
rte_eth_dev_attach_pdev(struct rte_pci_addr *addr, uint8_t *port_id)
{
        uint8_t new_port_id;
        struct rte_eth_dev devs[RTE_MAX_ETHPORTS];

        if ((addr == NULL) || (port_id == NULL))
                goto err;

        /* save current port status */
        if (rte_eth_dev_save(devs, sizeof(devs)))
                goto err;
        /* re-construct pci_device_list */
        if (rte_eal_pci_scan())
                goto err;
        /* invoke the probe function of the driver that can handle the
         * new device.
         * TODO:
         * rte_eal_pci_probe_one() should return port_id.
         * And rte_eth_dev_save() and rte_eth_dev_get_changed_port()
         * should be removed. */
        if (rte_eal_pci_probe_one(addr))
                goto err;
        /* get the port_id enabled by the above procedures */
        if (rte_eth_dev_get_changed_port(devs, &new_port_id))
                goto err;

        *port_id = new_port_id;
        return 0;
err:
        RTE_LOG(ERR, EAL, "Driver cannot attach the device\n");
        return -1;
}

/* detach a physical device, then store the pci_addr of the device */
static int
rte_eth_dev_detach_pdev(uint8_t port_id, struct rte_pci_addr *addr)
{
        struct rte_pci_addr freed_addr;
        struct rte_pci_addr vp;

        if (addr == NULL)
                goto err;

        /* check whether the driver supports the detach feature */
        if (rte_eth_dev_is_detachable(port_id))
                goto err;

        /* get pci address by port id */
        if (rte_eth_dev_get_addr_by_port(port_id, &freed_addr))
                goto err;

        /* a zeroed pci addr means the port comes from a virtual device */
        vp.domain = vp.bus = vp.devid = vp.function = 0;
        if (rte_eal_compare_pci_addr(&vp, &freed_addr) == 0)
                goto err;

        /* invoke the devuninit function of the pci driver and
         * remove the device from pci_device_list */
        if (rte_eal_pci_detach(&freed_addr))
                goto err;

        *addr = freed_addr;
        return 0;
err:
        RTE_LOG(ERR, EAL, "Driver cannot detach the device\n");
        return -1;
}

/* attach a new virtual device, then store the port_id of the device */
static int
rte_eth_dev_attach_vdev(const char *vdevargs, uint8_t *port_id)
{
        char *name = NULL, *args = NULL;
        uint8_t new_port_id;
        struct rte_eth_dev devs[RTE_MAX_ETHPORTS];
        int ret = -1;

        if ((vdevargs == NULL) || (port_id == NULL))
                goto end;

        /* parse vdevargs, then retrieve device name and args */
        if (rte_eal_parse_devargs_str(vdevargs, &name, &args))
                goto end;

        /* save current port status */
        if (rte_eth_dev_save(devs, sizeof(devs)))
                goto end;
        /* walk through dev_driver_list to find the driver for the device,
         * then invoke the probe function of the driver.
         * TODO:
         * rte_eal_vdev_init() should return port_id,
         * And rte_eth_dev_save() and rte_eth_dev_get_changed_port()
         * should be removed. */
        if (rte_eal_vdev_init(name, args))
                goto end;
        /* get the port_id enabled by the above procedures */
        if (rte_eth_dev_get_changed_port(devs, &new_port_id))
                goto end;
        ret = 0;
        *port_id = new_port_id;
end:
        if (name)
                free(name);
        if (args)
                free(args);

        if (ret < 0)
                RTE_LOG(ERR, EAL, "Driver cannot attach the device\n");
        return ret;
}

/* detach a virtual device, then store the name of the device */
static int
rte_eth_dev_detach_vdev(uint8_t port_id, char *vdevname)
{
        char name[RTE_ETH_NAME_MAX_LEN];

        if (vdevname == NULL)
                goto err;

        /* check whether the driver supports the detach feature */
        if (rte_eth_dev_is_detachable(port_id))
                goto err;

        /* get device name by port id */
        if (rte_eth_dev_get_name_by_port(port_id, name))
                goto err;
        /* walk through dev_driver_list to find the driver for the device,
         * then invoke the uninit function of the driver */
        if (rte_eal_vdev_uninit(name))
                goto err;

        strncpy(vdevname, name, sizeof(name));
        return 0;
err:
        RTE_LOG(ERR, EAL, "Driver cannot detach the device\n");
        return -1;
}

/* attach a new device, then store the port_id of the device */
int
rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
{
        struct rte_pci_addr addr;

        if ((devargs == NULL) || (port_id == NULL))
                return -EINVAL;

        if (eal_parse_pci_DomBDF(devargs, &addr) == 0)
                return rte_eth_dev_attach_pdev(&addr, port_id);
        else
                return rte_eth_dev_attach_vdev(devargs, port_id);
}

/* detach the device, then store the name of the device */
int
rte_eth_dev_detach(uint8_t port_id, char *name)
{
        struct rte_pci_addr addr;
        int ret;

        if (name == NULL)
                return -EINVAL;

        if (rte_eth_dev_get_device_type(port_id) == RTE_ETH_DEV_PCI) {
                ret = rte_eth_dev_get_addr_by_port(port_id, &addr);
                if (ret < 0)
                        return ret;

                ret = rte_eth_dev_detach_pdev(port_id, &addr);
                if (ret == 0)
                        snprintf(name, RTE_ETH_NAME_MAX_LEN,
                                "%04x:%02x:%02x.%d",
                                addr.domain, addr.bus,
                                addr.devid, addr.function);

                return ret;
        } else
                return rte_eth_dev_detach_vdev(port_id, name);
}
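
/*
 * Attach/detach usage sketch, illustrative only: the PCI address and the
 * pcap vdev arguments below are hypothetical examples, not required values.
 *
 *   uint8_t port_id;
 *   char name[RTE_ETH_NAME_MAX_LEN];
 *
 *   devargs that parse as a PCI Domain:Bus:Device.Function take the
 *   physical-device path:
 *       rte_eth_dev_attach("0000:02:00.0", &port_id);
 *
 *   anything else is treated as virtual device arguments:
 *       rte_eth_dev_attach("eth_pcap0,iface=eth0", &port_id);
 *
 *   on success, 'name' receives the PCI address or the vdev name:
 *       rte_eth_dev_detach(port_id, name);
 */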

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -ENOMEM;
                }
        } else { /* re-configure */
                FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

int
rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
}

int
rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
}

int
rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
}

int
rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -ENOMEM;
                }
        } else { /* re-configure */
                FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

static int
rte_eth_dev_check_vf_rss_rxq_num(uint8_t port_id, uint16_t nb_rx_q)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

        switch (nb_rx_q) {
        case 1:
        case 2:
                RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
                break;
        case 4:
                RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
                break;
        default:
                return -EINVAL;
        }

        RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
        RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
                dev->pci_dev->max_vfs * nb_rx_q;

        return 0;
}

static int
rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                          const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

        if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
                /* check multi-queue mode */
                if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
                    (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) ||
                    (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
                        /* SRIOV only works in VMDq mode */
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "wrong VMDQ mq_mode rx %u tx %u\n",
                                        port_id,
                                        dev_conf->rxmode.mq_mode,
                                        dev_conf->txmode.mq_mode);
                        return -EINVAL;
                }

                switch (dev_conf->rxmode.mq_mode) {
                case ETH_MQ_RX_VMDQ_DCB:
                case ETH_MQ_RX_VMDQ_DCB_RSS:
                        /* DCB/RSS VMDQ in SRIOV mode is not implemented yet */
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "unsupported VMDQ mq_mode rx %u\n",
                                        port_id, dev_conf->rxmode.mq_mode);
                        return -EINVAL;
                case ETH_MQ_RX_RSS:
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "Rx mq mode changed from "
                                        "mq_mode %u to VMDQ mq_mode %u\n",
                                        port_id,
                                        dev_conf->rxmode.mq_mode,
                                        dev->data->dev_conf.rxmode.mq_mode);
                        /* fallthrough */
                case ETH_MQ_RX_VMDQ_RSS:
                        dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
                        if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
                                if (rte_eth_dev_check_vf_rss_rxq_num(port_id, nb_rx_q) != 0) {
                                        PMD_DEBUG_TRACE("ethdev port_id=%d"
                                                        " SRIOV active, invalid queue"
                                                        " number for VMDQ RSS, allowed"
                                                        " values are 1, 2 or 4\n",
                                                        port_id);
                                        return -EINVAL;
                                }
                        break;
                default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
                        /* if no mq mode was configured, use the default scheme */
                        dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
                        if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
                                RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
                        break;
                }

                switch (dev_conf->txmode.mq_mode) {
                case ETH_MQ_TX_VMDQ_DCB:
                        /* DCB VMDQ in SRIOV mode is not implemented yet */
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "unsupported VMDQ mq_mode tx %u\n",
                                        port_id, dev_conf->txmode.mq_mode);
                        return -EINVAL;
                default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
                        /* if no mq mode was configured, use the default scheme */
                        dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
                        break;
                }

                /* check for a valid queue number */
                if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
                    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
                                        "queue number must be less than or equal to %d\n",
                                        port_id,
                                        RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
                        return -EINVAL;
                }
        } else {
                /* For VMDQ+DCB mode, check our configuration before we go further */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_conf *conf;

                        if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
                                                "!= %d\n",
                                                port_id, ETH_VMDQ_DCB_NUM_QUEUES);
                                return -EINVAL;
                        }
                        conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
                        if (!(conf->nb_queue_pools == ETH_16_POOLS ||
                              conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
                                                "nb_queue_pools must be %d or %d\n",
                                                port_id, ETH_16_POOLS, ETH_32_POOLS);
                                return -EINVAL;
                        }
                }
                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_tx_conf *conf;

                        if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q "
                                                "!= %d\n",
                                                port_id, ETH_VMDQ_DCB_NUM_QUEUES);
                                return -EINVAL;
                        }
                        conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
                        if (!(conf->nb_queue_pools == ETH_16_POOLS ||
                              conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
                                                "nb_queue_pools must be %d or %d\n",
                                                port_id, ETH_16_POOLS, ETH_32_POOLS);
                                return -EINVAL;
                        }
                }

                /* For DCB mode, check our configuration before we go further */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
                        const struct rte_eth_dcb_rx_conf *conf;

                        if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
                                                "!= %d\n",
                                                port_id, ETH_DCB_NUM_QUEUES);
                                return -EINVAL;
                        }
                        conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
                        if (!(conf->nb_tcs == ETH_4_TCS ||
                              conf->nb_tcs == ETH_8_TCS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
                                                "nb_tcs must be %d or %d\n",
                                                port_id, ETH_4_TCS, ETH_8_TCS);
                                return -EINVAL;
                        }
                }

                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
                        const struct rte_eth_dcb_tx_conf *conf;

                        if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
                                                "!= %d\n",
                                                port_id, ETH_DCB_NUM_QUEUES);
                                return -EINVAL;
                        }
                        conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
                        if (!(conf->nb_tcs == ETH_4_TCS ||
                              conf->nb_tcs == ETH_8_TCS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
                                                "nb_tcs must be %d or %d\n",
                                                port_id, ETH_4_TCS, ETH_8_TCS);
                                return -EINVAL;
                        }
                }
        }
        return 0;
}

int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        int diag;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
                PMD_DEBUG_TRACE(
                        "Number of RX queues requested (%u) is greater than max supported (%d)\n",
                        nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
                PMD_DEBUG_TRACE(
                        "Number of TX queues requested (%u) is greater than max supported (%d)\n",
                        nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
        if (nb_rx_q > dev_info.max_rx_queues) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
                                port_id, nb_rx_q, dev_info.max_rx_queues);
                return -EINVAL;
        }
        if (nb_rx_q == 0) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
                return -EINVAL;
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
                                port_id, nb_tx_q, dev_info.max_tx_queues);
                return -EINVAL;
        }
        if (nb_tx_q == 0) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
                return -EINVAL;
        }

        /* Copy the dev_conf parameter into the dev structure */
        memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

        /*
         * If link state interrupt is enabled, check that the
         * device supports it.
         */
        if (dev_conf->intr_conf.lsc == 1) {
                const struct rte_pci_driver *pci_drv = &dev->driver->pci_drv;

                if (!(pci_drv->drv_flags & RTE_PCI_DRV_INTR_LSC)) {
                        PMD_DEBUG_TRACE("driver %s does not support lsc\n",
                                        pci_drv->name);
                        return -EINVAL;
                }
        }

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (dev_conf->rxmode.jumbo_frame == 1) {
                if (dev_conf->rxmode.max_rx_pkt_len >
                    dev_info.max_rx_pktlen) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " > max valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)dev_info.max_rx_pktlen);
                        return -EINVAL;
                } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " < min valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)ETHER_MIN_LEN);
                        return -EINVAL;
                }
        } else {
                if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
                        dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
                        /* Use default value */
                        dev->data->dev_conf.rxmode.max_rx_pkt_len =
                                                        ETHER_MAX_LEN;
        }

        /* check the multi-queue mode */
        diag = rte_eth_dev_check_mq_mode(port_id, nb_rx_q, nb_tx_q, dev_conf);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_check_mq_mode = %d\n",
                                port_id, diag);
                return diag;
        }

        /*
         * Setup new number of RX/TX queues and reconfigure device.
         */
        diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
                                port_id, diag);
                return diag;
        }

        diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                return diag;
        }

        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                return diag;
        }

        return 0;
}

static void
rte_eth_dev_config_restore(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct ether_addr addr;
        uint16_t i;
        uint32_t pool = 0;

        dev = &rte_eth_devices[port_id];

        rte_eth_dev_info_get(port_id, &dev_info);

        if (RTE_ETH_DEV_SRIOV(dev).active)
                pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

        /* replay MAC address configuration */
        for (i = 0; i < dev_info.max_mac_addrs; i++) {
                addr = dev->data->mac_addrs[i];

                /* skip zero address */
                if (is_zero_ether_addr(&addr))
                        continue;

                /* add address to the hardware */
                if (*dev->dev_ops->mac_addr_add &&
                        (dev->data->mac_pool_sel[i] & (1ULL << pool)))
                        (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
                else {
                        PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
                                        port_id);
                        /* exit the loop but do not return an error */
                        break;
                }
        }

        /* replay promiscuous configuration */
        if (rte_eth_promiscuous_get(port_id) == 1)
                rte_eth_promiscuous_enable(port_id);
        else if (rte_eth_promiscuous_get(port_id) == 0)
                rte_eth_promiscuous_disable(port_id);

        /* replay all multicast configuration */
        if (rte_eth_allmulticast_get(port_id) == 1)
                rte_eth_allmulticast_enable(port_id);
        else if (rte_eth_allmulticast_get(port_id) == 0)
                rte_eth_allmulticast_disable(port_id);
}

int
rte_eth_dev_start(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        int diag;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

        if (dev->data->dev_started != 0) {
                PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
                        " already started\n",
                        port_id);
                return 0;
        }

        diag = (*dev->dev_ops->dev_start)(dev);
        if (diag == 0)
                dev->data->dev_started = 1;
        else
                return diag;

        rte_eth_dev_config_restore(port_id);

        if (dev->data->dev_conf.intr_conf.lsc != 0) {
                FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
                (*dev->dev_ops->link_update)(dev, 0);
        }
        return 0;
}
1310
1311 void
1312 rte_eth_dev_stop(uint8_t port_id)
1313 {
1314         struct rte_eth_dev *dev;
1315
1316         /* This function is only safe when called from the primary process
1317          * in a multi-process setup*/
1318         PROC_PRIMARY_OR_RET();
1319
1320         VALID_PORTID_OR_RET(port_id);
1321         dev = &rte_eth_devices[port_id];
1322
1323         FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1324
1325         if (dev->data->dev_started == 0) {
1326                 PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
1327                         " already stopped\n",
1328                         port_id);
1329                 return;
1330         }
1331
1332         dev->data->dev_started = 0;
1333         (*dev->dev_ops->dev_stop)(dev);
1334 }
1335
1336 int
1337 rte_eth_dev_set_link_up(uint8_t port_id)
1338 {
1339         struct rte_eth_dev *dev;
1340
1341         /* This function is only safe when called from the primary process
1342          * in a multi-process setup*/
1343         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1344
1345         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1346
1347         dev = &rte_eth_devices[port_id];
1348
1349         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1350         return (*dev->dev_ops->dev_set_link_up)(dev);
1351 }
1352
1353 int
1354 rte_eth_dev_set_link_down(uint8_t port_id)
1355 {
1356         struct rte_eth_dev *dev;
1357
1358         /* This function is only safe when called from the primary process
1359          * in a multi-process setup*/
1360         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1361
1362         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1363
1364         dev = &rte_eth_devices[port_id];
1365
1366         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1367         return (*dev->dev_ops->dev_set_link_down)(dev);
1368 }
1369
1370 void
1371 rte_eth_dev_close(uint8_t port_id)
1372 {
1373         struct rte_eth_dev *dev;
1374
1375         /* This function is only safe when called from the primary process
1376          * in a multi-process setup*/
1377         PROC_PRIMARY_OR_RET();
1378
1379         VALID_PORTID_OR_RET(port_id);
1380         dev = &rte_eth_devices[port_id];
1381
1382         FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1383         dev->data->dev_started = 0;
1384         (*dev->dev_ops->dev_close)(dev);
1385 }
1386
1387 int
1388 rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
1389                        uint16_t nb_rx_desc, unsigned int socket_id,
1390                        const struct rte_eth_rxconf *rx_conf,
1391                        struct rte_mempool *mp)
1392 {
1393         int ret;
1394         uint32_t mbp_buf_size;
1395         struct rte_eth_dev *dev;
1396         struct rte_eth_dev_info dev_info;
1397
1398         /* This function is only safe when called from the primary process
1399          * in a multi-process setup*/
1400         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1401
1402         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1403
1404         dev = &rte_eth_devices[port_id];
1405         if (rx_queue_id >= dev->data->nb_rx_queues) {
1406                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
1407                 return -EINVAL;
1408         }
1409
1410         if (dev->data->dev_started) {
1411                 PMD_DEBUG_TRACE(
1412                     "port %d must be stopped to allow configuration\n", port_id);
1413                 return -EBUSY;
1414         }
1415
1416         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1417         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1418
1419         /*
1420          * Check the size of the mbuf data buffer.
1421          * This value must be provided in the private data of the memory pool.
1422          * First check that the memory pool has a valid private data.
1423          */
1424         rte_eth_dev_info_get(port_id, &dev_info);
1425         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1426                 PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
1427                                 mp->name, (int) mp->private_data_size,
1428                                 (int) sizeof(struct rte_pktmbuf_pool_private));
1429                 return -ENOSPC;
1430         }
1431         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1432
1433         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1434                 PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
1435                                 "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
1436                                 "=%d)\n",
1437                                 mp->name,
1438                                 (int)mbp_buf_size,
1439                                 (int)(RTE_PKTMBUF_HEADROOM +
1440                                       dev_info.min_rx_bufsize),
1441                                 (int)RTE_PKTMBUF_HEADROOM,
1442                                 (int)dev_info.min_rx_bufsize);
1443                 return -EINVAL;
1444         }
1445
1446         if (rx_conf == NULL)
1447                 rx_conf = &dev_info.default_rxconf;
1448
1449         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1450                                               socket_id, rx_conf, mp);
1451         if (!ret) {
1452                 if (!dev->data->min_rx_buf_size ||
1453                     dev->data->min_rx_buf_size > mbp_buf_size)
1454                         dev->data->min_rx_buf_size = mbp_buf_size;
1455         }
1456
1457         return ret;
1458 }
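
/*
 * Usage sketch (illustrative only): setting up one RX queue.  The pool
 * name, element count, cache size and descriptor count below are
 * assumptions, not values mandated by this API; the port must already
 * have been configured with rte_eth_dev_configure().
 */
#if 0
static int
example_rx_setup(uint8_t port_id, unsigned int socket_id)
{
        struct rte_mempool *mp;

        /* rte_pktmbuf_pool_create() fills in the rte_pktmbuf_pool_private
         * area that the private_data_size check above relies on. */
        mp = rte_pktmbuf_pool_create("example_rx_pool", 8192, 256, 0,
                                     RTE_MBUF_DEFAULT_BUF_SIZE, socket_id);
        if (mp == NULL)
                return -ENOMEM;

        /* A NULL rx_conf selects the driver's default_rxconf, as above. */
        return rte_eth_rx_queue_setup(port_id, 0, 512, socket_id, NULL, mp);
}
#endif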
1459
1460 int
1461 rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
1462                        uint16_t nb_tx_desc, unsigned int socket_id,
1463                        const struct rte_eth_txconf *tx_conf)
1464 {
1465         struct rte_eth_dev *dev;
1466         struct rte_eth_dev_info dev_info;
1467
1468         /* This function is only safe when called from the primary process
1469          * in a multi-process setup. */
1470         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1471
1472         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1473
1474         dev = &rte_eth_devices[port_id];
1475         if (tx_queue_id >= dev->data->nb_tx_queues) {
1476                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1477                 return -EINVAL;
1478         }
1479
1480         if (dev->data->dev_started) {
1481                 PMD_DEBUG_TRACE(
1482                     "port %d must be stopped to allow configuration\n", port_id);
1483                 return -EBUSY;
1484         }
1485
1486         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1487         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1488
1489         rte_eth_dev_info_get(port_id, &dev_info);
1490
1491         if (tx_conf == NULL)
1492                 tx_conf = &dev_info.default_txconf;
1493
1494         return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
1495                                                socket_id, tx_conf);
1496 }
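
/*
 * Usage sketch (illustrative only): the TX counterpart needs no mempool;
 * a NULL tx_conf likewise falls back to the driver's default_txconf.
 * Queue and descriptor numbers are assumptions.
 */
#if 0
static int
example_tx_setup(uint8_t port_id, unsigned int socket_id)
{
        return rte_eth_tx_queue_setup(port_id, 0, 512, socket_id, NULL);
}
#endif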
1497
1498 void
1499 rte_eth_promiscuous_enable(uint8_t port_id)
1500 {
1501         struct rte_eth_dev *dev;
1502
1503         VALID_PORTID_OR_RET(port_id);
1504         dev = &rte_eth_devices[port_id];
1505
1506         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1507         (*dev->dev_ops->promiscuous_enable)(dev);
1508         dev->data->promiscuous = 1;
1509 }
1510
1511 void
1512 rte_eth_promiscuous_disable(uint8_t port_id)
1513 {
1514         struct rte_eth_dev *dev;
1515
1516         VALID_PORTID_OR_RET(port_id);
1517         dev = &rte_eth_devices[port_id];
1518
1519         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1520         dev->data->promiscuous = 0;
1521         (*dev->dev_ops->promiscuous_disable)(dev);
1522 }
1523
1524 int
1525 rte_eth_promiscuous_get(uint8_t port_id)
1526 {
1527         struct rte_eth_dev *dev;
1528
1529         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1530
1531         dev = &rte_eth_devices[port_id];
1532         return dev->data->promiscuous;
1533 }
1534
1535 void
1536 rte_eth_allmulticast_enable(uint8_t port_id)
1537 {
1538         struct rte_eth_dev *dev;
1539
1540         VALID_PORTID_OR_RET(port_id);
1541         dev = &rte_eth_devices[port_id];
1542
1543         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1544         (*dev->dev_ops->allmulticast_enable)(dev);
1545         dev->data->all_multicast = 1;
1546 }
1547
1548 void
1549 rte_eth_allmulticast_disable(uint8_t port_id)
1550 {
1551         struct rte_eth_dev *dev;
1552
1553         VALID_PORTID_OR_RET(port_id);
1554         dev = &rte_eth_devices[port_id];
1555
1556         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1557         dev->data->all_multicast = 0;
1558         (*dev->dev_ops->allmulticast_disable)(dev);
1559 }
1560
1561 int
1562 rte_eth_allmulticast_get(uint8_t port_id)
1563 {
1564         struct rte_eth_dev *dev;
1565
1566         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1567
1568         dev = &rte_eth_devices[port_id];
1569         return dev->data->all_multicast;
1570 }
1571
1572 static inline int
1573 rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
1574                                 struct rte_eth_link *link)
1575 {
1576         struct rte_eth_link *dst = link;
1577         struct rte_eth_link *src = &(dev->data->dev_link);
1578
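        /* Copy the 8-byte link struct with one atomic compare-and-set so a
         * reader can never observe it half-updated by a concurrent writer. */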
1579         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1580                                         *(uint64_t *)src) == 0)
1581                 return -1;
1582
1583         return 0;
1584 }
1585
1586 void
1587 rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
1588 {
1589         struct rte_eth_dev *dev;
1590
1591         VALID_PORTID_OR_RET(port_id);
1592         dev = &rte_eth_devices[port_id];
1593
1594         if (dev->data->dev_conf.intr_conf.lsc != 0)
1595                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1596         else {
1597                 FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1598                 (*dev->dev_ops->link_update)(dev, 1);
1599                 *eth_link = dev->data->dev_link;
1600         }
1601 }
1602
1603 void
1604 rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
1605 {
1606         struct rte_eth_dev *dev;
1607
1608         VALID_PORTID_OR_RET(port_id);
1609         dev = &rte_eth_devices[port_id];
1610
1611         if (dev->data->dev_conf.intr_conf.lsc != 0)
1612                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1613         else {
1614                 FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1615                 (*dev->dev_ops->link_update)(dev, 0);
1616                 *eth_link = dev->data->dev_link;
1617         }
1618 }
1619
1620 int
1621 rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
1622 {
1623         struct rte_eth_dev *dev;
1624
1625         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1626
1627         dev = &rte_eth_devices[port_id];
1628         memset(stats, 0, sizeof(*stats));
1629
1630         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1631         (*dev->dev_ops->stats_get)(dev, stats);
1632         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1633         return 0;
1634 }
1635
1636 void
1637 rte_eth_stats_reset(uint8_t port_id)
1638 {
1639         struct rte_eth_dev *dev;
1640
1641         VALID_PORTID_OR_RET(port_id);
1642         dev = &rte_eth_devices[port_id];
1643
1644         FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1645         (*dev->dev_ops->stats_reset)(dev);
1646 }
1647
1648 /* retrieve ethdev extended statistics */
1649 int
1650 rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
1651         unsigned n)
1652 {
1653         struct rte_eth_stats eth_stats;
1654         struct rte_eth_dev *dev;
1655         unsigned count = 0, i, q;
1656         signed xcount = 0;
1657         uint64_t val, *stats_ptr;
1658
1659         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1660
1661         dev = &rte_eth_devices[port_id];
1662
1663         /* Return generic statistics */
1664         count = RTE_NB_STATS;
1665         count += dev->data->nb_rx_queues * RTE_NB_RXQ_STATS;
1666         count += dev->data->nb_tx_queues * RTE_NB_TXQ_STATS;
1667
1668         /* implemented by the driver */
1669         if (dev->dev_ops->xstats_get != NULL) {
1670                 /* Retrieve the xstats from the driver at the end of the
1671                  * xstats struct.
1672                  */
1673                 xcount = (*dev->dev_ops->xstats_get)(dev, &xstats[count],
1674                          (n > count) ? n - count : 0);
1675
1676                 if (xcount < 0)
1677                         return xcount;
1678         }
1679
1680         if (n < count + xcount)
1681                 return count + xcount;
1682
1683         /* now fill the xstats structure */
1684         count = 0;
1685         rte_eth_stats_get(port_id, &eth_stats);
1686
1687         /* global stats */
1688         for (i = 0; i < RTE_NB_STATS; i++) {
1689                 stats_ptr = RTE_PTR_ADD(&eth_stats,
1690                                         rte_stats_strings[i].offset);
1691                 val = *stats_ptr;
1692                 snprintf(xstats[count].name, sizeof(xstats[count].name),
1693                         "%s", rte_stats_strings[i].name);
1694                 xstats[count++].value = val;
1695         }
1696
1697         /* per-rxq stats */
1698         for (q = 0; q < dev->data->nb_rx_queues; q++) {
1699                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
1700                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1701                                         rte_rxq_stats_strings[i].offset +
1702                                         q * sizeof(uint64_t));
1703                         val = *stats_ptr;
1704                         snprintf(xstats[count].name, sizeof(xstats[count].name),
1705                                 "rx_queue_%u_%s", q,
1706                                 rte_rxq_stats_strings[i].name);
1707                         xstats[count++].value = val;
1708                 }
1709         }
1710
1711         /* per-txq stats */
1712         for (q = 0; q < dev->data->nb_tx_queues; q++) {
1713                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
1714                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1715                                         rte_txq_stats_strings[i].offset +
1716                                         q * sizeof(uint64_t));
1717                         val = *stats_ptr;
1718                         snprintf(xstats[count].name, sizeof(xstats[count].name),
1719                                 "tx_queue_%u_%s", q,
1720                                 rte_txq_stats_strings[i].name);
1721                         xstats[count++].value = val;
1722                 }
1723         }
1724
1725         return count + xcount;
1726 }
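
/*
 * Usage sketch (illustrative only): the two-call pattern implied by the
 * "n < count + xcount" check above.  A first call with n == 0 returns the
 * required array size; the caller allocates and calls again.  The function
 * name is hypothetical.
 */
#if 0
static void
example_dump_xstats(uint8_t port_id)
{
        struct rte_eth_xstats *xstats;
        int n, i;

        n = rte_eth_xstats_get(port_id, NULL, 0);
        if (n <= 0)
                return;
        xstats = malloc(sizeof(*xstats) * n);
        if (xstats == NULL)
                return;
        n = rte_eth_xstats_get(port_id, xstats, n);
        for (i = 0; i < n; i++)
                printf("%s: %" PRIu64 "\n", xstats[i].name, xstats[i].value);
        free(xstats);
}
#endif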
1727
1728 /* reset ethdev extended statistics */
1729 void
1730 rte_eth_xstats_reset(uint8_t port_id)
1731 {
1732         struct rte_eth_dev *dev;
1733
1734         VALID_PORTID_OR_RET(port_id);
1735         dev = &rte_eth_devices[port_id];
1736
1737         /* implemented by the driver */
1738         if (dev->dev_ops->xstats_reset != NULL) {
1739                 (*dev->dev_ops->xstats_reset)(dev);
1740                 return;
1741         }
1742
1743         /* fallback to default */
1744         rte_eth_stats_reset(port_id);
1745 }
1746
1747 static int
1748 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1749                 uint8_t is_rx)
1750 {
1751         struct rte_eth_dev *dev;
1752
1753         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1754
1755         dev = &rte_eth_devices[port_id];
1756
1757         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1758         return (*dev->dev_ops->queue_stats_mapping_set)
1759                         (dev, queue_id, stat_idx, is_rx);
1760 }
1761
1762
1763 int
1764 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1765                 uint8_t stat_idx)
1766 {
1767         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1768                         STAT_QMAP_TX);
1769 }
1770
1771
1772 int
1773 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1774                 uint8_t stat_idx)
1775 {
1776         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1777                         STAT_QMAP_RX);
1778 }
1779
1780
1781 void
1782 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1783 {
1784         struct rte_eth_dev *dev;
1785
1786         VALID_PORTID_OR_RET(port_id);
1787         dev = &rte_eth_devices[port_id];
1788
1789         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
1790
1791         FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1792         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1793         dev_info->pci_dev = dev->pci_dev;
1794         if (dev->driver)
1795                 dev_info->driver_name = dev->driver->pci_drv.name;
1796 }
1797
1798 void
1799 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1800 {
1801         struct rte_eth_dev *dev;
1802
1803         VALID_PORTID_OR_RET(port_id);
1804         dev = &rte_eth_devices[port_id];
1805         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1806 }
1807
1808
1809 int
1810 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1811 {
1812         struct rte_eth_dev *dev;
1813
1814         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1815
1816         dev = &rte_eth_devices[port_id];
1817         *mtu = dev->data->mtu;
1818         return 0;
1819 }
1820
1821 int
1822 rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
1823 {
1824         int ret;
1825         struct rte_eth_dev *dev;
1826
1827         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1828         dev = &rte_eth_devices[port_id];
1829         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
1830
1831         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
1832         if (!ret)
1833                 dev->data->mtu = mtu;
1834
1835         return ret;
1836 }
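
/*
 * Usage sketch (illustrative only): a get/set MTU round trip.  1600 is an
 * arbitrary example value; drivers without an mtu_set op return -ENOTSUP,
 * and dev->data->mtu is only updated on success, as above.
 */
#if 0
static int
example_raise_mtu(uint8_t port_id)
{
        uint16_t mtu;
        int ret;

        ret = rte_eth_dev_get_mtu(port_id, &mtu);
        if (ret < 0)
                return ret;
        if (mtu < 1600)
                ret = rte_eth_dev_set_mtu(port_id, 1600);
        return ret;
}
#endif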
1837
1838 int
1839 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1840 {
1841         struct rte_eth_dev *dev;
1842
1843         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1844         dev = &rte_eth_devices[port_id];
1845         if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1846                 PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1847                 return -ENOSYS;
1848         }
1849
1850         if (vlan_id > 4095) {
1851                 PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1852                                 port_id, (unsigned) vlan_id);
1853                 return -EINVAL;
1854         }
1855         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1856
1857         return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1858 }
1859
1860 int
1861 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1862 {
1863         struct rte_eth_dev *dev;
1864
1865         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1866         dev = &rte_eth_devices[port_id];
1867         if (rx_queue_id >= dev->data->nb_rx_queues) {
1868                 PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
1869                 return -EINVAL;
1870         }
1871
1872         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1873         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1874
1875         return 0;
1876 }
1877
1878 int
1879 rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
1880 {
1881         struct rte_eth_dev *dev;
1882
1883         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1884         dev = &rte_eth_devices[port_id];
1885         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1886         (*dev->dev_ops->vlan_tpid_set)(dev, tpid);
1887
1888         return 0;
1889 }
1890
1891 int
1892 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
1893 {
1894         struct rte_eth_dev *dev;
1895         int ret = 0;
1896         int mask = 0;
1897         int cur, org = 0;
1898
1899         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1900         dev = &rte_eth_devices[port_id];
1901
1902         /* check which options were changed by the application */
1903         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
1904         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
1905         if (cur != org) {
1906                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
1907                 mask |= ETH_VLAN_STRIP_MASK;
1908         }
1909
1910         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
1911         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
1912         if (cur != org) {
1913                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
1914                 mask |= ETH_VLAN_FILTER_MASK;
1915         }
1916
1917         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
1918         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
1919         if (cur != org) {
1920                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
1921                 mask |= ETH_VLAN_EXTEND_MASK;
1922         }
1923
1924         /* no change */
1925         if (mask == 0)
1926                 return ret;
1927
1928         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
1929         (*dev->dev_ops->vlan_offload_set)(dev, mask);
1930
1931         return ret;
1932 }
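
/*
 * Usage sketch (illustrative only): the offload_mask describes the full
 * desired VLAN offload state, so bits left clear are switched off if they
 * were previously on; only the bits that actually change reach the driver.
 */
#if 0
static int
example_enable_vlan_offloads(uint8_t port_id)
{
        /* enable stripping and filtering, disable extend */
        int mask = ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD;

        return rte_eth_dev_set_vlan_offload(port_id, mask);
}
#endif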
1933
1934 int
1935 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1936 {
1937         struct rte_eth_dev *dev;
1938         int ret = 0;
1939
1940         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1941         dev = &rte_eth_devices[port_id];
1942
1943         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1944                 ret |= ETH_VLAN_STRIP_OFFLOAD;
1945
1946         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1947                 ret |= ETH_VLAN_FILTER_OFFLOAD;
1948
1949         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1950                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
1951
1952         return ret;
1953 }
1954
1955 int
1956 rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
1957 {
1958         struct rte_eth_dev *dev;
1959
1960         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1961         dev = &rte_eth_devices[port_id];
1962         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
1963         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
1964
1965         return 0;
1966 }
1967
1968 int
1969 rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
1970                                       struct rte_fdir_filter *fdir_filter,
1971                                       uint8_t queue)
1972 {
1973         struct rte_eth_dev *dev;
1974
1975         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1976         dev = &rte_eth_devices[port_id];
1977
1978         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1979                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1980                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1981                 return -ENOSYS;
1982         }
1983
1984         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1985              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1986             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1987                 PMD_DEBUG_TRACE("ports are meaningless for SCTP and "
1988                                 "NONE l4type; source and destination "
1989                                 "ports must be zero\n");
1990                 return -EINVAL;
1991         }
1992
1993         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_signature_filter, -ENOTSUP);
1994         return (*dev->dev_ops->fdir_add_signature_filter)(dev, fdir_filter,
1995                                                                 queue);
1996 }
1997
1998 int
1999 rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
2000                                          struct rte_fdir_filter *fdir_filter,
2001                                          uint8_t queue)
2002 {
2003         struct rte_eth_dev *dev;
2004
2005         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2006         dev = &rte_eth_devices[port_id];
2007
2008         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
2009                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
2010                                 port_id, dev->data->dev_conf.fdir_conf.mode);
2011                 return -ENOSYS;
2012         }
2013
2014         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
2015              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
2016             && (fdir_filter->port_src || fdir_filter->port_dst)) {
2017                 PMD_DEBUG_TRACE("ports are meaningless for SCTP and "
2018                                 "NONE l4type; source and destination "
2019                                 "ports must be zero\n");
2020                 return -EINVAL;
2021         }
2022
2023         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_signature_filter, -ENOTSUP);
2024         return (*dev->dev_ops->fdir_update_signature_filter)(dev, fdir_filter,
2025                                                                 queue);
2026
2027 }
2028
2029 int
2030 rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
2031                                          struct rte_fdir_filter *fdir_filter)
2032 {
2033         struct rte_eth_dev *dev;
2034
2035         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2036         dev = &rte_eth_devices[port_id];
2037
2038         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
2039                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
2040                                 port_id, dev->data->dev_conf.fdir_conf.mode);
2041                 return -ENOSYS;
2042         }
2043
2044         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
2045              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
2046             && (fdir_filter->port_src || fdir_filter->port_dst)) {
2047                 PMD_DEBUG_TRACE("ports are meaningless for SCTP and "
2048                                 "NONE l4type; source and destination "
2049                                 "ports must be zero\n");
2050                 return -EINVAL;
2051         }
2052
2053         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_signature_filter, -ENOTSUP);
2054         return (*dev->dev_ops->fdir_remove_signature_filter)(dev, fdir_filter);
2055 }
2056
2057 int
2058 rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir)
2059 {
2060         struct rte_eth_dev *dev;
2061
2062         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2063         dev = &rte_eth_devices[port_id];
2064         if (!(dev->data->dev_conf.fdir_conf.mode)) {
2065                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
2066                 return -ENOSYS;
2067         }
2068
2069         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_infos_get, -ENOTSUP);
2070
2071         (*dev->dev_ops->fdir_infos_get)(dev, fdir);
2072         return 0;
2073 }
2074
2075 int
2076 rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
2077                                     struct rte_fdir_filter *fdir_filter,
2078                                     uint16_t soft_id, uint8_t queue,
2079                                     uint8_t drop)
2080 {
2081         struct rte_eth_dev *dev;
2082
2083         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2084         dev = &rte_eth_devices[port_id];
2085
2086         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
2087                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
2088                                 port_id, dev->data->dev_conf.fdir_conf.mode);
2089                 return -ENOSYS;
2090         }
2091
2092         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
2093              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
2094             && (fdir_filter->port_src || fdir_filter->port_dst)) {
2095                 PMD_DEBUG_TRACE("ports are meaningless for SCTP and "
2096                                 "NONE l4type; source and destination "
2097                                 "ports must be zero\n");
2098                 return -EINVAL;
2099         }
2100
2101         /* For now IPv6 is not supported with perfect filter */
2102         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
2103                 return -ENOTSUP;
2104
2105         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_perfect_filter, -ENOTSUP);
2106         return (*dev->dev_ops->fdir_add_perfect_filter)(dev, fdir_filter,
2107                                                                 soft_id, queue,
2108                                                                 drop);
2109 }
2110
2111 int
2112 rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
2113                                        struct rte_fdir_filter *fdir_filter,
2114                                        uint16_t soft_id, uint8_t queue,
2115                                        uint8_t drop)
2116 {
2117         struct rte_eth_dev *dev;
2118
2119         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2120         dev = &rte_eth_devices[port_id];
2121
2122         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
2123                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
2124                                 port_id, dev->data->dev_conf.fdir_conf.mode);
2125                 return -ENOSYS;
2126         }
2127
2128         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
2129              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
2130             && (fdir_filter->port_src || fdir_filter->port_dst)) {
2131                 PMD_DEBUG_TRACE("ports are meaningless for SCTP and "
2132                                 "NONE l4type; source and destination "
2133                                 "ports must be zero\n");
2134                 return -EINVAL;
2135         }
2136
2137         /* For now IPv6 is not supported with perfect filter */
2138         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
2139                 return -ENOTSUP;
2140
2141         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_perfect_filter, -ENOTSUP);
2142         return (*dev->dev_ops->fdir_update_perfect_filter)(dev, fdir_filter,
2143                                                         soft_id, queue, drop);
2144 }
2145
2146 int
2147 rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
2148                                        struct rte_fdir_filter *fdir_filter,
2149                                        uint16_t soft_id)
2150 {
2151         struct rte_eth_dev *dev;
2152
2153         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2154         dev = &rte_eth_devices[port_id];
2155
2156         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
2157                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
2158                                 port_id, dev->data->dev_conf.fdir_conf.mode);
2159                 return -ENOSYS;
2160         }
2161
2162         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
2163              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
2164             && (fdir_filter->port_src || fdir_filter->port_dst)) {
2165                 PMD_DEBUG_TRACE("ports are meaningless for SCTP and "
2166                                 "NONE l4type; source and destination "
2167                                 "ports must be zero\n");
2168                 return -EINVAL;
2169         }
2170
2171         /* For now IPv6 is not supported with perfect filter */
2172         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
2173                 return -ENOTSUP;
2174
2175         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_perfect_filter, -ENOTSUP);
2176         return (*dev->dev_ops->fdir_remove_perfect_filter)(dev, fdir_filter,
2177                                                                 soft_id);
2178 }
2179
2180 int
2181 rte_eth_dev_fdir_set_masks(uint8_t port_id, struct rte_fdir_masks *fdir_mask)
2182 {
2183         struct rte_eth_dev *dev;
2184
2185         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2186         dev = &rte_eth_devices[port_id];
2187         if (!(dev->data->dev_conf.fdir_conf.mode)) {
2188                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
2189                 return -ENOSYS;
2190         }
2191
2192         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_set_masks, -ENOTSUP);
2193         return (*dev->dev_ops->fdir_set_masks)(dev, fdir_mask);
2194 }
2195
2196 int
2197 rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
2198 {
2199         struct rte_eth_dev *dev;
2200
2201         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2202         dev = &rte_eth_devices[port_id];
2203         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2204         memset(fc_conf, 0, sizeof(*fc_conf));
2205         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
2206 }
2207
2208 int
2209 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
2210 {
2211         struct rte_eth_dev *dev;
2212
2213         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2214         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2215                 PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
2216                 return -EINVAL;
2217         }
2218
2219         dev = &rte_eth_devices[port_id];
2220         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2221         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
2222 }
2223
2224 int
2225 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
2226 {
2227         struct rte_eth_dev *dev;
2228
2229         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2230         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2231                 PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
2232                 return -EINVAL;
2233         }
2234
2235         dev = &rte_eth_devices[port_id];
2236         /* High water and low water validation are device-specific */
2237         if (*dev->dev_ops->priority_flow_ctrl_set)
2238                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
2239         return -ENOTSUP;
2240 }
2241
2242 static int
2243 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2244                         uint16_t reta_size)
2245 {
2246         uint16_t i, num;
2247
2248         if (!reta_conf)
2249                 return -EINVAL;
2250
2251         if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
2252                 PMD_DEBUG_TRACE("Invalid reta size, must be a multiple of %u\n",
2253                                                         RTE_RETA_GROUP_SIZE);
2254                 return -EINVAL;
2255         }
2256
2257         num = reta_size / RTE_RETA_GROUP_SIZE;
2258         for (i = 0; i < num; i++) {
2259                 if (reta_conf[i].mask)
2260                         return 0;
2261         }
2262
2263         return -EINVAL;
2264 }
2265
2266 static int
2267 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2268                          uint16_t reta_size,
2269                          uint8_t max_rxq)
2270 {
2271         uint16_t i, idx, shift;
2272
2273         if (!reta_conf)
2274                 return -EINVAL;
2275
2276         if (max_rxq == 0) {
2277                 PMD_DEBUG_TRACE("No receive queue is available\n");
2278                 return -EINVAL;
2279         }
2280
2281         for (i = 0; i < reta_size; i++) {
2282                 idx = i / RTE_RETA_GROUP_SIZE;
2283                 shift = i % RTE_RETA_GROUP_SIZE;
2284                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2285                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2286                         PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
2287                                 "the maximum rxq index: %u\n", idx, shift,
2288                                 reta_conf[idx].reta[shift], max_rxq);
2289                         return -EINVAL;
2290                 }
2291         }
2292
2293         return 0;
2294 }
2295
2296 int
2297 rte_eth_dev_rss_reta_update(uint8_t port_id,
2298                             struct rte_eth_rss_reta_entry64 *reta_conf,
2299                             uint16_t reta_size)
2300 {
2301         struct rte_eth_dev *dev;
2302         int ret;
2303
2304         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2305         /* Check mask bits */
2306         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2307         if (ret < 0)
2308                 return ret;
2309
2310         dev = &rte_eth_devices[port_id];
2311
2312         /* Check entry value */
2313         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2314                                 dev->data->nb_rx_queues);
2315         if (ret < 0)
2316                 return ret;
2317
2318         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2319         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
2320 }
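
/*
 * Usage sketch (illustrative only): spreading the whole redirection table
 * over the first nb_queues RX queues.  Each rte_eth_rss_reta_entry64
 * covers RTE_RETA_GROUP_SIZE (64) entries, and bit i of .mask marks
 * .reta[i] as valid; the table size must come from dev_info.reta_size.
 * The eight-group bound is an assumption of this sketch.
 */
#if 0
static int
example_spread_reta(uint8_t port_id, uint16_t nb_queues)
{
        struct rte_eth_dev_info dev_info;
        struct rte_eth_rss_reta_entry64 reta_conf[8]; /* up to 512 entries */
        uint16_t i, idx, shift;

        rte_eth_dev_info_get(port_id, &dev_info);
        if (nb_queues == 0 ||
            dev_info.reta_size > 8 * RTE_RETA_GROUP_SIZE)
                return -EINVAL;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < dev_info.reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                reta_conf[idx].mask |= 1ULL << shift;
                reta_conf[idx].reta[shift] = i % nb_queues;
        }
        return rte_eth_dev_rss_reta_update(port_id, reta_conf,
                                           dev_info.reta_size);
}
#endif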
2321
2322 int
2323 rte_eth_dev_rss_reta_query(uint8_t port_id,
2324                            struct rte_eth_rss_reta_entry64 *reta_conf,
2325                            uint16_t reta_size)
2326 {
2327         struct rte_eth_dev *dev;
2328         int ret;
2329
2330         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2334
2335         /* Check mask bits */
2336         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2337         if (ret < 0)
2338                 return ret;
2339
2340         dev = &rte_eth_devices[port_id];
2341         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2342         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
2343 }
2344
2345 int
2346 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
2347 {
2348         struct rte_eth_dev *dev;
2349         uint16_t rss_hash_protos;
2350
2351         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2352         rss_hash_protos = rss_conf->rss_hf;
2353         if ((rss_hash_protos != 0) &&
2354             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
2355                 PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
2356                                 rss_hash_protos);
2357                 return -EINVAL;
2358         }
2359         dev = &rte_eth_devices[port_id];
2360         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2361         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
2362 }
2363
2364 int
2365 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
2366                               struct rte_eth_rss_conf *rss_conf)
2367 {
2368         struct rte_eth_dev *dev;
2369
2370         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2371         dev = &rte_eth_devices[port_id];
2372         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2373         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
2374 }
2375
2376 int
2377 rte_eth_dev_udp_tunnel_add(uint8_t port_id,
2378                            struct rte_eth_udp_tunnel *udp_tunnel)
2379 {
2380         struct rte_eth_dev *dev;
2381
2382         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2383         if (udp_tunnel == NULL) {
2384                 PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2385                 return -EINVAL;
2386         }
2387
2388         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2389                 PMD_DEBUG_TRACE("Invalid tunnel type\n");
2390                 return -EINVAL;
2391         }
2392
2393         dev = &rte_eth_devices[port_id];
2394         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_add, -ENOTSUP);
2395         return (*dev->dev_ops->udp_tunnel_add)(dev, udp_tunnel);
2396 }
2397
2398 int
2399 rte_eth_dev_udp_tunnel_delete(uint8_t port_id,
2400                               struct rte_eth_udp_tunnel *udp_tunnel)
2401 {
2402         struct rte_eth_dev *dev;
2403
2404         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2405         dev = &rte_eth_devices[port_id];
2406
2407         if (udp_tunnel == NULL) {
2408                 PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2409                 return -EINVAL;
2410         }
2411
2412         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2413                 PMD_DEBUG_TRACE("Invalid tunnel type\n");
2414                 return -EINVAL;
2415         }
2416
2417         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_del, -ENOTSUP);
2418         return (*dev->dev_ops->udp_tunnel_del)(dev, udp_tunnel);
2419 }
2420
2421 int
2422 rte_eth_led_on(uint8_t port_id)
2423 {
2424         struct rte_eth_dev *dev;
2425
2426         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2427         dev = &rte_eth_devices[port_id];
2428         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2429         return (*dev->dev_ops->dev_led_on)(dev);
2430 }
2431
2432 int
2433 rte_eth_led_off(uint8_t port_id)
2434 {
2435         struct rte_eth_dev *dev;
2436
2437         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2438         dev = &rte_eth_devices[port_id];
2439         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2440         return (*dev->dev_ops->dev_led_off)(dev);
2441 }
2442
2443 /*
2444  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2445  * an empty spot.
2446  */
2447 static int
2448 get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2449 {
2450         struct rte_eth_dev_info dev_info;
2451         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2452         unsigned i;
2453
2454         rte_eth_dev_info_get(port_id, &dev_info);
2455
2456         for (i = 0; i < dev_info.max_mac_addrs; i++)
2457                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2458                         return i;
2459
2460         return -1;
2461 }
2462
2463 static const struct ether_addr null_mac_addr;
2464
2465 int
2466 rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
2467                         uint32_t pool)
2468 {
2469         struct rte_eth_dev *dev;
2470         int index;
2471         uint64_t pool_mask;
2472
2473         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2474         dev = &rte_eth_devices[port_id];
2475         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2476
2477         if (is_zero_ether_addr(addr)) {
2478                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2479                         port_id);
2480                 return -EINVAL;
2481         }
2482         if (pool >= ETH_64_POOLS) {
2483                 PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2484                 return -EINVAL;
2485         }
2486
2487         index = get_mac_addr_index(port_id, addr);
2488         if (index < 0) {
2489                 index = get_mac_addr_index(port_id, &null_mac_addr);
2490                 if (index < 0) {
2491                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2492                                 port_id);
2493                         return -ENOSPC;
2494                 }
2495         } else {
2496                 pool_mask = dev->data->mac_pool_sel[index];
2497
2498                 /* If both the MAC address and the pool are already set, do nothing */
2499                 if (pool_mask & (1ULL << pool))
2500                         return 0;
2501         }
2502
2503         /* Update NIC */
2504         (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2505
2506         /* Update address in NIC data structure */
2507         ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2508
2509         /* Update pool bitmap in NIC data structure */
2510         dev->data->mac_pool_sel[index] |= (1ULL << pool);
2511
2512         return 0;
2513 }
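
/*
 * Usage sketch (illustrative only): adding a secondary, locally
 * administered unicast address to pool 0.  The address bytes are an
 * arbitrary example.
 */
#if 0
static int
example_add_mac(uint8_t port_id)
{
        struct ether_addr addr = {
                .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
        };

        return rte_eth_dev_mac_addr_add(port_id, &addr, 0);
}
#endif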
2514
2515 int
2516 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
2517 {
2518         struct rte_eth_dev *dev;
2519         int index;
2520
2521         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2522         dev = &rte_eth_devices[port_id];
2523         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2524
2525         index = get_mac_addr_index(port_id, addr);
2526         if (index == 0) {
2527                 PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2528                 return -EADDRINUSE;
2529         } else if (index < 0)
2530                 return 0;  /* Do nothing if address wasn't found */
2531
2532         /* Update NIC */
2533         (*dev->dev_ops->mac_addr_remove)(dev, index);
2534
2535         /* Update address in NIC data structure */
2536         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2537
2538         /* reset pool bitmap */
2539         dev->data->mac_pool_sel[index] = 0;
2540
2541         return 0;
2542 }
2543
2544 int
2545 rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr)
2546 {
2547         struct rte_eth_dev *dev;
2548
2549         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2550
2551         if (!is_valid_assigned_ether_addr(addr))
2552                 return -EINVAL;
2553
2554         dev = &rte_eth_devices[port_id];
2555         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2556
2557         /* Update default address in NIC data structure */
2558         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
2559
2560         (*dev->dev_ops->mac_addr_set)(dev, addr);
2561
2562         return 0;
2563 }
2564
2565 int
2566 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
2567                                 uint16_t rx_mode, uint8_t on)
2568 {
2569         uint16_t num_vfs;
2570         struct rte_eth_dev *dev;
2571         struct rte_eth_dev_info dev_info;
2572
2573         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2574
2575         dev = &rte_eth_devices[port_id];
2576         rte_eth_dev_info_get(port_id, &dev_info);
2577
2578         num_vfs = dev_info.max_vfs;
2579         if (vf >= num_vfs) {
2580                 PMD_DEBUG_TRACE("set VF RX mode: invalid VF id %d\n", vf);
2581                 return -EINVAL;
2582         }
2583
2584         if (rx_mode == 0) {
2585                 PMD_DEBUG_TRACE("set VF RX mode: mode mask cannot be zero\n");
2586                 return -EINVAL;
2587         }
2588         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
2589         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
2590 }
2591
2592 /*
2593  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2594  * an empty spot.
2595  */
2596 static int
2597 get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2598 {
2599         struct rte_eth_dev_info dev_info;
2600         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2601         unsigned i;
2602
2603         rte_eth_dev_info_get(port_id, &dev_info);
2604         if (!dev->data->hash_mac_addrs)
2605                 return -1;
2606
2607         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2608                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2609                         ETHER_ADDR_LEN) == 0)
2610                         return i;
2611
2612         return -1;
2613 }
2614
2615 int
2616 rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
2617                                 uint8_t on)
2618 {
2619         int index;
2620         int ret;
2621         struct rte_eth_dev *dev;
2622
2623         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2624
2625         dev = &rte_eth_devices[port_id];
2626         if (is_zero_ether_addr(addr)) {
2627                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2628                         port_id);
2629                 return -EINVAL;
2630         }
2631
2632         index = get_hash_mac_addr_index(port_id, addr);
2633         /* Check if it's already there, and do nothing */
2634         if ((index >= 0) && (on))
2635                 return 0;
2636
2637         if (index < 0) {
2638                 if (!on) {
2639                         PMD_DEBUG_TRACE("port %d: the MAC address was not "
2640                                 "set in UTA\n", port_id);
2641                         return -EINVAL;
2642                 }
2643
2644                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2645                 if (index < 0) {
2646                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2647                                         port_id);
2648                         return -ENOSPC;
2649                 }
2650         }
2651
2652         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2653         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2654         if (ret == 0) {
2655                 /* Update address in NIC data structure */
2656                 if (on)
2657                         ether_addr_copy(addr,
2658                                         &dev->data->hash_mac_addrs[index]);
2659                 else
2660                         ether_addr_copy(&null_mac_addr,
2661                                         &dev->data->hash_mac_addrs[index]);
2662         }
2663
2664         return ret;
2665 }
2666
2667 int
2668 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2669 {
2670         struct rte_eth_dev *dev;
2671
2672         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2673
2674         dev = &rte_eth_devices[port_id];
2675
2676         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2677         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2678 }
2679
2680 int
2681 rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on)
2682 {
2683         uint16_t num_vfs;
2684         struct rte_eth_dev *dev;
2685         struct rte_eth_dev_info dev_info;
2686
2687         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2688
2689         dev = &rte_eth_devices[port_id];
2690         rte_eth_dev_info_get(port_id, &dev_info);
2691
2692         num_vfs = dev_info.max_vfs;
2693         if (vf >= num_vfs) {
2694                 PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2695                 return -EINVAL;
2696         }
2697
2698         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2699         return (*dev->dev_ops->set_vf_rx)(dev, vf, on);
2700 }
2701
2702 int
2703 rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on)
2704 {
2705         uint16_t num_vfs;
2706         struct rte_eth_dev *dev;
2707         struct rte_eth_dev_info dev_info;
2708
2709         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2710
2711         dev = &rte_eth_devices[port_id];
2712         rte_eth_dev_info_get(port_id, &dev_info);
2713
2714         num_vfs = dev_info.max_vfs;
2715         if (vf >= num_vfs) {
2716                 PMD_DEBUG_TRACE("set pool tx: invalid pool id=%d\n", vf);
2717                 return -EINVAL;
2718         }
2719
2720         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2721         return (*dev->dev_ops->set_vf_tx)(dev, vf, on);
2722 }
2723
2724 int
2725 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2726                                uint64_t vf_mask, uint8_t vlan_on)
2727 {
2728         struct rte_eth_dev *dev;
2729
2730         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2731
2732         dev = &rte_eth_devices[port_id];
2733
2734         if (vlan_id > ETHER_MAX_VLAN_ID) {
2735                 PMD_DEBUG_TRACE("VF VLAN filter: invalid VLAN id=%d\n",
2736                         vlan_id);
2737                 return -EINVAL;
2738         }
2739
2740         if (vf_mask == 0) {
2741                 PMD_DEBUG_TRACE("VF VLAN filter: pool_mask cannot be 0\n");
2742                 return -EINVAL;
2743         }
2744
2745         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2746         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2747                                                    vf_mask, vlan_on);
2748 }
2749
2750 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
2751                                         uint16_t tx_rate)
2752 {
2753         struct rte_eth_dev *dev;
2754         struct rte_eth_dev_info dev_info;
2755         struct rte_eth_link link;
2756
2757         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2758
2759         dev = &rte_eth_devices[port_id];
2760         rte_eth_dev_info_get(port_id, &dev_info);
2761         link = dev->data->dev_link;
2762
2763         if (queue_idx >= dev_info.max_tx_queues) {
2764                 PMD_DEBUG_TRACE("set queue rate limit: port %d: "
2765                                 "invalid queue id=%d\n", port_id, queue_idx);
2766                 return -EINVAL;
2767         }
2768
2769         if (tx_rate > link.link_speed) {
2770                 PMD_DEBUG_TRACE("set queue rate limit: invalid tx_rate=%d, "
2771                                 "bigger than link speed %d\n",
2772                                 tx_rate, link.link_speed);
2773                 return -EINVAL;
2774         }
2775
2776         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2777         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2778 }
2779
2780 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
2781                                 uint64_t q_msk)
2782 {
2783         struct rte_eth_dev *dev;
2784         struct rte_eth_dev_info dev_info;
2785         struct rte_eth_link link;
2786
2787         if (q_msk == 0)
2788                 return 0;
2789
2790         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2791
2792         dev = &rte_eth_devices[port_id];
2793         rte_eth_dev_info_get(port_id, &dev_info);
2794         link = dev->data->dev_link;
2795
2796         if (vf >= dev_info.max_vfs) {
2797                 PMD_DEBUG_TRACE("set VF rate limit: port %d: "
2798                                 "invalid vf id=%d\n", port_id, vf);
2799                 return -EINVAL;
2800         }
2801
2802         if (tx_rate > link.link_speed) {
2803                 PMD_DEBUG_TRACE("set VF rate limit: invalid tx_rate=%d, "
2804                                 "bigger than link speed %d\n",
2805                                 tx_rate, link.link_speed);
2806                 return -EINVAL;
2807         }
2808
2809         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
2810         return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
2811 }
2812
2813 int
2814 rte_eth_mirror_rule_set(uint8_t port_id,
2815                         struct rte_eth_mirror_conf *mirror_conf,
2816                         uint8_t rule_id, uint8_t on)
2817 {
2818         struct rte_eth_dev *dev;
2819
2820         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2821         if (mirror_conf->rule_type == 0) {
2822                 PMD_DEBUG_TRACE("mirror rule type cannot be 0\n");
2823                 return -EINVAL;
2824         }
2825
2826         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2827                 PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
2828                                 ETH_64_POOLS - 1);
2829                 return -EINVAL;
2830         }
2831
2832         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
2833              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
2834             (mirror_conf->pool_mask == 0)) {
2835                 PMD_DEBUG_TRACE("Invalid mirror pool, pool mask cannot be 0\n");
2836                 return -EINVAL;
2837         }
2838
2839         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
2840             mirror_conf->vlan.vlan_mask == 0) {
2841                 PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask cannot be 0\n");
2842                 return -EINVAL;
2843         }
2844
2845         dev = &rte_eth_devices[port_id];
2846         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2847
2848         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2849 }
2850
2851 int
2852 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2853 {
2854         struct rte_eth_dev *dev;
2855
2856         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2857
2858         dev = &rte_eth_devices[port_id];
2859         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2860
2861         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2862 }
2863
2864 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2865 uint16_t
2866 rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
2867                  struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2868 {
2869         struct rte_eth_dev *dev;
2870
2871         VALID_PORTID_OR_ERR_RET(port_id, 0);
2872
2873         dev = &rte_eth_devices[port_id];
2874         FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
2875         if (queue_id >= dev->data->nb_rx_queues) {
2876                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
2877                 return 0;
2878         }
2879         return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
2880                                                 rx_pkts, nb_pkts);
2881 }
2882
2883 uint16_t
2884 rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
2885                  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2886 {
2887         struct rte_eth_dev *dev;
2888
2889         VALID_PORTID_OR_ERR_RET(port_id, 0);
2890
2891         dev = &rte_eth_devices[port_id];
2892
2893         FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
2894         if (queue_id >= dev->data->nb_tx_queues) {
2895                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
2896                 return 0;
2897         }
2898         return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
2899                                                 tx_pkts, nb_pkts);
2900 }
2901
2902 uint32_t
2903 rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
2904 {
2905         struct rte_eth_dev *dev;
2906
2907         VALID_PORTID_OR_ERR_RET(port_id, 0);
2908
2909         dev = &rte_eth_devices[port_id];
2910         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, 0);
2911         return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
2912 }
2913
2914 int
2915 rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
2916 {
2917         struct rte_eth_dev *dev;
2918
2919         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2920
2921         dev = &rte_eth_devices[port_id];
2922         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
2923         return (*dev->dev_ops->rx_descriptor_done)(dev->data->rx_queues[queue_id],
2924                                                    offset);
2925 }
2926 #endif
2927
2928 int
2929 rte_eth_dev_callback_register(uint8_t port_id,
2930                         enum rte_eth_event_type event,
2931                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2932 {
2933         struct rte_eth_dev *dev;
2934         struct rte_eth_dev_callback *user_cb;
2935
2936         if (!cb_fn)
2937                 return -EINVAL;
2938
2939         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2940
2941         dev = &rte_eth_devices[port_id];
2942         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2943
2944         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
2945                 if (user_cb->cb_fn == cb_fn &&
2946                         user_cb->cb_arg == cb_arg &&
2947                         user_cb->event == event) {
2948                         break;
2949                 }
2950         }
2951
2952         /* create a new callback. */
2953         if (user_cb == NULL &&
2954             (user_cb = rte_zmalloc("INTR_USER_CALLBACK",
2955                                    sizeof(struct rte_eth_dev_callback), 0))) {
2956                 user_cb->cb_fn = cb_fn;
2957                 user_cb->cb_arg = cb_arg;
2958                 user_cb->event = event;
2959                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
2960         }
2961
2962         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2963         return (user_cb == NULL) ? -ENOMEM : 0;
2964 }
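
/*
 * Usage sketch (illustrative only): registering a link-state-change
 * callback.  Callbacks run from the interrupt thread, so they must not
 * block; the names are hypothetical.  To unregister regardless of cb_arg,
 * pass (void *)-1, as handled in rte_eth_dev_callback_unregister() below.
 */
#if 0
static void
example_lsc_cb(uint8_t port_id, enum rte_eth_event_type event, void *cb_arg)
{
        RTE_SET_USED(cb_arg);
        if (event == RTE_ETH_EVENT_INTR_LSC)
                printf("port %u: link state changed\n", (unsigned)port_id);
}

static int
example_register_lsc(uint8_t port_id)
{
        return rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
                                             example_lsc_cb, NULL);
}
#endif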

int
rte_eth_dev_callback_unregister(uint8_t port_id,
                        enum rte_eth_event_type event,
                        rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
        int ret;
        struct rte_eth_dev *dev;
        struct rte_eth_dev_callback *cb, *next;

        if (!cb_fn)
                return -EINVAL;

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        rte_spinlock_lock(&rte_eth_dev_cb_lock);

        ret = 0;
        for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

                next = TAILQ_NEXT(cb, next);

                /*
                 * An entry whose stored cb_arg is (void *)-1 acts as a
                 * wildcard and matches any cb_arg passed here.
                 */
                if (cb->cb_fn != cb_fn || cb->event != event ||
                                (cb->cb_arg != (void *)-1 &&
                                cb->cb_arg != cb_arg))
                        continue;

                /*
                 * If this callback is not executing right now, remove
                 * it; otherwise report -EAGAIN so the caller can retry
                 * once the callback has returned.
                 */
                if (cb->active == 0) {
                        TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
                        rte_free(cb);
                } else {
                        ret = -EAGAIN;
                }
        }

        rte_spinlock_unlock(&rte_eth_dev_cb_lock);
        return ret;
}
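
/*
 * Run every callback registered for the given event on a device.  Each
 * matching entry is copied and flagged active, and the global lock is
 * dropped while the copy executes, so a callback may itself register or
 * unregister callbacks without deadlocking.  The active flag is what
 * makes a concurrent unregister return -EAGAIN rather than freeing a
 * callback that is still running.
 */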
void
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
        enum rte_eth_event_type event)
{
        struct rte_eth_dev_callback *cb_lst;
        struct rte_eth_dev_callback dev_cb;

        rte_spinlock_lock(&rte_eth_dev_cb_lock);
        TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
                if (cb_lst->cb_fn == NULL || cb_lst->event != event)
                        continue;
                dev_cb = *cb_lst;
                cb_lst->active = 1;
                rte_spinlock_unlock(&rte_eth_dev_cb_lock);
                dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
                                                dev_cb.cb_arg);
                rte_spinlock_lock(&rte_eth_dev_cb_lock);
                cb_lst->active = 0;
        }
        rte_spinlock_unlock(&rte_eth_dev_cb_lock);
}

#ifdef RTE_NIC_BYPASS
int
rte_eth_dev_bypass_init(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
        (*dev->dev_ops->bypass_init)(dev);
        return 0;
}

int
rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
        (*dev->dev_ops->bypass_state_show)(dev, state);
        return 0;
}

int
rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
        (*dev->dev_ops->bypass_state_set)(dev, new_state);
        return 0;
}

int
rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_show, -ENOTSUP);
        (*dev->dev_ops->bypass_event_show)(dev, event, state);
        return 0;
}

int
rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
        (*dev->dev_ops->bypass_event_set)(dev, event, state);
        return 0;
}

int
rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
        (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
        return 0;
}

int
rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
        (*dev->dev_ops->bypass_ver_show)(dev, ver);
        return 0;
}

int
rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
        (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
        return 0;
}

int
rte_eth_dev_bypass_wd_reset(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
        (*dev->dev_ops->bypass_wd_reset)(dev);
        return 0;
}
#endif

int
rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
        return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
                                RTE_ETH_FILTER_NOP, NULL);
}

int
rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
                       enum rte_filter_op filter_op, void *arg)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
        return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
}
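
/*
 * Per-queue RX/TX callbacks.  Callbacks are kept in singly linked lists
 * and run in the order they were added (FIFO).  The lists are updated
 * without any lock, so adding or removing a callback while another
 * thread is inside rte_eth_rx_burst()/rte_eth_tx_burst() on the same
 * queue is not safe, and removal does not free the callback memory
 * (see the note after the remove functions below).  When
 * RTE_ETHDEV_RXTX_CALLBACKS is not compiled in, all four functions
 * below fail with ENOTSUP.
 */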
void *
rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
                rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
        rte_errno = ENOTSUP;
        return NULL;
#endif
        /* check input parameters */
        if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
                    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
                rte_errno = EINVAL;
                return NULL;
        }

        struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

        if (cb == NULL) {
                rte_errno = ENOMEM;
                return NULL;
        }

        cb->fn.rx = fn;
        cb->param = user_param;

        /* Add the callbacks in fifo order. */
        struct rte_eth_rxtx_callback *tail =
                rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];

        if (!tail) {
                rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;

        } else {
                while (tail->next)
                        tail = tail->next;
                tail->next = cb;
        }

        return cb;
}
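
/*
 * Illustrative usage sketch (not part of the library): an RX callback
 * that counts received packets.  The names count_cb and rx_count are
 * hypothetical application code.
 *
 *      static uint64_t rx_count;
 *
 *      static uint16_t
 *      count_cb(uint8_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *               uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *      {
 *              RTE_SET_USED(port);
 *              RTE_SET_USED(queue);
 *              RTE_SET_USED(max_pkts);
 *              RTE_SET_USED(user_param);
 *              rx_count += nb_pkts;
 *              return nb_pkts;
 *      }
 *
 *      void *cb = rte_eth_add_rx_callback(port_id, 0, count_cb, NULL);
 */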

void *
rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
                rte_tx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
        rte_errno = ENOTSUP;
        return NULL;
#endif
        /* check input parameters */
        if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
                    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
                rte_errno = EINVAL;
                return NULL;
        }

        struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

        if (cb == NULL) {
                rte_errno = ENOMEM;
                return NULL;
        }

        cb->fn.tx = fn;
        cb->param = user_param;

        /* Add the callbacks in fifo order. */
        struct rte_eth_rxtx_callback *tail =
                rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];

        if (!tail) {
                rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;

        } else {
                while (tail->next)
                        tail = tail->next;
                tail->next = cb;
        }

        return cb;
}

int
rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
                struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
        return -ENOTSUP;
#endif
        /* Check input parameters. */
        if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
                    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
                return -EINVAL;
        }

        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
        struct rte_eth_rxtx_callback *prev_cb;

        /* Reset head pointer and remove user cb if first in the list. */
        if (cb == user_cb) {
                dev->post_rx_burst_cbs[queue_id] = user_cb->next;
                return 0;
        }

        /* Remove the user cb from the callback list. */
        do {
                prev_cb = cb;
                cb = cb->next;

                if (cb == user_cb) {
                        prev_cb->next = user_cb->next;
                        return 0;
                }

        } while (cb != NULL);

        /* Callback wasn't found. */
        return -EINVAL;
}

int
rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
                struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
        return -ENOTSUP;
#endif
        /* Check input parameters. */
        if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
                    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
                return -EINVAL;
        }

        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
        struct rte_eth_rxtx_callback *prev_cb;

        /* Reset head pointer and remove user cb if first in the list. */
        if (cb == user_cb) {
                dev->pre_tx_burst_cbs[queue_id] = user_cb->next;
                return 0;
        }

        /* Remove the user cb from the callback list. */
        do {
                prev_cb = cb;
                cb = cb->next;

                if (cb == user_cb) {
                        prev_cb->next = user_cb->next;
                        return 0;
                }

        } while (cb != NULL);

        /* Callback wasn't found. */
        return -EINVAL;
}
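
/*
 * Note on removal semantics: rte_eth_remove_rx_callback() and
 * rte_eth_remove_tx_callback() only unlink the callback; they do not
 * free it, because a data-plane thread may still be executing it.  The
 * caller owns the memory and should release it (e.g. with rte_free())
 * only after making sure no burst on that queue still uses it.
 */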

int
rte_eth_dev_set_mc_addr_list(uint8_t port_id,
                             struct ether_addr *mc_addr_set,
                             uint32_t nb_mc_addr)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
        return (*dev->dev_ops->set_mc_addr_list)(dev, mc_addr_set, nb_mc_addr);
}

int
rte_eth_timesync_enable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
        return (*dev->dev_ops->timesync_enable)(dev);
}

int
rte_eth_timesync_disable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
        return (*dev->dev_ops->timesync_disable)(dev);
}

int
rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp,
                                   uint32_t flags)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
        return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
}

int
rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
        return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
}
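
/*
 * Illustrative usage sketch (not part of the library): enabling IEEE
 * 1588 timestamping and reading an RX timestamp.  Error handling and
 * the packet-receive path are omitted, and the flags value 0 is a
 * placeholder whose interpretation is driver specific.
 *
 *      struct timespec ts;
 *
 *      if (rte_eth_timesync_enable(port_id) == 0 &&
 *          rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
 *              printf("rx ts: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
 */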

int
rte_eth_dev_get_reg_length(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg_length, -ENOTSUP);
        return (*dev->dev_ops->get_reg_length)(dev);
}

int
rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
        return (*dev->dev_ops->get_reg)(dev, info);
}

int
rte_eth_dev_get_eeprom_length(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
        return (*dev->dev_ops->get_eeprom_length)(dev);
}

int
rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
        return (*dev->dev_ops->get_eeprom)(dev, info);
}

int
rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
        return (*dev->dev_ops->set_eeprom)(dev, info);
}
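
/*
 * Illustrative usage sketch (not part of the library): dumping the
 * whole device EEPROM.  Everything except the rte_* calls and the
 * struct rte_dev_eeprom_info fields is hypothetical application code
 * (dump is a placeholder helper), and error handling is abbreviated.
 *
 *      int len = rte_eth_dev_get_eeprom_length(port_id);
 *
 *      if (len > 0) {
 *              struct rte_dev_eeprom_info info = {
 *                      .data = malloc(len),
 *                      .offset = 0,
 *                      .length = (uint32_t)len,
 *              };
 *              if (info.data != NULL &&
 *                  rte_eth_dev_get_eeprom(port_id, &info) == 0)
 *                      dump(info.data, info.length);
 *              free(info.data);
 *      }
 */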