ethdev: prefix internal error checking macros
[dpdk.git] / lib / librte_ether / rte_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/types.h>
35 #include <sys/queue.h>
36 #include <ctype.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include <stdarg.h>
41 #include <errno.h>
42 #include <stdint.h>
43 #include <inttypes.h>
44 #include <netinet/in.h>
45
46 #include <rte_byteorder.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_interrupts.h>
50 #include <rte_pci.h>
51 #include <rte_memory.h>
52 #include <rte_memcpy.h>
53 #include <rte_memzone.h>
54 #include <rte_launch.h>
55 #include <rte_eal.h>
56 #include <rte_per_lcore.h>
57 #include <rte_lcore.h>
58 #include <rte_atomic.h>
59 #include <rte_branch_prediction.h>
60 #include <rte_common.h>
61 #include <rte_ring.h>
62 #include <rte_mempool.h>
63 #include <rte_malloc.h>
64 #include <rte_mbuf.h>
65 #include <rte_errno.h>
66 #include <rte_spinlock.h>
67 #include <rte_string_fns.h>
68
69 #include "rte_ether.h"
70 #include "rte_ethdev.h"
71
72 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
73 #define RTE_PMD_DEBUG_TRACE(fmt, args...) do { \
74                 RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
75         } while (0)
76 #else
77 #define RTE_PMD_DEBUG_TRACE(fmt, args...)
78 #endif
79
80 /* Macros for restricting functions to run in the primary process only */
81 #define RTE_PROC_PRIMARY_OR_ERR_RET(retval) do { \
82         if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
83                 RTE_PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
84                 return (retval); \
85         } \
86 } while (0)
87
88 #define RTE_PROC_PRIMARY_OR_RET() do { \
89         if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
90                 RTE_PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
91                 return; \
92         } \
93 } while (0)
94
95 /* Macros to check for invalid function pointers in dev_ops structure */
96 #define RTE_FUNC_PTR_OR_ERR_RET(func, retval) do { \
97         if ((func) == NULL) { \
98                 RTE_PMD_DEBUG_TRACE("Function not supported\n"); \
99                 return (retval); \
100         } \
101 } while (0)
102
103 #define RTE_FUNC_PTR_OR_RET(func) do { \
104         if ((func) == NULL) { \
105                 RTE_PMD_DEBUG_TRACE("Function not supported\n"); \
106                 return; \
107         } \
108 } while (0)
109
110 /* Macros to check for valid port */
111 #define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
112         if (!rte_eth_dev_is_valid_port(port_id)) {  \
113                 RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
114                 return retval; \
115         } \
116 } while (0)
117
118 #define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
119         if (!rte_eth_dev_is_valid_port(port_id)) { \
120                 RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
121                 return; \
122         } \
123 } while (0)
124
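/*
 * A minimal sketch of how an ethdev API entry point combines the checks
 * above: validate the port, resolve the device, then verify the dev_ops
 * hook before dispatching. The function name is hypothetical; the body
 * mirrors the real entry points later in this file. Kept out of the
 * build with #if 0.
 */
#if 0
int
rte_eth_dev_example_op(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
        return (*dev->dev_ops->dev_start)(dev);
}
#endif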
125
126 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
127 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
128 static struct rte_eth_dev_data *rte_eth_dev_data;
129 static uint8_t nb_ports;
130
131 /* spinlock for eth device callbacks */
132 static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
133
134 /* store statistics names and their offsets in the stats structure */
135 struct rte_eth_xstats_name_off {
136         char name[RTE_ETH_XSTATS_NAME_SIZE];
137         unsigned offset;
138 };
139
140 static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
141         {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
142         {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
143         {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
144         {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
145         {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
146         {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
147         {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
148                 rx_nombuf)},
149 };
150
151 #define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
152
153 static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
154         {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
155         {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
156         {"errors", offsetof(struct rte_eth_stats, q_errors)},
157 };
158
159 #define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
160                 sizeof(rte_rxq_stats_strings[0]))
161
162 static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
163         {"packets", offsetof(struct rte_eth_stats, q_opackets)},
164         {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
165 };
166 #define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
167                 sizeof(rte_txq_stats_strings[0]))
168
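/*
 * A minimal sketch of how the name/offset tables above are consumed:
 * each entry maps a human-readable name to one uint64_t counter inside
 * struct rte_eth_stats. The helper name is hypothetical and assumes
 * idx < RTE_NB_STATS; kept out of the build.
 */
#if 0
static uint64_t
example_read_basic_stat(const struct rte_eth_stats *stats, unsigned idx)
{
        const uint64_t *counter;

        /* e.g. idx 0 resolves to stats->ipackets */
        counter = RTE_PTR_ADD(stats, rte_stats_strings[idx].offset);
        return *counter;
}
#endif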
169
170 /**
171  * The user application callback description.
172  *
173  * It contains the callback address registered by the user application,
174  * the pointer to the callback parameters, and the event type.
175  */
176 struct rte_eth_dev_callback {
177         TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
178         rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
179         void *cb_arg;                           /**< Parameter for callback */
180         enum rte_eth_event_type event;          /**< Interrupt event type */
181         uint32_t active;                        /**< Callback is executing */
182 };
183
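/*
 * A minimal sketch of the registration flow this structure backs: an
 * application installs a link-state-change callback through the public
 * rte_eth_dev_callback_register() API. The callback and helper names
 * are hypothetical; kept out of the build.
 */
#if 0
static void
example_lsc_cb(uint8_t port_id, enum rte_eth_event_type event, void *cb_arg)
{
        RTE_SET_USED(cb_arg);
        if (event == RTE_ETH_EVENT_INTR_LSC)
                printf("port %d: link state changed\n", port_id);
}

static int
example_install_lsc_cb(uint8_t port_id)
{
        return rte_eth_dev_callback_register(port_id,
                        RTE_ETH_EVENT_INTR_LSC, example_lsc_cb, NULL);
}
#endif
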
184 enum {
185         STAT_QMAP_TX = 0,
186         STAT_QMAP_RX
187 };
188
189 enum {
190         DEV_DETACHED = 0,
191         DEV_ATTACHED
192 };
193
194 static void
195 rte_eth_dev_data_alloc(void)
196 {
197         const unsigned flags = 0;
198         const struct rte_memzone *mz;
199
200         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
201                 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
202                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
203                                 rte_socket_id(), flags);
204         } else
205                 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
206         if (mz == NULL)
207                 rte_panic("Cannot allocate memzone for ethernet port data\n");
208
209         rte_eth_dev_data = mz->addr;
210         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
211                 memset(rte_eth_dev_data, 0,
212                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
213 }
214
215 struct rte_eth_dev *
216 rte_eth_dev_allocated(const char *name)
217 {
218         unsigned i;
219
220         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
221                 if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
222                     strcmp(rte_eth_devices[i].data->name, name) == 0)
223                         return &rte_eth_devices[i];
224         }
225         return NULL;
226 }
227
228 static uint8_t
229 rte_eth_dev_find_free_port(void)
230 {
231         unsigned i;
232
233         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
234                 if (rte_eth_devices[i].attached == DEV_DETACHED)
235                         return i;
236         }
237         return RTE_MAX_ETHPORTS;
238 }
239
240 struct rte_eth_dev *
241 rte_eth_dev_allocate(const char *name, enum rte_eth_dev_type type)
242 {
243         uint8_t port_id;
244         struct rte_eth_dev *eth_dev;
245
246         port_id = rte_eth_dev_find_free_port();
247         if (port_id == RTE_MAX_ETHPORTS) {
248                 RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
249                 return NULL;
250         }
251
252         if (rte_eth_dev_data == NULL)
253                 rte_eth_dev_data_alloc();
254
255         if (rte_eth_dev_allocated(name) != NULL) {
256                 RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
257                                 name);
258                 return NULL;
259         }
260
261         eth_dev = &rte_eth_devices[port_id];
262         eth_dev->data = &rte_eth_dev_data[port_id];
263         snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
264         eth_dev->data->port_id = port_id;
265         eth_dev->attached = DEV_ATTACHED;
266         eth_dev->dev_type = type;
267         nb_ports++;
268         return eth_dev;
269 }
270
271 static int
272 rte_eth_dev_create_unique_device_name(char *name, size_t size,
273                 struct rte_pci_device *pci_dev)
274 {
275         int ret;
276
277         if ((name == NULL) || (pci_dev == NULL))
278                 return -EINVAL;
279
280         ret = snprintf(name, size, "%d:%d.%d",
281                         pci_dev->addr.bus, pci_dev->addr.devid,
282                         pci_dev->addr.function);
283         if (ret < 0)
284                 return ret;
285         return 0;
286 }
287
288 int
289 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
290 {
291         if (eth_dev == NULL)
292                 return -EINVAL;
293
294         eth_dev->attached = DEV_DETACHED;
295         nb_ports--;
296         return 0;
297 }
298
299 static int
300 rte_eth_dev_init(struct rte_pci_driver *pci_drv,
301                  struct rte_pci_device *pci_dev)
302 {
303         struct eth_driver    *eth_drv;
304         struct rte_eth_dev *eth_dev;
305         char ethdev_name[RTE_ETH_NAME_MAX_LEN];
306
307         int diag;
308
309         eth_drv = (struct eth_driver *)pci_drv;
310
311         /* Create unique Ethernet device name using PCI address */
312         rte_eth_dev_create_unique_device_name(ethdev_name,
313                         sizeof(ethdev_name), pci_dev);
314
315         eth_dev = rte_eth_dev_allocate(ethdev_name, RTE_ETH_DEV_PCI);
316         if (eth_dev == NULL)
317                 return -ENOMEM;
318
319         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
320                 eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
321                                   eth_drv->dev_private_size,
322                                   RTE_CACHE_LINE_SIZE);
323                 if (eth_dev->data->dev_private == NULL)
324                         rte_panic("Cannot allocate memzone for private port data\n");
325         }
326         eth_dev->pci_dev = pci_dev;
327         eth_dev->driver = eth_drv;
328         eth_dev->data->rx_mbuf_alloc_failed = 0;
329
330         /* init user callbacks */
331         TAILQ_INIT(&(eth_dev->link_intr_cbs));
332
333         /*
334          * Set the default MTU.
335          */
336         eth_dev->data->mtu = ETHER_MTU;
337
338         /* Invoke PMD device initialization function */
339         diag = (*eth_drv->eth_dev_init)(eth_dev);
340         if (diag == 0)
341                 return 0;
342
343         RTE_PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%x device_id=0x%x) failed\n",
344                         pci_drv->name,
345                         (unsigned) pci_dev->id.vendor_id,
346                         (unsigned) pci_dev->id.device_id);
347         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
348                 rte_free(eth_dev->data->dev_private);
349         rte_eth_dev_release_port(eth_dev);
350         return diag;
351 }
352
353 static int
354 rte_eth_dev_uninit(struct rte_pci_device *pci_dev)
355 {
356         const struct eth_driver *eth_drv;
357         struct rte_eth_dev *eth_dev;
358         char ethdev_name[RTE_ETH_NAME_MAX_LEN];
359         int ret;
360
361         if (pci_dev == NULL)
362                 return -EINVAL;
363
364         /* Create unique Ethernet device name using PCI address */
365         rte_eth_dev_create_unique_device_name(ethdev_name,
366                         sizeof(ethdev_name), pci_dev);
367
368         eth_dev = rte_eth_dev_allocated(ethdev_name);
369         if (eth_dev == NULL)
370                 return -ENODEV;
371
372         eth_drv = (const struct eth_driver *)pci_dev->driver;
373
374         /* Invoke PMD device uninit function */
375         if (*eth_drv->eth_dev_uninit) {
376                 ret = (*eth_drv->eth_dev_uninit)(eth_dev);
377                 if (ret)
378                         return ret;
379         }
380
381         /* free ether device */
382         rte_eth_dev_release_port(eth_dev);
383
384         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
385                 rte_free(eth_dev->data->dev_private);
386
387         eth_dev->pci_dev = NULL;
388         eth_dev->driver = NULL;
389         eth_dev->data = NULL;
390
391         return 0;
392 }
393
394 /**
395  * Register an Ethernet [Poll Mode] driver.
396  *
397  * Function invoked by the initialization function of an Ethernet driver
398  * to simultaneously register itself as a PCI driver and as an Ethernet
399  * Poll Mode Driver.
400  * Invokes the rte_eal_pci_register() function to register the *pci_drv*
401  * structure embedded in the *eth_drv* structure, after having stored the
402  * address of the rte_eth_dev_init() function in the *devinit* field of
403  * the *pci_drv* structure.
404  * During the PCI probing phase, the rte_eth_dev_init() function is
405  * invoked for each PCI [Ethernet device] matching the embedded PCI
406  * identifiers provided by the driver.
407  */
408 void
409 rte_eth_driver_register(struct eth_driver *eth_drv)
410 {
411         eth_drv->pci_drv.devinit = rte_eth_dev_init;
412         eth_drv->pci_drv.devuninit = rte_eth_dev_uninit;
413         rte_eal_pci_register(&eth_drv->pci_drv);
414 }
415
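/*
 * A minimal sketch of how a PMD uses the registration path above. All
 * names (the id table, the init callback, the adapter struct) are
 * hypothetical placeholders, but the shape matches in-tree drivers of
 * this era. Kept out of the build.
 */
#if 0
static struct eth_driver rte_example_pmd = {
        .pci_drv = {
                .name = "rte_example_pmd",
                .id_table = pci_id_example_map,         /* hypothetical */
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        },
        .eth_dev_init = eth_example_dev_init,           /* hypothetical */
        .dev_private_size = sizeof(struct example_adapter),
};

/* called once from the driver's init hook */
static void
rte_example_pmd_register(void)
{
        rte_eth_driver_register(&rte_example_pmd);
}
#endif
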
416 int
417 rte_eth_dev_is_valid_port(uint8_t port_id)
418 {
419         if (port_id >= RTE_MAX_ETHPORTS ||
420             rte_eth_devices[port_id].attached != DEV_ATTACHED)
421                 return 0;
422         else
423                 return 1;
424 }
425
426 int
427 rte_eth_dev_socket_id(uint8_t port_id)
428 {
429         if (!rte_eth_dev_is_valid_port(port_id))
430                 return -1;
431         return rte_eth_devices[port_id].data->numa_node;
432 }
433
434 uint8_t
435 rte_eth_dev_count(void)
436 {
437         return nb_ports;
438 }
439
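/*
 * A minimal sketch of the usual iteration pattern: rte_eth_dev_count()
 * reports how many ports are attached, but port ids can become sparse
 * after a detach, so walk the id space and filter with the validator.
 * Kept out of the build.
 */
#if 0
static void
example_walk_ports(void)
{
        uint8_t pid;

        for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
                if (!rte_eth_dev_is_valid_port(pid))
                        continue;
                printf("port %d on socket %d\n",
                       pid, rte_eth_dev_socket_id(pid));
        }
}
#endif
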
440 static enum rte_eth_dev_type
441 rte_eth_dev_get_device_type(uint8_t port_id)
442 {
443         if (!rte_eth_dev_is_valid_port(port_id))
444                 return RTE_ETH_DEV_UNKNOWN;
445         return rte_eth_devices[port_id].dev_type;
446 }
447
448 static int
449 rte_eth_dev_get_addr_by_port(uint8_t port_id, struct rte_pci_addr *addr)
450 {
451         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
452
453         if (addr == NULL) {
454                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
455                 return -EINVAL;
456         }
457
458         *addr = rte_eth_devices[port_id].pci_dev->addr;
459         return 0;
460 }
461
462 static int
463 rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
464 {
465         char *tmp;
466
467         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
468
469         if (name == NULL) {
470                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
471                 return -EINVAL;
472         }
473
474         /* shouldn't check 'rte_eth_devices[port_id].data' here,
475          * because it might be overwritten by a VDEV PMD */
476         tmp = rte_eth_dev_data[port_id].name;
477         strcpy(name, tmp);
478         return 0;
479 }
480
481 static int
482 rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
483 {
484         int i;
485
486         if (name == NULL) {
487                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
488                 return -EINVAL;
489         }
490
491         *port_id = RTE_MAX_ETHPORTS;
492
493         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
494
495                 if (!strncmp(name,
496                         rte_eth_dev_data[i].name, strlen(name))) {
497
498                         *port_id = i;
499
500                         return 0;
501                 }
502         }
503         return -ENODEV;
504 }
505
506 static int
507 rte_eth_dev_get_port_by_addr(const struct rte_pci_addr *addr, uint8_t *port_id)
508 {
509         int i;
510         struct rte_pci_device *pci_dev = NULL;
511
512         if (addr == NULL) {
513                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
514                 return -EINVAL;
515         }
516
517         *port_id = RTE_MAX_ETHPORTS;
518
519         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
520
521                 pci_dev = rte_eth_devices[i].pci_dev;
522
523                 if (pci_dev &&
524                         !rte_eal_compare_pci_addr(&pci_dev->addr, addr)) {
525
526                         *port_id = i;
527
528                         return 0;
529                 }
530         }
531         return -ENODEV;
532 }
533
534 static int
535 rte_eth_dev_is_detachable(uint8_t port_id)
536 {
537         uint32_t dev_flags;
538
539         if (!rte_eth_dev_is_valid_port(port_id)) {
540                 RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
541                 return -EINVAL;
542         }
543
544         switch (rte_eth_devices[port_id].data->kdrv) {
545         case RTE_KDRV_IGB_UIO:
546         case RTE_KDRV_UIO_GENERIC:
547         case RTE_KDRV_NIC_UIO:
548         case RTE_KDRV_NONE:
549                 break;
550         case RTE_KDRV_VFIO:
551         default:
552                 return -ENOTSUP;
553         }
554         dev_flags = rte_eth_devices[port_id].data->dev_flags;
555         return !(dev_flags & RTE_ETH_DEV_DETACHABLE);
556 }
557
558 /* attach the new physical device, then store port_id of the device */
559 static int
560 rte_eth_dev_attach_pdev(struct rte_pci_addr *addr, uint8_t *port_id)
561 {
562         if ((addr == NULL) || (port_id == NULL))
563                 goto err;
564
565         /* re-construct pci_device_list */
566         if (rte_eal_pci_scan())
567                 goto err;
568         /* Invoke the probe function of the driver that can handle the new device. */
569         if (rte_eal_pci_probe_one(addr))
570                 goto err;
571
572         if (rte_eth_dev_get_port_by_addr(addr, port_id))
573                 goto err;
574
575         return 0;
576 err:
577         RTE_LOG(ERR, EAL, "Driver cannot attach the device\n");
578         return -1;
579 }
580
581 /* detach the physical device, then store the pci_addr of the freed device */
582 static int
583 rte_eth_dev_detach_pdev(uint8_t port_id, struct rte_pci_addr *addr)
584 {
585         struct rte_pci_addr freed_addr;
586         struct rte_pci_addr vp;
587
588         if (addr == NULL)
589                 goto err;
590
591         /* check whether the driver supports the detach feature */
592         if (rte_eth_dev_is_detachable(port_id))
593                 goto err;
594
595         /* get pci address by port id */
596         if (rte_eth_dev_get_addr_by_port(port_id, &freed_addr))
597                 goto err;
598
599         /* A zeroed pci addr means the port comes from a virtual device */
600         vp.domain = vp.bus = vp.devid = vp.function = 0;
601         if (rte_eal_compare_pci_addr(&vp, &freed_addr) == 0)
602                 goto err;
603
604         /* invoke the devuninit function of the pci driver, and
605          * remove the device from the pci_device_list */
606         if (rte_eal_pci_detach(&freed_addr))
607                 goto err;
608
609         *addr = freed_addr;
610         return 0;
611 err:
612         RTE_LOG(ERR, EAL, "Driver cannot detach the device\n");
613         return -1;
614 }
615
616 /* attach the new virtual device, then store port_id of the device */
617 static int
618 rte_eth_dev_attach_vdev(const char *vdevargs, uint8_t *port_id)
619 {
620         char *name = NULL, *args = NULL;
621         int ret = -1;
622
623         if ((vdevargs == NULL) || (port_id == NULL))
624                 goto end;
625
626         /* parse vdevargs, then retrieve device name and args */
627         if (rte_eal_parse_devargs_str(vdevargs, &name, &args))
628                 goto end;
629
630         /* walk through dev_driver_list to find the driver of the device,
631          * then invoke the probe function of the driver.
632          * rte_eal_vdev_init() initializes the device; the port_id
633          * allocated for it is retrieved below by device name.
634          */
635         if (rte_eal_vdev_init(name, args))
636                 goto end;
637
638         if (rte_eth_dev_get_port_by_name(name, port_id))
639                 goto end;
640
641         ret = 0;
642 end:
643         if (name)
644                 free(name);
645         if (args)
646                 free(args);
647
648         if (ret < 0)
649                 RTE_LOG(ERR, EAL, "Driver cannot attach the device\n");
650         return ret;
651 }
652
653 /* detach the virtual device, then store the name of the device */
654 static int
655 rte_eth_dev_detach_vdev(uint8_t port_id, char *vdevname)
656 {
657         char name[RTE_ETH_NAME_MAX_LEN];
658
659         if (vdevname == NULL)
660                 goto err;
661
662         /* check whether the driver supports the detach feature */
663         if (rte_eth_dev_is_detachable(port_id))
664                 goto err;
665
666         /* get device name by port id */
667         if (rte_eth_dev_get_name_by_port(port_id, name))
668                 goto err;
669         /* walk through dev_driver_list to find the driver of the device,
670          * then invoke the uninit function of the driver */
671         if (rte_eal_vdev_uninit(name))
672                 goto err;
673
674         strncpy(vdevname, name, sizeof(name));
675         return 0;
676 err:
677         RTE_LOG(ERR, EAL, "Driver cannot detach the device\n");
678         return -1;
679 }
680
681 /* attach the new device, then store port_id of the device */
682 int
683 rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
684 {
685         struct rte_pci_addr addr;
686
687         if ((devargs == NULL) || (port_id == NULL))
688                 return -EINVAL;
689
690         if (eal_parse_pci_DomBDF(devargs, &addr) == 0)
691                 return rte_eth_dev_attach_pdev(&addr, port_id);
692         else
693                 return rte_eth_dev_attach_vdev(devargs, port_id);
694 }
695
696 /* detach the device, then store the name of the device */
697 int
698 rte_eth_dev_detach(uint8_t port_id, char *name)
699 {
700         struct rte_pci_addr addr;
701         int ret;
702
703         if (name == NULL)
704                 return -EINVAL;
705
706         if (rte_eth_dev_get_device_type(port_id) == RTE_ETH_DEV_PCI) {
707                 ret = rte_eth_dev_get_addr_by_port(port_id, &addr);
708                 if (ret < 0)
709                         return ret;
710
711                 ret = rte_eth_dev_detach_pdev(port_id, &addr);
712                 if (ret == 0)
713                         snprintf(name, RTE_ETH_NAME_MAX_LEN,
714                                 "%04x:%02x:%02x.%d",
715                                 addr.domain, addr.bus,
716                                 addr.devid, addr.function);
717
718                 return ret;
719         } else
720                 return rte_eth_dev_detach_vdev(port_id, name);
721 }
722
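/*
 * A minimal sketch of the hotplug round trip built from the two calls
 * above. The PCI address is a made-up example. Kept out of the build.
 */
#if 0
static void
example_hotplug_round_trip(void)
{
        uint8_t port_id;
        char name[RTE_ETH_NAME_MAX_LEN];

        if (rte_eth_dev_attach("0000:02:00.0", &port_id) != 0)
                return;
        /* ... configure, start and use the port here ... */
        if (rte_eth_dev_detach(port_id, name) == 0)
                printf("detached device %s\n", name);
}
#endif
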
723 static int
724 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
725 {
726         uint16_t old_nb_queues = dev->data->nb_rx_queues;
727         void **rxq;
728         unsigned i;
729
730         if (dev->data->rx_queues == NULL) { /* first time configuration */
731                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
732                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
733                                 RTE_CACHE_LINE_SIZE);
734                 if (dev->data->rx_queues == NULL) {
735                         dev->data->nb_rx_queues = 0;
736                         return -(ENOMEM);
737                 }
738         } else { /* re-configure */
739                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
740
741                 rxq = dev->data->rx_queues;
742
743                 for (i = nb_queues; i < old_nb_queues; i++)
744                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
745                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
746                                 RTE_CACHE_LINE_SIZE);
747                 if (rxq == NULL)
748                         return -(ENOMEM);
749                 if (nb_queues > old_nb_queues) {
750                         uint16_t new_qs = nb_queues - old_nb_queues;
751
752                         memset(rxq + old_nb_queues, 0,
753                                 sizeof(rxq[0]) * new_qs);
754                 }
755
756                 dev->data->rx_queues = rxq;
757
758         }
759         dev->data->nb_rx_queues = nb_queues;
760         return 0;
761 }
762
763 int
764 rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
765 {
766         struct rte_eth_dev *dev;
767
768         /* This function is only safe when called from the primary process
769          * in a multi-process setup */
770         RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
771
772         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
773
774         dev = &rte_eth_devices[port_id];
775         if (rx_queue_id >= dev->data->nb_rx_queues) {
776                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
777                 return -EINVAL;
778         }
779
780         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
781
782         if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
783                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
784                         " already started\n",
785                         rx_queue_id, port_id);
786                 return 0;
787         }
788
789         return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
790
791 }
792
793 int
794 rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
795 {
796         struct rte_eth_dev *dev;
797
798         /* This function is only safe when called from the primary process
799          * in a multi-process setup */
800         RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
801
802         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
803
804         dev = &rte_eth_devices[port_id];
805         if (rx_queue_id >= dev->data->nb_rx_queues) {
806                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
807                 return -EINVAL;
808         }
809
810         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
811
812         if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
813                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
814                         " already stopped\n",
815                         rx_queue_id, port_id);
816                 return 0;
817         }
818
819         return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
820
821 }
822
823 int
824 rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
825 {
826         struct rte_eth_dev *dev;
827
828         /* This function is only safe when called from the primary process
829          * in a multi-process setup */
830         RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
831
832         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
833
834         dev = &rte_eth_devices[port_id];
835         if (tx_queue_id >= dev->data->nb_tx_queues) {
836                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
837                 return -EINVAL;
838         }
839
840         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
841
842         if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
843                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
844                         " already started\n",
845                         tx_queue_id, port_id);
846                 return 0;
847         }
848
849         return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
850
851 }
852
853 int
854 rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
855 {
856         struct rte_eth_dev *dev;
857
858         /* This function is only safe when called from the primary process
859          * in a multi-process setup */
860         RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
861
862         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
863
864         dev = &rte_eth_devices[port_id];
865         if (tx_queue_id >= dev->data->nb_tx_queues) {
866                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
867                 return -EINVAL;
868         }
869
870         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
871
872         if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
873                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
874                         " already stopped\n",
875                         tx_queue_id, port_id);
876                 return 0;
877         }
878
879         return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
880
881 }
882
883 static int
884 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
885 {
886         uint16_t old_nb_queues = dev->data->nb_tx_queues;
887         void **txq;
888         unsigned i;
889
890         if (dev->data->tx_queues == NULL) { /* first time configuration */
891                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
892                                                    sizeof(dev->data->tx_queues[0]) * nb_queues,
893                                                    RTE_CACHE_LINE_SIZE);
894                 if (dev->data->tx_queues == NULL) {
895                         dev->data->nb_tx_queues = 0;
896                         return -(ENOMEM);
897                 }
898         } else { /* re-configure */
899                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
900
901                 txq = dev->data->tx_queues;
902
903                 for (i = nb_queues; i < old_nb_queues; i++)
904                         (*dev->dev_ops->tx_queue_release)(txq[i]);
905                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
906                                   RTE_CACHE_LINE_SIZE);
907                 if (txq == NULL)
908                         return -ENOMEM;
909                 if (nb_queues > old_nb_queues) {
910                         uint16_t new_qs = nb_queues - old_nb_queues;
911
912                         memset(txq + old_nb_queues, 0,
913                                sizeof(txq[0]) * new_qs);
914                 }
915
916                 dev->data->tx_queues = txq;
917
918         }
919         dev->data->nb_tx_queues = nb_queues;
920         return 0;
921 }
922
923 int
924 rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
925                       const struct rte_eth_conf *dev_conf)
926 {
927         struct rte_eth_dev *dev;
928         struct rte_eth_dev_info dev_info;
929         int diag;
930
931         /* This function is only safe when called from the primary process
932          * in a multi-process setup */
933         RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
934
935         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
936
937         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
938                 RTE_PMD_DEBUG_TRACE(
939                         "Number of RX queues requested (%u) is greater than max supported (%d)\n",
940                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
941                 return -EINVAL;
942         }
943
944         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
945                 RTE_PMD_DEBUG_TRACE(
946                         "Number of TX queues requested (%u) is greater than max supported (%d)\n",
947                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
948                 return -EINVAL;
949         }
950
951         dev = &rte_eth_devices[port_id];
952
953         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
954         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
955
956         if (dev->data->dev_started) {
957                 RTE_PMD_DEBUG_TRACE(
958                     "port %d must be stopped to allow configuration\n", port_id);
959                 return -EBUSY;
960         }
961
962         /*
963          * Check that the numbers of RX and TX queues are not greater
964          * than the maximum number of RX and TX queues supported by the
965          * configured device.
966          */
967         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
968         if (nb_rx_q > dev_info.max_rx_queues) {
969                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
970                                 port_id, nb_rx_q, dev_info.max_rx_queues);
971                 return -EINVAL;
972         }
973         if (nb_rx_q == 0) {
974                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
975                 return -EINVAL;
976         }
977
978         if (nb_tx_q > dev_info.max_tx_queues) {
979                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
980                                 port_id, nb_tx_q, dev_info.max_tx_queues);
981                 return -EINVAL;
982         }
983         if (nb_tx_q == 0) {
984                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
985                 return -EINVAL;
986         }
987
988         /* Copy the dev_conf parameter into the dev structure */
989         memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
990
991         /*
992          * If link state interrupt is enabled, check that the
993          * device supports it.
994          */
995         if ((dev_conf->intr_conf.lsc == 1) &&
996                 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
997                         RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
998                                         dev->data->drv_name);
999                         return -EINVAL;
1000         }
1001
1002         /*
1003          * If jumbo frames are enabled, check that the maximum RX packet
1004          * length is supported by the configured device.
1005          */
1006         if (dev_conf->rxmode.jumbo_frame == 1) {
1007                 if (dev_conf->rxmode.max_rx_pkt_len >
1008                     dev_info.max_rx_pktlen) {
1009                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
1010                                 " > max valid value %u\n",
1011                                 port_id,
1012                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
1013                                 (unsigned)dev_info.max_rx_pktlen);
1014                         return -EINVAL;
1015                 } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
1016                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
1017                                 " < min valid value %u\n",
1018                                 port_id,
1019                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
1020                                 (unsigned)ETHER_MIN_LEN);
1021                         return -EINVAL;
1022                 }
1023         } else {
1024                 if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
1025                         dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
1026                         /* Use default value */
1027                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
1028                                                         ETHER_MAX_LEN;
1029         }
1030
1031         /*
1032          * Setup new number of RX/TX queues and reconfigure device.
1033          */
1034         diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
1035         if (diag != 0) {
1036                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
1037                                 port_id, diag);
1038                 return diag;
1039         }
1040
1041         diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
1042         if (diag != 0) {
1043                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
1044                                 port_id, diag);
1045                 rte_eth_dev_rx_queue_config(dev, 0);
1046                 return diag;
1047         }
1048
1049         diag = (*dev->dev_ops->dev_configure)(dev);
1050         if (diag != 0) {
1051                 RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
1052                                 port_id, diag);
1053                 rte_eth_dev_rx_queue_config(dev, 0);
1054                 rte_eth_dev_tx_queue_config(dev, 0);
1055                 return diag;
1056         }
1057
1058         return 0;
1059 }
1060
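/*
 * A minimal sketch of the canonical bring-up sequence built on the call
 * above: configure, set up one RX and one TX queue, then start. The
 * descriptor counts are illustrative and "mbuf_pool" is assumed to be a
 * pre-created pktmbuf pool. Kept out of the build.
 */
#if 0
static int
example_port_init(uint8_t port_id, struct rte_mempool *mbuf_pool)
{
        struct rte_eth_conf port_conf;
        int ret;

        /* a zeroed config picks up the defaults applied above */
        memset(&port_conf, 0, sizeof(port_conf));
        ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
        if (ret < 0)
                return ret;
        ret = rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(),
                                     NULL, mbuf_pool);
        if (ret < 0)
                return ret;
        ret = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
                                     NULL);
        if (ret < 0)
                return ret;
        return rte_eth_dev_start(port_id);
}
#endif
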
1061 static void
1062 rte_eth_dev_config_restore(uint8_t port_id)
1063 {
1064         struct rte_eth_dev *dev;
1065         struct rte_eth_dev_info dev_info;
1066         struct ether_addr addr;
1067         uint16_t i;
1068         uint32_t pool = 0;
1069
1070         dev = &rte_eth_devices[port_id];
1071
1072         rte_eth_dev_info_get(port_id, &dev_info);
1073
1074         if (RTE_ETH_DEV_SRIOV(dev).active)
1075                 pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;
1076
1077         /* replay MAC address configuration */
1078         for (i = 0; i < dev_info.max_mac_addrs; i++) {
1079                 addr = dev->data->mac_addrs[i];
1080
1081                 /* skip zero address */
1082                 if (is_zero_ether_addr(&addr))
1083                         continue;
1084
1085                 /* add address to the hardware */
1086                 if  (*dev->dev_ops->mac_addr_add &&
1087                         (dev->data->mac_pool_sel[i] & (1ULL << pool)))
1088                         (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
1089                 else {
1090                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
1091                                         port_id);
1092                         /* exit the loop but do not return an error */
1093                         break;
1094                 }
1095         }
1096
1097         /* replay promiscuous configuration */
1098         if (rte_eth_promiscuous_get(port_id) == 1)
1099                 rte_eth_promiscuous_enable(port_id);
1100         else if (rte_eth_promiscuous_get(port_id) == 0)
1101                 rte_eth_promiscuous_disable(port_id);
1102
1103         /* replay all multicast configuration */
1104         if (rte_eth_allmulticast_get(port_id) == 1)
1105                 rte_eth_allmulticast_enable(port_id);
1106         else if (rte_eth_allmulticast_get(port_id) == 0)
1107                 rte_eth_allmulticast_disable(port_id);
1108 }
1109
1110 int
1111 rte_eth_dev_start(uint8_t port_id)
1112 {
1113         struct rte_eth_dev *dev;
1114         int diag;
1115
1116         /* This function is only safe when called from the primary process
1117          * in a multi-process setup */
1118         RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1119
1120         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1121
1122         dev = &rte_eth_devices[port_id];
1123
1124         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1125
1126         if (dev->data->dev_started != 0) {
1127                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
1128                         " already started\n",
1129                         port_id);
1130                 return 0;
1131         }
1132
1133         diag = (*dev->dev_ops->dev_start)(dev);
1134         if (diag == 0)
1135                 dev->data->dev_started = 1;
1136         else
1137                 return diag;
1138
1139         rte_eth_dev_config_restore(port_id);
1140
1141         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1142                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1143                 (*dev->dev_ops->link_update)(dev, 0);
1144         }
1145         return 0;
1146 }
1147
1148 void
1149 rte_eth_dev_stop(uint8_t port_id)
1150 {
1151         struct rte_eth_dev *dev;
1152
1153         /* This function is only safe when called from the primary process
1154          * in a multi-process setup */
1155         RTE_PROC_PRIMARY_OR_RET();
1156
1157         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1158         dev = &rte_eth_devices[port_id];
1159
1160         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1161
1162         if (dev->data->dev_started == 0) {
1163                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
1164                         " already stopped\n",
1165                         port_id);
1166                 return;
1167         }
1168
1169         dev->data->dev_started = 0;
1170         (*dev->dev_ops->dev_stop)(dev);
1171 }
1172
1173 int
1174 rte_eth_dev_set_link_up(uint8_t port_id)
1175 {
1176         struct rte_eth_dev *dev;
1177
1178         /* This function is only safe when called from the primary process
1179          * in a multi-process setup */
1180         RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1181
1182         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1183
1184         dev = &rte_eth_devices[port_id];
1185
1186         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1187         return (*dev->dev_ops->dev_set_link_up)(dev);
1188 }
1189
1190 int
1191 rte_eth_dev_set_link_down(uint8_t port_id)
1192 {
1193         struct rte_eth_dev *dev;
1194
1195         /* This function is only safe when called from the primary process
1196          * in a multi-process setup */
1197         RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1198
1199         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1200
1201         dev = &rte_eth_devices[port_id];
1202
1203         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1204         return (*dev->dev_ops->dev_set_link_down)(dev);
1205 }
1206
1207 void
1208 rte_eth_dev_close(uint8_t port_id)
1209 {
1210         struct rte_eth_dev *dev;
1211
1212         /* This function is only safe when called from the primary process
1213          * in a multi-process setup */
1214         RTE_PROC_PRIMARY_OR_RET();
1215
1216         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1217         dev = &rte_eth_devices[port_id];
1218
1219         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1220         dev->data->dev_started = 0;
1221         (*dev->dev_ops->dev_close)(dev);
1222
1223         rte_free(dev->data->rx_queues);
1224         dev->data->rx_queues = NULL;
1225         rte_free(dev->data->tx_queues);
1226         dev->data->tx_queues = NULL;
1227 }
1228
1229 int
1230 rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
1231                        uint16_t nb_rx_desc, unsigned int socket_id,
1232                        const struct rte_eth_rxconf *rx_conf,
1233                        struct rte_mempool *mp)
1234 {
1235         int ret;
1236         uint32_t mbp_buf_size;
1237         struct rte_eth_dev *dev;
1238         struct rte_eth_dev_info dev_info;
1239
1240         /* This function is only safe when called from the primary process
1241          * in a multi-process setup */
1242         RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1243
1244         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1245
1246         dev = &rte_eth_devices[port_id];
1247         if (rx_queue_id >= dev->data->nb_rx_queues) {
1248                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
1249                 return -EINVAL;
1250         }
1251
1252         if (dev->data->dev_started) {
1253                 RTE_PMD_DEBUG_TRACE(
1254                     "port %d must be stopped to allow configuration\n", port_id);
1255                 return -EBUSY;
1256         }
1257
1258         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1259         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1260
1261         /*
1262          * Check the size of the mbuf data buffer.
1263          * This value must be provided in the private data of the memory pool.
1264          * First check that the memory pool has a valid private data.
1265          * First check that the memory pool has valid private data.
1266         rte_eth_dev_info_get(port_id, &dev_info);
1267         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1268                 RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
1269                                 mp->name, (int) mp->private_data_size,
1270                                 (int) sizeof(struct rte_pktmbuf_pool_private));
1271                 return -ENOSPC;
1272         }
1273         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1274
1275         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1276                 RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
1277                                 "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
1278                                 "=%d)\n",
1279                                 mp->name,
1280                                 (int)mbp_buf_size,
1281                                 (int)(RTE_PKTMBUF_HEADROOM +
1282                                       dev_info.min_rx_bufsize),
1283                                 (int)RTE_PKTMBUF_HEADROOM,
1284                                 (int)dev_info.min_rx_bufsize);
1285                 return -EINVAL;
1286         }
1287
1288         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1289                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1290                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1291
1292                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
1293                         "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1294                         nb_rx_desc,
1295                         dev_info.rx_desc_lim.nb_max,
1296                         dev_info.rx_desc_lim.nb_min,
1297                         dev_info.rx_desc_lim.nb_align);
1298                 return -EINVAL;
1299         }
1300
1301         if (rx_conf == NULL)
1302                 rx_conf = &dev_info.default_rxconf;
1303
1304         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1305                                               socket_id, rx_conf, mp);
1306         if (!ret) {
1307                 if (!dev->data->min_rx_buf_size ||
1308                     dev->data->min_rx_buf_size > mbp_buf_size)
1309                         dev->data->min_rx_buf_size = mbp_buf_size;
1310         }
1311
1312         return ret;
1313 }
1314
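/*
 * A minimal sketch of a mempool that satisfies the buffer-size check
 * above: the data room must cover RTE_PKTMBUF_HEADROOM plus the
 * device's min_rx_bufsize. Pool sizing numbers are illustrative.
 * Kept out of the build.
 */
#if 0
static struct rte_mempool *
example_rx_pool(uint8_t port_id)
{
        return rte_pktmbuf_pool_create("example_rx_pool",
                        8192,                      /* number of mbufs */
                        256,                       /* per-lcore cache */
                        0,                         /* app private area */
                        RTE_MBUF_DEFAULT_BUF_SIZE, /* headroom + 2KB room */
                        rte_eth_dev_socket_id(port_id));
}
#endif
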
1315 int
1316 rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
1317                        uint16_t nb_tx_desc, unsigned int socket_id,
1318                        const struct rte_eth_txconf *tx_conf)
1319 {
1320         struct rte_eth_dev *dev;
1321         struct rte_eth_dev_info dev_info;
1322
1323         /* This function is only safe when called from the primary process
1324          * in a multi-process setup */
1325         RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1326
1327         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1328
1329         dev = &rte_eth_devices[port_id];
1330         if (tx_queue_id >= dev->data->nb_tx_queues) {
1331                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1332                 return -EINVAL;
1333         }
1334
1335         if (dev->data->dev_started) {
1336                 RTE_PMD_DEBUG_TRACE(
1337                     "port %d must be stopped to allow configuration\n", port_id);
1338                 return -EBUSY;
1339         }
1340
1341         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1342         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1343
1344         rte_eth_dev_info_get(port_id, &dev_info);
1345
1346         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1347             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1348             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1349                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
1350                                 "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1351                                 nb_tx_desc,
1352                                 dev_info.tx_desc_lim.nb_max,
1353                                 dev_info.tx_desc_lim.nb_min,
1354                                 dev_info.tx_desc_lim.nb_align);
1355                 return -EINVAL;
1356         }
1357
1358         if (tx_conf == NULL)
1359                 tx_conf = &dev_info.default_txconf;
1360
1361         return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
1362                                                socket_id, tx_conf);
1363 }
1364
1365 void
1366 rte_eth_promiscuous_enable(uint8_t port_id)
1367 {
1368         struct rte_eth_dev *dev;
1369
1370         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1371         dev = &rte_eth_devices[port_id];
1372
1373         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1374         (*dev->dev_ops->promiscuous_enable)(dev);
1375         dev->data->promiscuous = 1;
1376 }
1377
1378 void
1379 rte_eth_promiscuous_disable(uint8_t port_id)
1380 {
1381         struct rte_eth_dev *dev;
1382
1383         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1384         dev = &rte_eth_devices[port_id];
1385
1386         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1387         dev->data->promiscuous = 0;
1388         (*dev->dev_ops->promiscuous_disable)(dev);
1389 }
1390
1391 int
1392 rte_eth_promiscuous_get(uint8_t port_id)
1393 {
1394         struct rte_eth_dev *dev;
1395
1396         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1397
1398         dev = &rte_eth_devices[port_id];
1399         return dev->data->promiscuous;
1400 }
1401
1402 void
1403 rte_eth_allmulticast_enable(uint8_t port_id)
1404 {
1405         struct rte_eth_dev *dev;
1406
1407         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1408         dev = &rte_eth_devices[port_id];
1409
1410         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1411         (*dev->dev_ops->allmulticast_enable)(dev);
1412         dev->data->all_multicast = 1;
1413 }
1414
1415 void
1416 rte_eth_allmulticast_disable(uint8_t port_id)
1417 {
1418         struct rte_eth_dev *dev;
1419
1420         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1421         dev = &rte_eth_devices[port_id];
1422
1423         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1424         dev->data->all_multicast = 0;
1425         (*dev->dev_ops->allmulticast_disable)(dev);
1426 }
1427
1428 int
1429 rte_eth_allmulticast_get(uint8_t port_id)
1430 {
1431         struct rte_eth_dev *dev;
1432
1433         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1434
1435         dev = &rte_eth_devices[port_id];
1436         return dev->data->all_multicast;
1437 }
1438
1439 static inline int
1440 rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
1441                                 struct rte_eth_link *link)
1442 {
1443         struct rte_eth_link *dst = link;
1444         struct rte_eth_link *src = &(dev->data->dev_link);
1445
1446         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1447                                         *(uint64_t *)src) == 0)
1448                 return -1;
1449
1450         return 0;
1451 }
1452
1453 void
1454 rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
1455 {
1456         struct rte_eth_dev *dev;
1457
1458         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1459         dev = &rte_eth_devices[port_id];
1460
1461         if (dev->data->dev_conf.intr_conf.lsc != 0)
1462                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1463         else {
1464                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1465                 (*dev->dev_ops->link_update)(dev, 1);
1466                 *eth_link = dev->data->dev_link;
1467         }
1468 }
1469
1470 void
1471 rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
1472 {
1473         struct rte_eth_dev *dev;
1474
1475         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1476         dev = &rte_eth_devices[port_id];
1477
1478         if (dev->data->dev_conf.intr_conf.lsc != 0)
1479                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1480         else {
1481                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1482                 (*dev->dev_ops->link_update)(dev, 0);
1483                 *eth_link = dev->data->dev_link;
1484         }
1485 }
1486
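/*
 * A minimal sketch contrasting the two queries above: rte_eth_link_get()
 * may block while the PMD polls the PHY, whereas the _nowait variant
 * returns the last known state immediately. Kept out of the build.
 */
#if 0
static void
example_report_link(uint8_t port_id)
{
        struct rte_eth_link link;

        rte_eth_link_get_nowait(port_id, &link);
        if (link.link_status)
                printf("port %d up at %u Mbps\n",
                       port_id, (unsigned)link.link_speed);
        else
                printf("port %d down\n", port_id);
}
#endif
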
1487 int
1488 rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
1489 {
1490         struct rte_eth_dev *dev;
1491
1492         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1493
1494         dev = &rte_eth_devices[port_id];
1495         memset(stats, 0, sizeof(*stats));
1496
1497         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1498         (*dev->dev_ops->stats_get)(dev, stats);
1499         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1500         return 0;
1501 }
1502
1503 void
1504 rte_eth_stats_reset(uint8_t port_id)
1505 {
1506         struct rte_eth_dev *dev;
1507
1508         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1509         dev = &rte_eth_devices[port_id];
1510
1511         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1512         (*dev->dev_ops->stats_reset)(dev);
1513 }
1514
1515 /* retrieve ethdev extended statistics */
1516 int
1517 rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
1518         unsigned n)
1519 {
1520         struct rte_eth_stats eth_stats;
1521         struct rte_eth_dev *dev;
1522         unsigned count = 0, i, q;
1523         signed xcount = 0;
1524         uint64_t val, *stats_ptr;
1525
1526         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1527
1528         dev = &rte_eth_devices[port_id];
1529
1530         /* Return generic statistics */
1531         count = RTE_NB_STATS + (dev->data->nb_rx_queues * RTE_NB_RXQ_STATS) +
1532                 (dev->data->nb_tx_queues * RTE_NB_TXQ_STATS);
1533
1534         /* implemented by the driver */
1535         if (dev->dev_ops->xstats_get != NULL) {
1536                 /* Retrieve the driver-specific xstats and append them
1537                  * at the end of the xstats array.
1538                  */
1539                 xcount = (*dev->dev_ops->xstats_get)(dev, &xstats[count],
1540                          (n > count) ? n - count : 0);
1541
1542                 if (xcount < 0)
1543                         return xcount;
1544         }
1545
1546         if (n < count + xcount)
1547                 return count + xcount;
1548
1549         /* now fill the xstats structure */
1550         count = 0;
1551         rte_eth_stats_get(port_id, &eth_stats);
1552
1553         /* global stats */
1554         for (i = 0; i < RTE_NB_STATS; i++) {
1555                 stats_ptr = RTE_PTR_ADD(&eth_stats,
1556                                         rte_stats_strings[i].offset);
1557                 val = *stats_ptr;
1558                 snprintf(xstats[count].name, sizeof(xstats[count].name),
1559                         "%s", rte_stats_strings[i].name);
1560                 xstats[count++].value = val;
1561         }
1562
1563         /* per-rxq stats */
1564         for (q = 0; q < dev->data->nb_rx_queues; q++) {
1565                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
1566                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1567                                         rte_rxq_stats_strings[i].offset +
1568                                         q * sizeof(uint64_t));
1569                         val = *stats_ptr;
1570                         snprintf(xstats[count].name, sizeof(xstats[count].name),
1571                                 "rx_q%u_%s", q,
1572                                 rte_rxq_stats_strings[i].name);
1573                         xstats[count++].value = val;
1574                 }
1575         }
1576
1577         /* per-txq stats */
1578         for (q = 0; q < dev->data->nb_tx_queues; q++) {
1579                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
1580                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1581                                         rte_txq_stats_strings[i].offset +
1582                                         q * sizeof(uint64_t));
1583                         val = *stats_ptr;
1584                         snprintf(xstats[count].name, sizeof(xstats[count].name),
1585                                 "tx_q%u_%s", q,
1586                                 rte_txq_stats_strings[i].name);
1587                         xstats[count++].value = val;
1588                 }
1589         }
1590
1591         return count + xcount;
1592 }
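
/*
 * Usage sketch (illustrative, not part of the ethdev API): because the
 * function above returns the required array size whenever the caller's
 * buffer is too small, a caller that does not know the size up front can
 * use a two-pass pattern.  The helper name is hypothetical; the first
 * call with NULL and n = 0 only queries the required size.
 */
static void __attribute__((unused))
example_dump_xstats(uint8_t port_id)
{
        struct rte_eth_xstats *xstats;
        int len, ret, i;

        /* First pass: ask for the required number of entries. */
        len = rte_eth_xstats_get(port_id, NULL, 0);
        if (len < 0)
                return;

        xstats = rte_malloc(NULL, sizeof(*xstats) * len, 0);
        if (xstats == NULL)
                return;

        /* Second pass: retrieve and print the statistics. */
        ret = rte_eth_xstats_get(port_id, xstats, len);
        if (ret > 0 && ret <= len)
                for (i = 0; i < ret; i++)
                        printf("%s: %" PRIu64 "\n",
                               xstats[i].name, xstats[i].value);

        rte_free(xstats);
}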
1593
1594 /* reset ethdev extended statistics */
1595 void
1596 rte_eth_xstats_reset(uint8_t port_id)
1597 {
1598         struct rte_eth_dev *dev;
1599
1600         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1601         dev = &rte_eth_devices[port_id];
1602
1603         /* implemented by the driver */
1604         if (dev->dev_ops->xstats_reset != NULL) {
1605                 (*dev->dev_ops->xstats_reset)(dev);
1606                 return;
1607         }
1608
1609         /* fallback to default */
1610         rte_eth_stats_reset(port_id);
1611 }
1612
1613 static int
1614 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1615                 uint8_t is_rx)
1616 {
1617         struct rte_eth_dev *dev;
1618
1619         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1620
1621         dev = &rte_eth_devices[port_id];
1622
1623         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1624         return (*dev->dev_ops->queue_stats_mapping_set)
1625                         (dev, queue_id, stat_idx, is_rx);
1626 }
1627
1628
1629 int
1630 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1631                 uint8_t stat_idx)
1632 {
1633         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1634                         STAT_QMAP_TX);
1635 }
1636
1637
1638 int
1639 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1640                 uint8_t stat_idx)
1641 {
1642         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1643                         STAT_QMAP_RX);
1644 }
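
/*
 * Usage sketch (illustrative, hypothetical helper): map the first
 * nb_queues RX and TX queues of a port 1:1 onto per-queue stats
 * counters, so that q_ipackets[i]/q_opackets[i] in rte_eth_stats track
 * queue i.  nb_queues is assumed not to exceed
 * RTE_ETHDEV_QUEUE_STAT_CNTRS; drivers without the mapping op return
 * -ENOTSUP from the wrappers above.
 */
static int __attribute__((unused))
example_map_queue_stats(uint8_t port_id, uint16_t nb_queues)
{
        uint16_t q;
        int ret;

        for (q = 0; q < nb_queues; q++) {
                ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, q,
                                                             (uint8_t)q);
                if (ret != 0)
                        return ret;
                ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, q,
                                                             (uint8_t)q);
                if (ret != 0)
                        return ret;
        }
        return 0;
}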
1645
1646
1647 void
1648 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1649 {
1650         struct rte_eth_dev *dev;
1651         const struct rte_eth_desc_lim lim = {
1652                 .nb_max = UINT16_MAX,
1653                 .nb_min = 0,
1654                 .nb_align = 1,
1655         };
1656
1657         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1658         dev = &rte_eth_devices[port_id];
1659
1660         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
1661         dev_info->rx_desc_lim = lim;
1662         dev_info->tx_desc_lim = lim;
1663
1664         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1665         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1666         dev_info->pci_dev = dev->pci_dev;
1667         dev_info->driver_name = dev->data->drv_name;
1668 }
1669
1670 void
1671 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1672 {
1673         struct rte_eth_dev *dev;
1674
1675         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1676         dev = &rte_eth_devices[port_id];
1677         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1678 }
1679
1680
1681 int
1682 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1683 {
1684         struct rte_eth_dev *dev;
1685
1686         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1687
1688         dev = &rte_eth_devices[port_id];
1689         *mtu = dev->data->mtu;
1690         return 0;
1691 }
1692
1693 int
1694 rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
1695 {
1696         int ret;
1697         struct rte_eth_dev *dev;
1698
1699         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1700         dev = &rte_eth_devices[port_id];
1701         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
1702
1703         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
1704         if (!ret)
1705                 dev->data->mtu = mtu;
1706
1707         return ret;
1708 }
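
/*
 * Usage sketch (illustrative, hypothetical helper): try to raise the
 * MTU for jumbo frames; on failure, report the value the port keeps.
 * The 9000-byte target is an arbitrary example and drivers may bound
 * the acceptable range differently.
 */
static void __attribute__((unused))
example_enable_jumbo(uint8_t port_id)
{
        uint16_t cur_mtu;

        if (rte_eth_dev_set_mtu(port_id, 9000) != 0 &&
            rte_eth_dev_get_mtu(port_id, &cur_mtu) == 0)
                RTE_LOG(INFO, PMD, "port %u keeps MTU %u\n",
                        port_id, cur_mtu);
}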
1709
1710 int
1711 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1712 {
1713         struct rte_eth_dev *dev;
1714
1715         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1716         dev = &rte_eth_devices[port_id];
1717         if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1718                 RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1719                 return -ENOSYS;
1720         }
1721
1722         if (vlan_id > 4095) {
1723                 RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1724                                 port_id, (unsigned) vlan_id);
1725                 return -EINVAL;
1726         }
1727         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1728
1729         return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1730 }
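
/*
 * Usage sketch (illustrative, hypothetical helper): admit frames tagged
 * with a given VLAN id.  This assumes the port was configured with
 * rxmode.hw_vlan_filter = 1, otherwise the function above returns
 * -ENOSYS.
 */
static int __attribute__((unused))
example_vlan_allow(uint8_t port_id, uint16_t vid)
{
        /* on != 0 adds the id to the hardware filter table. */
        return rte_eth_dev_vlan_filter(port_id, vid, 1);
}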
1731
1732 int
1733 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1734 {
1735         struct rte_eth_dev *dev;
1736
1737         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1738         dev = &rte_eth_devices[port_id];
1739         if (rx_queue_id >= dev->data->nb_rx_queues) {
1740                 RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
1741                 return -EINVAL;
1742         }
1743
1744         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1745         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1746
1747         return 0;
1748 }
1749
1750 int
1751 rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
1752 {
1753         struct rte_eth_dev *dev;
1754
1755         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1756         dev = &rte_eth_devices[port_id];
1757         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1758         (*dev->dev_ops->vlan_tpid_set)(dev, tpid);
1759
1760         return 0;
1761 }
1762
1763 int
1764 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
1765 {
1766         struct rte_eth_dev *dev;
1767         int ret = 0;
1768         int mask = 0;
1769         int cur, org = 0;
1770
1771         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1772         dev = &rte_eth_devices[port_id];
1773
1774         /*check which option changed by application*/
1775         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
1776         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
1777         if (cur != org) {
1778                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
1779                 mask |= ETH_VLAN_STRIP_MASK;
1780         }
1781
1782         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
1783         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
1784         if (cur != org) {
1785                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
1786                 mask |= ETH_VLAN_FILTER_MASK;
1787         }
1788
1789         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
1790         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
1791         if (cur != org) {
1792                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
1793                 mask |= ETH_VLAN_EXTEND_MASK;
1794         }
1795
1796         /*no change*/
1797         if (mask == 0)
1798                 return ret;
1799
1800         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
1801         (*dev->dev_ops->vlan_offload_set)(dev, mask);
1802
1803         return ret;
1804 }
1805
1806 int
1807 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1808 {
1809         struct rte_eth_dev *dev;
1810         int ret = 0;
1811
1812         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1813         dev = &rte_eth_devices[port_id];
1814
1815         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1816                 ret |= ETH_VLAN_STRIP_OFFLOAD;
1817
1818         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1819                 ret |= ETH_VLAN_FILTER_OFFLOAD;
1820
1821         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1822                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
1823
1824         return ret;
1825 }
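
/*
 * Usage sketch (illustrative, hypothetical helper): the get/set pair
 * above operates on a bitmask, so enabling one offload without
 * touching the others is a read-modify-write.
 */
static int __attribute__((unused))
example_enable_vlan_strip(uint8_t port_id)
{
        int mask = rte_eth_dev_get_vlan_offload(port_id);

        if (mask < 0)
                return mask;
        return rte_eth_dev_set_vlan_offload(port_id,
                                            mask | ETH_VLAN_STRIP_OFFLOAD);
}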
1826
1827 int
1828 rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
1829 {
1830         struct rte_eth_dev *dev;
1831
1832         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1833         dev = &rte_eth_devices[port_id];
1834         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
1835         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
1836
1837         return 0;
1838 }
1839
1840 int
1841 rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1842 {
1843         struct rte_eth_dev *dev;
1844
1845         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1846         dev = &rte_eth_devices[port_id];
1847         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
1848         memset(fc_conf, 0, sizeof(*fc_conf));
1849         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
1850 }
1851
1852 int
1853 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1854 {
1855         struct rte_eth_dev *dev;
1856
1857         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1858         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
1859                 RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
1860                 return -EINVAL;
1861         }
1862
1863         dev = &rte_eth_devices[port_id];
1864         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
1865         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
1866 }
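
/*
 * Usage sketch (illustrative, hypothetical helper): read the current
 * flow control configuration, switch to full (RX and TX) pause and
 * write it back.  Thresholds are kept as reported by the driver since
 * their valid ranges are device specific.
 */
static int __attribute__((unused))
example_enable_full_fc(uint8_t port_id)
{
        struct rte_eth_fc_conf fc_conf;
        int ret;

        ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
        if (ret != 0)
                return ret;
        fc_conf.mode = RTE_FC_FULL;
        return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}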
1867
1868 int
1869 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
1870 {
1871         struct rte_eth_dev *dev;
1872
1873         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1874         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
1875                 RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
1876                 return -EINVAL;
1877         }
1878
1879         dev = &rte_eth_devices[port_id];
1880         /* High water and low water validation is device specific */
1881         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->priority_flow_ctrl_set, -ENOTSUP);
1882         return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
1884 }
1885
1886 static int
1887 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
1888                         uint16_t reta_size)
1889 {
1890         uint16_t i, num;
1891
1892         if (!reta_conf)
1893                 return -EINVAL;
1894
1895         if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
1896                 RTE_PMD_DEBUG_TRACE("Invalid reta size, must be a multiple of %u\n",
1897                                                         RTE_RETA_GROUP_SIZE);
1898                 return -EINVAL;
1899         }
1900
1901         num = reta_size / RTE_RETA_GROUP_SIZE;
1902         for (i = 0; i < num; i++) {
1903                 if (reta_conf[i].mask)
1904                         return 0;
1905         }
1906
1907         return -EINVAL;
1908 }
1909
1910 static int
1911 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
1912                          uint16_t reta_size,
1913                          uint8_t max_rxq)
1914 {
1915         uint16_t i, idx, shift;
1916
1917         if (!reta_conf)
1918                 return -EINVAL;
1919
1920         if (max_rxq == 0) {
1921                 RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
1922                 return -EINVAL;
1923         }
1924
1925         for (i = 0; i < reta_size; i++) {
1926                 idx = i / RTE_RETA_GROUP_SIZE;
1927                 shift = i % RTE_RETA_GROUP_SIZE;
1928                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
1929                         (reta_conf[idx].reta[shift] >= max_rxq)) {
1930                         RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
1931                                 "the maximum rxq index: %u\n", idx, shift,
1932                                 reta_conf[idx].reta[shift], max_rxq);
1933                         return -EINVAL;
1934                 }
1935         }
1936
1937         return 0;
1938 }
1939
1940 int
1941 rte_eth_dev_rss_reta_update(uint8_t port_id,
1942                             struct rte_eth_rss_reta_entry64 *reta_conf,
1943                             uint16_t reta_size)
1944 {
1945         struct rte_eth_dev *dev;
1946         int ret;
1947
1948         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1949         /* Check mask bits */
1950         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1951         if (ret < 0)
1952                 return ret;
1953
1954         dev = &rte_eth_devices[port_id];
1955
1956         /* Check entry value */
1957         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
1958                                 dev->data->nb_rx_queues);
1959         if (ret < 0)
1960                 return ret;
1961
1962         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
1963         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
1964 }
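
/*
 * Usage sketch (illustrative, hypothetical helper): spread the whole
 * redirection table round-robin across nb_queues RX queues.  The
 * idx/shift arithmetic mirrors rte_eth_check_reta_entry() above;
 * reta_size should be the value reported by rte_eth_dev_info_get() and
 * is assumed here not to exceed 512 entries.
 */
static int __attribute__((unused))
example_reta_round_robin(uint8_t port_id, uint16_t reta_size,
                         uint16_t nb_queues)
{
        struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_RETA_GROUP_SIZE];
        uint16_t i, idx, shift;

        if (nb_queues == 0 ||
            reta_size > RTE_DIM(reta_conf) * RTE_RETA_GROUP_SIZE)
                return -EINVAL;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                reta_conf[idx].mask |= 1ULL << shift;
                reta_conf[idx].reta[shift] = i % nb_queues;
        }
        return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}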
1965
1966 int
1967 rte_eth_dev_rss_reta_query(uint8_t port_id,
1968                            struct rte_eth_rss_reta_entry64 *reta_conf,
1969                            uint16_t reta_size)
1970 {
1971         struct rte_eth_dev *dev;
1972         int ret;
1973
1974         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1978
1979         /* Check mask bits */
1980         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1981         if (ret < 0)
1982                 return ret;
1983
1984         dev = &rte_eth_devices[port_id];
1985         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
1986         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
1987 }
1988
1989 int
1990 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
1991 {
1992         struct rte_eth_dev *dev;
1993         uint16_t rss_hash_protos;
1994
1995         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1996         rss_hash_protos = rss_conf->rss_hf;
1997         if ((rss_hash_protos != 0) &&
1998             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
1999                 RTE_PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
2000                                 rss_hash_protos);
2001                 return -EINVAL;
2002         }
2003         dev = &rte_eth_devices[port_id];
2004         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2005         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
2006 }
2007
2008 int
2009 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
2010                               struct rte_eth_rss_conf *rss_conf)
2011 {
2012         struct rte_eth_dev *dev;
2013
2014         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2015         dev = &rte_eth_devices[port_id];
2016         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2017         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
2018 }
2019
2020 int
2021 rte_eth_dev_udp_tunnel_add(uint8_t port_id,
2022                            struct rte_eth_udp_tunnel *udp_tunnel)
2023 {
2024         struct rte_eth_dev *dev;
2025
2026         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2027         if (udp_tunnel == NULL) {
2028                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2029                 return -EINVAL;
2030         }
2031
2032         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2033                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2034                 return -EINVAL;
2035         }
2036
2037         dev = &rte_eth_devices[port_id];
2038         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_add, -ENOTSUP);
2039         return (*dev->dev_ops->udp_tunnel_add)(dev, udp_tunnel);
2040 }
2041
2042 int
2043 rte_eth_dev_udp_tunnel_delete(uint8_t port_id,
2044                               struct rte_eth_udp_tunnel *udp_tunnel)
2045 {
2046         struct rte_eth_dev *dev;
2047
2048         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2049         dev = &rte_eth_devices[port_id];
2050
2051         if (udp_tunnel == NULL) {
2052                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2053                 return -EINVAL;
2054         }
2055
2056         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2057                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2058                 return -EINVAL;
2059         }
2060
2061         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_del, -ENOTSUP);
2062         return (*dev->dev_ops->udp_tunnel_del)(dev, udp_tunnel);
2063 }
2064
2065 int
2066 rte_eth_led_on(uint8_t port_id)
2067 {
2068         struct rte_eth_dev *dev;
2069
2070         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2071         dev = &rte_eth_devices[port_id];
2072         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2073         return (*dev->dev_ops->dev_led_on)(dev);
2074 }
2075
2076 int
2077 rte_eth_led_off(uint8_t port_id)
2078 {
2079         struct rte_eth_dev *dev;
2080
2081         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2082         dev = &rte_eth_devices[port_id];
2083         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2084         return (*dev->dev_ops->dev_led_off)(dev);
2085 }
2086
2087 /*
2088  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2089  * an empty spot.
2090  */
2091 static int
2092 get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2093 {
2094         struct rte_eth_dev_info dev_info;
2095         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2096         unsigned i;
2097
2098         rte_eth_dev_info_get(port_id, &dev_info);
2099
2100         for (i = 0; i < dev_info.max_mac_addrs; i++)
2101                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2102                         return i;
2103
2104         return -1;
2105 }
2106
2107 static const struct ether_addr null_mac_addr;
2108
2109 int
2110 rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
2111                         uint32_t pool)
2112 {
2113         struct rte_eth_dev *dev;
2114         int index;
2115         uint64_t pool_mask;
2116
2117         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2118         dev = &rte_eth_devices[port_id];
2119         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2120
2121         if (is_zero_ether_addr(addr)) {
2122                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2123                         port_id);
2124                 return -EINVAL;
2125         }
2126         if (pool >= ETH_64_POOLS) {
2127                 RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2128                 return -EINVAL;
2129         }
2130
2131         index = get_mac_addr_index(port_id, addr);
2132         if (index < 0) {
2133                 index = get_mac_addr_index(port_id, &null_mac_addr);
2134                 if (index < 0) {
2135                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2136                                 port_id);
2137                         return -ENOSPC;
2138                 }
2139         } else {
2140                 pool_mask = dev->data->mac_pool_sel[index];
2141
2142                 /* Check if both MAC address and pool are already there; if so, do nothing */
2143                 if (pool_mask & (1ULL << pool))
2144                         return 0;
2145         }
2146
2147         /* Update NIC */
2148         (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2149
2150         /* Update address in NIC data structure */
2151         ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2152
2153         /* Update pool bitmap in NIC data structure */
2154         dev->data->mac_pool_sel[index] |= (1ULL << pool);
2155
2156         return 0;
2157 }
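
/*
 * Usage sketch (illustrative, hypothetical helper): install a locally
 * administered unicast address in VMDq pool 0.  The address bytes are
 * made up for the example.
 */
static int __attribute__((unused))
example_add_mac(uint8_t port_id)
{
        struct ether_addr addr = {
                .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
        };

        return rte_eth_dev_mac_addr_add(port_id, &addr, 0);
}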
2158
2159 int
2160 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
2161 {
2162         struct rte_eth_dev *dev;
2163         int index;
2164
2165         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2166         dev = &rte_eth_devices[port_id];
2167         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2168
2169         index = get_mac_addr_index(port_id, addr);
2170         if (index == 0) {
2171                 RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2172                 return -EADDRINUSE;
2173         } else if (index < 0)
2174                 return 0;  /* Do nothing if address wasn't found */
2175
2176         /* Update NIC */
2177         (*dev->dev_ops->mac_addr_remove)(dev, index);
2178
2179         /* Update address in NIC data structure */
2180         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2181
2182         /* reset pool bitmap */
2183         dev->data->mac_pool_sel[index] = 0;
2184
2185         return 0;
2186 }
2187
2188 int
2189 rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr)
2190 {
2191         struct rte_eth_dev *dev;
2192
2193         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2194
2195         if (!is_valid_assigned_ether_addr(addr))
2196                 return -EINVAL;
2197
2198         dev = &rte_eth_devices[port_id];
2199         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2200
2201         /* Update default address in NIC data structure */
2202         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
2203
2204         (*dev->dev_ops->mac_addr_set)(dev, addr);
2205
2206         return 0;
2207 }
2208
2209 int
2210 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
2211                                 uint16_t rx_mode, uint8_t on)
2212 {
2213         uint16_t num_vfs;
2214         struct rte_eth_dev *dev;
2215         struct rte_eth_dev_info dev_info;
2216
2217         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2218
2219         dev = &rte_eth_devices[port_id];
2220         rte_eth_dev_info_get(port_id, &dev_info);
2221
2222         num_vfs = dev_info.max_vfs;
2223         if (vf >= num_vfs) {
2224                 RTE_PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
2225                 return -EINVAL;
2226         }
2227
2228         if (rx_mode == 0) {
2229                 RTE_PMD_DEBUG_TRACE("set VF RX mode:mode mask cannot be zero\n");
2230                 return -EINVAL;
2231         }
2232         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
2233         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
2234 }
2235
2236 /*
2237  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2238  * an empty spot.
2239  */
2240 static int
2241 get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2242 {
2243         struct rte_eth_dev_info dev_info;
2244         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2245         unsigned i;
2246
2247         rte_eth_dev_info_get(port_id, &dev_info);
2248         if (!dev->data->hash_mac_addrs)
2249                 return -1;
2250
2251         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2252                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2253                         ETHER_ADDR_LEN) == 0)
2254                         return i;
2255
2256         return -1;
2257 }
2258
2259 int
2260 rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
2261                                 uint8_t on)
2262 {
2263         int index;
2264         int ret;
2265         struct rte_eth_dev *dev;
2266
2267         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2268
2269         dev = &rte_eth_devices[port_id];
2270         if (is_zero_ether_addr(addr)) {
2271                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2272                         port_id);
2273                 return -EINVAL;
2274         }
2275
2276         index = get_hash_mac_addr_index(port_id, addr);
2277         /* Check if it's already there, and do nothing */
2278         if ((index >= 0) && (on))
2279                 return 0;
2280
2281         if (index < 0) {
2282                 if (!on) {
2283                         RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
2284                                 "set in UTA\n", port_id);
2285                         return -EINVAL;
2286                 }
2287
2288                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2289                 if (index < 0) {
2290                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2291                                         port_id);
2292                         return -ENOSPC;
2293                 }
2294         }
2295
2296         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2297         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2298         if (ret == 0) {
2299                 /* Update address in NIC data structure */
2300                 if (on)
2301                         ether_addr_copy(addr,
2302                                         &dev->data->hash_mac_addrs[index]);
2303                 else
2304                         ether_addr_copy(&null_mac_addr,
2305                                         &dev->data->hash_mac_addrs[index]);
2306         }
2307
2308         return ret;
2309 }
2310
2311 int
2312 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2313 {
2314         struct rte_eth_dev *dev;
2315
2316         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2317
2318         dev = &rte_eth_devices[port_id];
2319
2320         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2321         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2322 }
2323
2324 int
2325 rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on)
2326 {
2327         uint16_t num_vfs;
2328         struct rte_eth_dev *dev;
2329         struct rte_eth_dev_info dev_info;
2330
2331         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2332
2333         dev = &rte_eth_devices[port_id];
2334         rte_eth_dev_info_get(port_id, &dev_info);
2335
2336         num_vfs = dev_info.max_vfs;
2337         if (vf >= num_vfs) {
2338                 RTE_PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2339                 return -EINVAL;
2340         }
2341
2342         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2343         return (*dev->dev_ops->set_vf_rx)(dev, vf, on);
2344 }
2345
2346 int
2347 rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on)
2348 {
2349         uint16_t num_vfs;
2350         struct rte_eth_dev *dev;
2351         struct rte_eth_dev_info dev_info;
2352
2353         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2354
2355         dev = &rte_eth_devices[port_id];
2356         rte_eth_dev_info_get(port_id, &dev_info);
2357
2358         num_vfs = dev_info.max_vfs;
2359         if (vf >= num_vfs) {
2360                 RTE_PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
2361                 return -EINVAL;
2362         }
2363
2364         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2365         return (*dev->dev_ops->set_vf_tx)(dev, vf, on);
2366 }
2367
2368 int
2369 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2370                                uint64_t vf_mask, uint8_t vlan_on)
2371 {
2372         struct rte_eth_dev *dev;
2373
2374         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2375
2376         dev = &rte_eth_devices[port_id];
2377
2378         if (vlan_id > ETHER_MAX_VLAN_ID) {
2379                 RTE_PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
2380                         vlan_id);
2381                 return -EINVAL;
2382         }
2383
2384         if (vf_mask == 0) {
2385                 RTE_PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
2386                 return -EINVAL;
2387         }
2388
2389         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2390         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2391                                                    vf_mask, vlan_on);
2392 }
2393
2394 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
2395                                         uint16_t tx_rate)
2396 {
2397         struct rte_eth_dev *dev;
2398         struct rte_eth_dev_info dev_info;
2399         struct rte_eth_link link;
2400
2401         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2402
2403         dev = &rte_eth_devices[port_id];
2404         rte_eth_dev_info_get(port_id, &dev_info);
2405         link = dev->data->dev_link;
2406
2407         if (queue_idx >= dev_info.max_tx_queues) {
2408                 RTE_PMD_DEBUG_TRACE("set queue rate limit:port %d: "
2409                                 "invalid queue id=%d\n", port_id, queue_idx);
2410                 return -EINVAL;
2411         }
2412
2413         if (tx_rate > link.link_speed) {
2414                 RTE_PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
2415                                 "greater than link speed %d\n",
2416                         tx_rate, link.link_speed);
2417                 return -EINVAL;
2418         }
2419
2420         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2421         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2422 }
2423
2424 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
2425                                 uint64_t q_msk)
2426 {
2427         struct rte_eth_dev *dev;
2428         struct rte_eth_dev_info dev_info;
2429         struct rte_eth_link link;
2430
2431         if (q_msk == 0)
2432                 return 0;
2433
2434         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2435
2436         dev = &rte_eth_devices[port_id];
2437         rte_eth_dev_info_get(port_id, &dev_info);
2438         link = dev->data->dev_link;
2439
2440         if (vf >= dev_info.max_vfs) {
2441                 RTE_PMD_DEBUG_TRACE("set VF rate limit:port %d: "
2442                                 "invalid vf id=%d\n", port_id, vf);
2443                 return -EINVAL;
2444         }
2445
2446         if (tx_rate > link.link_speed) {
2447                 RTE_PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
2448                                 "greater than link speed %d\n",
2449                                 tx_rate, link.link_speed);
2450                 return -EINVAL;
2451         }
2452
2453         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
2454         return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
2455 }
2456
2457 int
2458 rte_eth_mirror_rule_set(uint8_t port_id,
2459                         struct rte_eth_mirror_conf *mirror_conf,
2460                         uint8_t rule_id, uint8_t on)
2461 {
2462         struct rte_eth_dev *dev;
2463
2464         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2465         if (mirror_conf->rule_type == 0) {
2466                 RTE_PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2467                 return -EINVAL;
2468         }
2469
2470         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2471                 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
2472                                 ETH_64_POOLS - 1);
2473                 return -EINVAL;
2474         }
2475
2476         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
2477              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
2478             (mirror_conf->pool_mask == 0)) {
2479                 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
2480                 return -EINVAL;
2481         }
2482
2483         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
2484             mirror_conf->vlan.vlan_mask == 0) {
2485                 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
2486                 return -EINVAL;
2487         }
2488
2489         dev = &rte_eth_devices[port_id];
2490         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2491
2492         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2493 }
2494
2495 int
2496 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2497 {
2498         struct rte_eth_dev *dev;
2499
2500         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2501
2502         dev = &rte_eth_devices[port_id];
2503         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2504
2505         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2506 }
2507
2508 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2509 uint16_t
2510 rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
2511                  struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2512 {
2513         struct rte_eth_dev *dev;
2514
2515         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
2516
2517         dev = &rte_eth_devices[port_id];
2518         RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
2519         if (queue_id >= dev->data->nb_rx_queues) {
2520                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
2521                 return 0;
2522         }
2523         return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
2524                                                 rx_pkts, nb_pkts);
2525 }
2526
2527 uint16_t
2528 rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
2529                  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2530 {
2531         struct rte_eth_dev *dev;
2532
2533         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
2534
2535         dev = &rte_eth_devices[port_id];
2536
2537         RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
2538         if (queue_id >= dev->data->nb_tx_queues) {
2539                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
2540                 return 0;
2541         }
2542         return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
2543                                                 tx_pkts, nb_pkts);
2544 }
2545
2546 uint32_t
2547 rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
2548 {
2549         struct rte_eth_dev *dev;
2550
2551         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
2552
2553         dev = &rte_eth_devices[port_id];
2554         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, 0);
2555         return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
2556 }
2557
2558 int
2559 rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
2560 {
2561         struct rte_eth_dev *dev;
2562
2563         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2564
2565         dev = &rte_eth_devices[port_id];
2566         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
2567         return (*dev->dev_ops->rx_descriptor_done)(dev->data->rx_queues[queue_id],
2568                                                    offset);
2569 }
2570 #endif
2571
2572 int
2573 rte_eth_dev_callback_register(uint8_t port_id,
2574                         enum rte_eth_event_type event,
2575                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2576 {
2577         struct rte_eth_dev *dev;
2578         struct rte_eth_dev_callback *user_cb;
2579
2580         if (!cb_fn)
2581                 return -EINVAL;
2582
2583         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2584
2585         dev = &rte_eth_devices[port_id];
2586         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2587
2588         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
2589                 if (user_cb->cb_fn == cb_fn &&
2590                         user_cb->cb_arg == cb_arg &&
2591                         user_cb->event == event) {
2592                         break;
2593                 }
2594         }
2595
2596         /* create a new callback. */
2597         if (user_cb == NULL) {
2598                 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
2599                                       sizeof(struct rte_eth_dev_callback), 0);
2600                 if (user_cb != NULL) {
2601                         user_cb->cb_fn = cb_fn;
2602                         user_cb->cb_arg = cb_arg;
2603                         user_cb->event = event;
2604                         TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
2605                 }
2606         }
2606
2607         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2608         return (user_cb == NULL) ? -ENOMEM : 0;
2609 }
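
/*
 * Usage sketch (illustrative, hypothetical handler): a link status
 * change callback.  It runs from the interrupt thread, so it only
 * reads the link state; it would be registered with
 * rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 * example_lsc_cb, NULL).
 */
static void __attribute__((unused))
example_lsc_cb(uint8_t port_id, enum rte_eth_event_type event, void *cb_arg)
{
        struct rte_eth_link link;

        RTE_SET_USED(event);
        RTE_SET_USED(cb_arg);
        rte_eth_link_get_nowait(port_id, &link);
        RTE_LOG(INFO, PMD, "port %u link %s\n", port_id,
                link.link_status ? "up" : "down");
}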
2610
2611 int
2612 rte_eth_dev_callback_unregister(uint8_t port_id,
2613                         enum rte_eth_event_type event,
2614                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2615 {
2616         int ret;
2617         struct rte_eth_dev *dev;
2618         struct rte_eth_dev_callback *cb, *next;
2619
2620         if (!cb_fn)
2621                 return -EINVAL;
2622
2623         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2624
2625         dev = &rte_eth_devices[port_id];
2626         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2627
2628         ret = 0;
2629         for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
2630
2631                 next = TAILQ_NEXT(cb, next);
2632
2633                 if (cb->cb_fn != cb_fn || cb->event != event ||
2634                                 (cb->cb_arg != (void *)-1 &&
2635                                 cb->cb_arg != cb_arg))
2636                         continue;
2637
2638                 /*
2639                  * if this callback is not executing right now,
2640                  * then remove it.
2641                  */
2642                 if (cb->active == 0) {
2643                         TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
2644                         rte_free(cb);
2645                 } else {
2646                         ret = -EAGAIN;
2647                 }
2648         }
2649
2650         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2651         return ret;
2652 }
2653
2654 void
2655 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
2656         enum rte_eth_event_type event)
2657 {
2658         struct rte_eth_dev_callback *cb_lst;
2659         struct rte_eth_dev_callback dev_cb;
2660
2661         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2662         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
2663                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
2664                         continue;
2665                 dev_cb = *cb_lst;
2666                 cb_lst->active = 1;
2667                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2668                 dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
2669                                                 dev_cb.cb_arg);
2670                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
2671                 cb_lst->active = 0;
2672         }
2673         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2674 }
2675
2676 int
2677 rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
2678 {
2679         uint32_t vec;
2680         struct rte_eth_dev *dev;
2681         struct rte_intr_handle *intr_handle;
2682         uint16_t qid;
2683         int rc;
2684
2685         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2689
2690         dev = &rte_eth_devices[port_id];
2691         intr_handle = &dev->pci_dev->intr_handle;
2692         if (!intr_handle->intr_vec) {
2693                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2694                 return -EPERM;
2695         }
2696
2697         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
2698                 vec = intr_handle->intr_vec[qid];
2699                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2700                 if (rc && rc != -EEXIST) {
2701                         RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2702                                         " op %d epfd %d vec %u\n",
2703                                         port_id, qid, op, epfd, vec);
2704                 }
2705         }
2706
2707         return 0;
2708 }
2709
2710 const struct rte_memzone *
2711 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
2712                          uint16_t queue_id, size_t size, unsigned align,
2713                          int socket_id)
2714 {
2715         char z_name[RTE_MEMZONE_NAMESIZE];
2716         const struct rte_memzone *mz;
2717
2718         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
2719                  dev->driver->pci_drv.name, ring_name,
2720                  dev->data->port_id, queue_id);
2721
2722         mz = rte_memzone_lookup(z_name);
2723         if (mz)
2724                 return mz;
2725
2726         if (is_xen_dom0_supported())
2727                 return rte_memzone_reserve_bounded(z_name, size, socket_id,
2728                                                    0, align, RTE_PGSIZE_2M);
2729         else
2730                 return rte_memzone_reserve_aligned(z_name, size, socket_id,
2731                                                    0, align);
2732 }
2733
2734 int
2735 rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
2736                           int epfd, int op, void *data)
2737 {
2738         uint32_t vec;
2739         struct rte_eth_dev *dev;
2740         struct rte_intr_handle *intr_handle;
2741         int rc;
2742
2743         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2747
2748         dev = &rte_eth_devices[port_id];
2749         if (queue_id >= dev->data->nb_rx_queues) {
2750                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
2751                 return -EINVAL;
2752         }
2753
2754         intr_handle = &dev->pci_dev->intr_handle;
2755         if (!intr_handle->intr_vec) {
2756                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2757                 return -EPERM;
2758         }
2759
2760         vec = intr_handle->intr_vec[queue_id];
2761         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2762         if (rc && rc != -EEXIST) {
2763                 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2764                                 " op %d epfd %d vec %u\n",
2765                                 port_id, queue_id, op, epfd, vec);
2766                 return rc;
2767         }
2768
2769         return 0;
2770 }
2771
2772 int
2773 rte_eth_dev_rx_intr_enable(uint8_t port_id,
2774                            uint16_t queue_id)
2775 {
2776         struct rte_eth_dev *dev;
2777
2778         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2782
2783         dev = &rte_eth_devices[port_id];
2784
2785         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
2786         return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
2787 }
2788
2789 int
2790 rte_eth_dev_rx_intr_disable(uint8_t port_id,
2791                             uint16_t queue_id)
2792 {
2793         struct rte_eth_dev *dev;
2794
2795         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2799
2800         dev = &rte_eth_devices[port_id];
2801
2802         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
2803         return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
2804 }
2805
2806 #ifdef RTE_NIC_BYPASS
2807 int rte_eth_dev_bypass_init(uint8_t port_id)
2808 {
2809         struct rte_eth_dev *dev;
2810
2811         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2812
2813         dev = &rte_eth_devices[port_id];
2814         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
2815         (*dev->dev_ops->bypass_init)(dev);
2816         return 0;
2817 }
2818
2819 int
2820 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
2821 {
2822         struct rte_eth_dev *dev;
2823
2824         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2825
2826         dev = &rte_eth_devices[port_id];
2827         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2828         (*dev->dev_ops->bypass_state_show)(dev, state);
2829         return 0;
2830 }
2831
2832 int
2833 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
2834 {
2835         struct rte_eth_dev *dev;
2836
2837         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2838
2839         dev = &rte_eth_devices[port_id];
2840         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
2841         (*dev->dev_ops->bypass_state_set)(dev, new_state);
2842         return 0;
2843 }
2844
2845 int
2846 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
2847 {
2848         struct rte_eth_dev *dev;
2849
2850         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2851
2852         dev = &rte_eth_devices[port_id];
2853         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_show, -ENOTSUP);
2854         (*dev->dev_ops->bypass_event_show)(dev, event, state);
2855         return 0;
2856 }
2857
2858 int
2859 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
2860 {
2861         struct rte_eth_dev *dev;
2862
2863         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2864
2865         dev = &rte_eth_devices[port_id];
2866
2867         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
2868         (*dev->dev_ops->bypass_event_set)(dev, event, state);
2869         return 0;
2870 }
2871
2872 int
2873 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
2874 {
2875         struct rte_eth_dev *dev;
2876
2877         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2878
2879         dev = &rte_eth_devices[port_id];
2880
2881         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
2882         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
2883         return 0;
2884 }
2885
2886 int
2887 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
2888 {
2889         struct rte_eth_dev *dev;
2890
2891         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2892
2893         dev = &rte_eth_devices[port_id];
2894
2895         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
2896         (*dev->dev_ops->bypass_ver_show)(dev, ver);
2897         return 0;
2898 }
2899
2900 int
2901 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
2902 {
2903         struct rte_eth_dev *dev;
2904
2905         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2906
2907         dev = &rte_eth_devices[port_id];
2908
2909         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
2910         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
2911         return 0;
2912 }
2913
2914 int
2915 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
2916 {
2917         struct rte_eth_dev *dev;
2918
2919         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2920
2921         dev = &rte_eth_devices[port_id];
2922
2923         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
2924         (*dev->dev_ops->bypass_wd_reset)(dev);
2925         return 0;
2926 }
2927 #endif
2928
2929 int
2930 rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
2931 {
2932         struct rte_eth_dev *dev;
2933
2934         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2935
2936         dev = &rte_eth_devices[port_id];
2937         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2938         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
2939                                 RTE_ETH_FILTER_NOP, NULL);
2940 }
2941
2942 int
2943 rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
2944                        enum rte_filter_op filter_op, void *arg)
2945 {
2946         struct rte_eth_dev *dev;
2947
2948         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2949
2950         dev = &rte_eth_devices[port_id];
2951         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2952         return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
2953 }
2954
2955 void *
2956 rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
2957                 rte_rx_callback_fn fn, void *user_param)
2958 {
2959 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2960         rte_errno = ENOTSUP;
2961         return NULL;
2962 #endif
2963         /* check input parameters */
2964         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2965                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2966                 rte_errno = EINVAL;
2967                 return NULL;
2968         }
2969
2970         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2971
2972         if (cb == NULL) {
2973                 rte_errno = ENOMEM;
2974                 return NULL;
2975         }
2976
2977         cb->fn.rx = fn;
2978         cb->param = user_param;
2979
2980         /* Add the callbacks in fifo order. */
2981         struct rte_eth_rxtx_callback *tail =
2982                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
2983
2984         if (!tail) {
2985                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
2986
2987         } else {
2988                 while (tail->next)
2989                         tail = tail->next;
2990                 tail->next = cb;
2991         }
2992
2993         return cb;
2994 }
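
/*
 * Usage sketch (illustrative, hypothetical callback): count packets
 * delivered on a queue.  The function runs inside rte_eth_rx_burst()
 * after the PMD has filled pkts; user_param is assumed to point to a
 * uint64_t counter supplied at rte_eth_add_rx_callback() time.
 */
static uint16_t __attribute__((unused))
example_rx_count_cb(uint8_t port_id, uint16_t queue_id,
                    struct rte_mbuf **pkts, uint16_t nb_pkts,
                    uint16_t max_pkts, void *user_param)
{
        RTE_SET_USED(port_id);
        RTE_SET_USED(queue_id);
        RTE_SET_USED(pkts);
        RTE_SET_USED(max_pkts);
        *(uint64_t *)user_param += nb_pkts;
        return nb_pkts;
}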
2995
2996 void *
2997 rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
2998                 rte_tx_callback_fn fn, void *user_param)
2999 {
3000 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3001         rte_errno = ENOTSUP;
3002         return NULL;
3003 #endif
3004         /* check input parameters */
3005         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3006                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3007                 rte_errno = EINVAL;
3008                 return NULL;
3009         }
3010
3011         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3012
3013         if (cb == NULL) {
3014                 rte_errno = ENOMEM;
3015                 return NULL;
3016         }
3017
3018         cb->fn.tx = fn;
3019         cb->param = user_param;
3020
3021         /* Add the callbacks in fifo order. */
3022         struct rte_eth_rxtx_callback *tail =
3023                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3024
3025         if (!tail) {
3026                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3027
3028         } else {
3029                 while (tail->next)
3030                         tail = tail->next;
3031                 tail->next = cb;
3032         }
3033
3034         return cb;
3035 }
3036
3037 int
3038 rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
3039                 struct rte_eth_rxtx_callback *user_cb)
3040 {
3041 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3042         return -ENOTSUP;
3043 #endif
3044         /* Check input parameters. */
3045         if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
3046                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3047                 return -EINVAL;
3048         }
3049
3050         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3051         struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
3052         struct rte_eth_rxtx_callback *prev_cb;
3053
3054         /* Reset head pointer and remove user cb if first in the list. */
3055         if (cb == user_cb) {
3056                 dev->post_rx_burst_cbs[queue_id] = user_cb->next;
3057                 return 0;
3058         }
3059
3060         /* Remove the user cb from the callback list. */
3061         do {
3062                 prev_cb = cb;
3063                 cb = cb->next;
3064
3065                 if (cb == user_cb) {
3066                         prev_cb->next = user_cb->next;
3067                         return 0;
3068                 }
3069
3070         } while (cb != NULL);
3071
3072         /* Callback wasn't found. */
3073         return -EINVAL;
3074 }
3075
3076 int
3077 rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
3078                 struct rte_eth_rxtx_callback *user_cb)
3079 {
3080 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3081         return -ENOTSUP;
3082 #endif
3083         /* Check input parameters. */
3084         if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
3085                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3086                 return -EINVAL;
3087         }
3088
3089         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3090         struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
3091         struct rte_eth_rxtx_callback *prev_cb;
3092
3093         /* Reset head pointer and remove user cb if first in the list. */
3094         if (cb == user_cb) {
3095                 dev->pre_tx_burst_cbs[queue_id] = user_cb->next;
3096                 return 0;
3097         }
3098
3099         /* Remove the user cb from the callback list. */
3100         do {
3101                 prev_cb = cb;
3102                 cb = cb->next;
3103
3104                 if (cb == user_cb) {
3105                         prev_cb->next = user_cb->next;
3106                         return 0;
3107                 }
3108
3109         } while (cb != NULL);
3110
3111         /* Callback wasn't found. */
3112         return -EINVAL;
3113 }

int
rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (qinfo == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);

	memset(qinfo, 0, sizeof(*qinfo));
	(*dev->dev_ops->rxq_info_get)(dev, queue_id, qinfo);
	return 0;
}

int
rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (qinfo == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);

	memset(qinfo, 0, sizeof(*qinfo));
	(*dev->dev_ops->txq_info_get)(dev, queue_id, qinfo);
	return 0;
}
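
/*
 * Illustrative usage sketch, not part of the library: querying a
 * configured RX queue.  Assumes "port_id"/"queue_id" name a set-up
 * port; not every PMD implements rxq_info_get, so -ENOTSUP must be
 * tolerated by the caller.
 *
 *	struct rte_eth_rxq_info qinfo;
 *
 *	if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) == 0)
 *		printf("rxq %u: %u descriptors, scattered=%u\n", queue_id,
 *		       qinfo.nb_desc, qinfo.scattered_rx);
 */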

int
rte_eth_dev_set_mc_addr_list(uint8_t port_id,
			     struct ether_addr *mc_addr_set,
			     uint32_t nb_mc_addr)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
	return (*dev->dev_ops->set_mc_addr_list)(dev, mc_addr_set, nb_mc_addr);
}
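
/*
 * Illustrative usage sketch, not part of the library: replacing the
 * device's multicast filter with two hypothetical group addresses.
 * The call supplies the complete list each time; passing
 * nb_mc_addr == 0 flushes it.
 *
 *	static struct ether_addr mc_list[] = {
 *		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
 *		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb } },
 *	};
 *	int ret = rte_eth_dev_set_mc_addr_list(port_id, mc_list, 2);
 *
 *	if (ret != 0)
 *		RTE_LOG(ERR, PMD, "mc list rejected: %d\n", ret);
 */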

int
rte_eth_timesync_enable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
	return (*dev->dev_ops->timesync_enable)(dev);
}

int
rte_eth_timesync_disable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
	return (*dev->dev_ops->timesync_disable)(dev);
}

int
rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp,
				   uint32_t flags)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp,
				-ENOTSUP);
	return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp,
							   flags);
}

int
rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp,
				-ENOTSUP);
	return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
}
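
/*
 * Illustrative usage sketch, not part of the library: fetching the
 * hardware timestamp of a received IEEE 1588 frame.  The meaning of
 * "flags" is PMD specific (often a timestamp register index); 0 is a
 * placeholder here, and error handling is elided.
 *
 *	struct timespec ts;
 *
 *	rte_eth_timesync_enable(port_id);
 *	... receive a PTP frame ...
 *	if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
 *		printf("rx stamp: %ld.%09ld\n", (long)ts.tv_sec,
 *		       (long)ts.tv_nsec);
 */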

int
rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
	return (*dev->dev_ops->timesync_adjust_time)(dev, delta);
}

int
rte_eth_timesync_read_time(uint8_t port_id, struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
	return (*dev->dev_ops->timesync_read_time)(dev, timestamp);
}

int
rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
	return (*dev->dev_ops->timesync_write_time)(dev, timestamp);
}
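
/*
 * Illustrative usage sketch, not part of the library: the slave side of
 * a simple PTP servo.  "offset_ns" is a hypothetical, externally
 * measured offset from the master clock.
 *
 *	int64_t offset_ns = ...;
 *	struct timespec now;
 *
 *	rte_eth_timesync_adjust_time(port_id, -offset_ns);
 *	if (rte_eth_timesync_read_time(port_id, &now) == 0)
 *		... "now" holds the corrected device time ...
 */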

int
rte_eth_dev_get_reg_length(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg_length, -ENOTSUP);
	return (*dev->dev_ops->get_reg_length)(dev);
}

int
rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
	return (*dev->dev_ops->get_reg)(dev, info);
}
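
/*
 * Illustrative usage sketch, not part of the library: taking a register
 * snapshot.  This assumes the ethtool-style convention that
 * rte_eth_dev_get_reg_length() reports a count of 32-bit registers and
 * that rte_dev_reg_info.data must point to a buffer large enough to
 * hold them all.
 *
 *	int len = rte_eth_dev_get_reg_length(port_id);
 *	struct rte_dev_reg_info reg = { 0 };
 *
 *	if (len > 0) {
 *		reg.data = calloc(len, sizeof(uint32_t));
 *		reg.length = len;
 *		if (reg.data != NULL &&
 *		    rte_eth_dev_get_reg_info(port_id, &reg) == 0)
 *			... reg.data holds the register snapshot ...
 *		free(reg.data);
 *	}
 */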

int
rte_eth_dev_get_eeprom_length(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
	return (*dev->dev_ops->get_eeprom_length)(dev);
}

int
rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
	return (*dev->dev_ops->get_eeprom)(dev, info);
}

int
rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
	return (*dev->dev_ops->set_eeprom)(dev, info);
}
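
/*
 * Illustrative usage sketch, not part of the library: reading the first
 * 64 bytes of the device EEPROM.  Assumes the ethtool-style convention
 * for rte_dev_eeprom_info: the driver fills in "magic" on a read, and a
 * later write is only accepted when the same magic is passed back.
 *
 *	struct rte_dev_eeprom_info ee = { 0 };
 *	uint8_t buf[64];
 *
 *	if (rte_eth_dev_get_eeprom_length(port_id) >= (int)sizeof(buf)) {
 *		ee.data = buf;
 *		ee.offset = 0;
 *		ee.length = sizeof(buf);
 *		if (rte_eth_dev_get_eeprom(port_id, &ee) == 0)
 *			... buf now holds the EEPROM bytes ...
 *	}
 */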

int
rte_eth_dev_get_dcb_info(uint8_t port_id,
			 struct rte_eth_dcb_info *dcb_info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (dcb_info == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
	return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
}
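
/*
 * Illustrative usage sketch, not part of the library: checking how many
 * traffic classes a DCB-configured port exposes.
 *
 *	struct rte_eth_dcb_info dcb;
 *
 *	if (rte_eth_dev_get_dcb_info(port_id, &dcb) == 0)
 *		printf("port %u: %u TCs\n", port_id, dcb.nb_tcs);
 */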

void
rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev,
		      struct rte_pci_device *pci_dev)
{
	if ((eth_dev == NULL) || (pci_dev == NULL)) {
		RTE_PMD_DEBUG_TRACE("NULL pointer eth_dev=%p pci_dev=%p\n",
				eth_dev, pci_dev);
		return;
	}

	eth_dev->data->dev_flags = 0;
	if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
	if (pci_dev->driver->drv_flags & RTE_PCI_DRV_DETACHABLE)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

	eth_dev->data->kdrv = pci_dev->kdrv;
	eth_dev->data->numa_node = pci_dev->numa_node;
	eth_dev->data->drv_name = pci_dev->driver->name;
}
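
/*
 * Illustrative usage sketch, not part of the library: a PCI PMD would
 * typically call this helper early in its eth_dev init routine, so that
 * ethdev flags such as RTE_ETH_DEV_INTR_LSC mirror the driver's
 * drv_flags.  "my_pmd_eth_dev_init" is hypothetical; it assumes the
 * ethdev's pci_dev pointer has already been set by the EAL.
 *
 *	static int
 *	my_pmd_eth_dev_init(struct rte_eth_dev *eth_dev)
 *	{
 *		rte_eth_copy_pci_info(eth_dev, eth_dev->pci_dev);
 *		...
 *		return 0;
 *	}
 */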