dfd6c0b49f164bc1ff4cc6cfa5c26a5b535a8ab2
[dpdk.git] / lib / librte_ether / rte_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/types.h>
35 #include <sys/queue.h>
36 #include <ctype.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include <stdarg.h>
41 #include <errno.h>
42 #include <stdint.h>
43 #include <inttypes.h>
44 #include <netinet/in.h>
45
46 #include <rte_byteorder.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_interrupts.h>
50 #include <rte_pci.h>
51 #include <rte_memory.h>
52 #include <rte_memcpy.h>
53 #include <rte_memzone.h>
54 #include <rte_launch.h>
55 #include <rte_eal.h>
56 #include <rte_per_lcore.h>
57 #include <rte_lcore.h>
58 #include <rte_atomic.h>
59 #include <rte_branch_prediction.h>
60 #include <rte_common.h>
61 #include <rte_ring.h>
62 #include <rte_mempool.h>
63 #include <rte_malloc.h>
64 #include <rte_mbuf.h>
65 #include <rte_errno.h>
66 #include <rte_spinlock.h>
67 #include <rte_string_fns.h>
68
69 #include "rte_ether.h"
70 #include "rte_ethdev.h"
71
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
/* Error-level trace prefixed with the calling function's name; compiles
 * to nothing unless RTE_LIBRTE_ETHDEV_DEBUG is enabled. */
#define PMD_DEBUG_TRACE(fmt, args...) do {                        \
		RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
	} while (0)
#else
#define PMD_DEBUG_TRACE(fmt, args...)
#endif

/* Macros for checking for restricting functions to primary instance only */
#define PROC_PRIMARY_OR_ERR_RET(retval) do { \
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
		PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
		return (retval); \
	} \
} while (0)

/* Same restriction as above, for functions returning void. */
#define PROC_PRIMARY_OR_RET() do { \
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
		PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
		return; \
	} \
} while (0)

/* Macros to check for invalid function pointers in dev_ops structure */
#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
	if ((func) == NULL) { \
		PMD_DEBUG_TRACE("Function not supported\n"); \
		return (retval); \
	} \
} while (0)

/* Same check as above, for functions returning void. */
#define FUNC_PTR_OR_RET(func) do { \
	if ((func) == NULL) { \
		PMD_DEBUG_TRACE("Function not supported\n"); \
		return; \
	} \
} while (0)

/* Macros to check for valid port */
#define VALID_PORTID_OR_ERR_RET(port_id, retval) do {           \
	if (!rte_eth_dev_is_valid_port(port_id)) {              \
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
		return retval;                                  \
	}                                                       \
} while (0)

/* Same port validation as above, for functions returning void. */
#define VALID_PORTID_OR_RET(port_id) do {                       \
	if (!rte_eth_dev_is_valid_port(port_id)) {              \
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
		return;                                         \
	}                                                       \
} while (0)
124
/* Name of the memzone that carries the shared per-port data array. */
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
/* Per-process table of Ethernet devices, indexed by port id. */
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
/* Points into the shared memzone; mapped by rte_eth_dev_data_alloc(). */
static struct rte_eth_dev_data *rte_eth_dev_data;
/* Number of ports currently attached in this process. */
static uint8_t nb_ports;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and its offset in stats structure  */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset; /* byte offset of the counter in rte_eth_stats */
};

/* Device-level basic counters exposed through the xstats API. */
static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

/* Per-RX-queue counters; offsets address the q_* per-queue arrays. */
static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
		sizeof(rte_rxq_stats_strings[0]))

/* Per-TX-queue counters. */
static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
		sizeof(rte_txq_stats_strings[0]))


/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

/* Direction selector for the queue <-> stats-counter mapping API. */
enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

/* Attachment state of a slot in rte_eth_devices[]. */
enum {
	DEV_DETACHED = 0,
	DEV_ATTACHED
};
192
193 static void
194 rte_eth_dev_data_alloc(void)
195 {
196         const unsigned flags = 0;
197         const struct rte_memzone *mz;
198
199         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
200                 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
201                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
202                                 rte_socket_id(), flags);
203         } else
204                 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
205         if (mz == NULL)
206                 rte_panic("Cannot allocate memzone for ethernet port data\n");
207
208         rte_eth_dev_data = mz->addr;
209         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
210                 memset(rte_eth_dev_data, 0,
211                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
212 }
213
214 struct rte_eth_dev *
215 rte_eth_dev_allocated(const char *name)
216 {
217         unsigned i;
218
219         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
220                 if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
221                     strcmp(rte_eth_devices[i].data->name, name) == 0)
222                         return &rte_eth_devices[i];
223         }
224         return NULL;
225 }
226
227 static uint8_t
228 rte_eth_dev_find_free_port(void)
229 {
230         unsigned i;
231
232         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
233                 if (rte_eth_devices[i].attached == DEV_DETACHED)
234                         return i;
235         }
236         return RTE_MAX_ETHPORTS;
237 }
238
239 struct rte_eth_dev *
240 rte_eth_dev_allocate(const char *name, enum rte_eth_dev_type type)
241 {
242         uint8_t port_id;
243         struct rte_eth_dev *eth_dev;
244
245         port_id = rte_eth_dev_find_free_port();
246         if (port_id == RTE_MAX_ETHPORTS) {
247                 PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
248                 return NULL;
249         }
250
251         if (rte_eth_dev_data == NULL)
252                 rte_eth_dev_data_alloc();
253
254         if (rte_eth_dev_allocated(name) != NULL) {
255                 PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
256                                 name);
257                 return NULL;
258         }
259
260         eth_dev = &rte_eth_devices[port_id];
261         eth_dev->data = &rte_eth_dev_data[port_id];
262         snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
263         eth_dev->data->port_id = port_id;
264         eth_dev->attached = DEV_ATTACHED;
265         eth_dev->dev_type = type;
266         nb_ports++;
267         return eth_dev;
268 }
269
270 static int
271 rte_eth_dev_create_unique_device_name(char *name, size_t size,
272                 struct rte_pci_device *pci_dev)
273 {
274         int ret;
275
276         if ((name == NULL) || (pci_dev == NULL))
277                 return -EINVAL;
278
279         ret = snprintf(name, size, "%d:%d.%d",
280                         pci_dev->addr.bus, pci_dev->addr.devid,
281                         pci_dev->addr.function);
282         if (ret < 0)
283                 return ret;
284         return 0;
285 }
286
287 int
288 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
289 {
290         if (eth_dev == NULL)
291                 return -EINVAL;
292
293         eth_dev->attached = DEV_DETACHED;
294         nb_ports--;
295         return 0;
296 }
297
298 static int
299 rte_eth_dev_init(struct rte_pci_driver *pci_drv,
300                  struct rte_pci_device *pci_dev)
301 {
302         struct eth_driver    *eth_drv;
303         struct rte_eth_dev *eth_dev;
304         char ethdev_name[RTE_ETH_NAME_MAX_LEN];
305
306         int diag;
307
308         eth_drv = (struct eth_driver *)pci_drv;
309
310         /* Create unique Ethernet device name using PCI address */
311         rte_eth_dev_create_unique_device_name(ethdev_name,
312                         sizeof(ethdev_name), pci_dev);
313
314         eth_dev = rte_eth_dev_allocate(ethdev_name, RTE_ETH_DEV_PCI);
315         if (eth_dev == NULL)
316                 return -ENOMEM;
317
318         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
319                 eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
320                                   eth_drv->dev_private_size,
321                                   RTE_CACHE_LINE_SIZE);
322                 if (eth_dev->data->dev_private == NULL)
323                         rte_panic("Cannot allocate memzone for private port data\n");
324         }
325         eth_dev->pci_dev = pci_dev;
326         eth_dev->driver = eth_drv;
327         eth_dev->data->rx_mbuf_alloc_failed = 0;
328
329         /* init user callbacks */
330         TAILQ_INIT(&(eth_dev->link_intr_cbs));
331
332         /*
333          * Set the default MTU.
334          */
335         eth_dev->data->mtu = ETHER_MTU;
336
337         /* Invoke PMD device initialization function */
338         diag = (*eth_drv->eth_dev_init)(eth_dev);
339         if (diag == 0)
340                 return 0;
341
342         PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%u device_id=0x%x) failed\n",
343                         pci_drv->name,
344                         (unsigned) pci_dev->id.vendor_id,
345                         (unsigned) pci_dev->id.device_id);
346         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
347                 rte_free(eth_dev->data->dev_private);
348         rte_eth_dev_release_port(eth_dev);
349         return diag;
350 }
351
/*
 * PCI remove callback: tear down the ethdev bound to @pci_dev.
 * Runs the PMD uninit hook (aborting on its error), releases the port
 * slot, frees private data (primary process only), and clears the
 * device's back-pointers.  Returns 0, -EINVAL, -ENODEV, or the PMD's
 * error code.
 */
static int
rte_eth_dev_uninit(struct rte_pci_device *pci_dev)
{
	const struct eth_driver *eth_drv;
	struct rte_eth_dev *eth_dev;
	char ethdev_name[RTE_ETH_NAME_MAX_LEN];
	int ret;

	if (pci_dev == NULL)
		return -EINVAL;

	/* Create unique Ethernet device name using PCI address */
	rte_eth_dev_create_unique_device_name(ethdev_name,
			sizeof(ethdev_name), pci_dev);

	eth_dev = rte_eth_dev_allocated(ethdev_name);
	if (eth_dev == NULL)
		return -ENODEV;

	eth_drv = (const struct eth_driver *)pci_dev->driver;

	/* Invoke PMD device uninit function */
	if (*eth_drv->eth_dev_uninit) {
		ret = (*eth_drv->eth_dev_uninit)(eth_dev);
		if (ret)
			return ret;
	}

	/* free ether device */
	rte_eth_dev_release_port(eth_dev);

	/* dev_private was allocated by the primary process in init */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);

	/* clear back-pointers so stale slot state cannot be reused */
	eth_dev->pci_dev = NULL;
	eth_dev->driver = NULL;
	eth_dev->data = NULL;

	return 0;
}
392
/**
 * Register an Ethernet [Poll Mode] driver.
 *
 * Function invoked by the initialization function of an Ethernet driver
 * to simultaneously register itself as a PCI driver and as an Ethernet
 * Poll Mode Driver.
 * Invokes the rte_eal_pci_register() function to register the *pci_drv*
 * structure embedded in the *eth_drv* structure, after having stored the
 * addresses of the rte_eth_dev_init() and rte_eth_dev_uninit() functions
 * in the *devinit* and *devuninit* fields of the *pci_drv* structure.
 * During the PCI probing phase, the rte_eth_dev_init() function is
 * invoked for each PCI [Ethernet device] matching the embedded PCI
 * identifiers provided by the driver.
 */
void
rte_eth_driver_register(struct eth_driver *eth_drv)
{
	eth_drv->pci_drv.devinit = rte_eth_dev_init;
	eth_drv->pci_drv.devuninit = rte_eth_dev_uninit;
	rte_eal_pci_register(&eth_drv->pci_drv);
}
414
415 int
416 rte_eth_dev_is_valid_port(uint8_t port_id)
417 {
418         if (port_id >= RTE_MAX_ETHPORTS ||
419             rte_eth_devices[port_id].attached != DEV_ATTACHED)
420                 return 0;
421         else
422                 return 1;
423 }
424
425 int
426 rte_eth_dev_socket_id(uint8_t port_id)
427 {
428         if (!rte_eth_dev_is_valid_port(port_id))
429                 return -1;
430         return rte_eth_devices[port_id].data->numa_node;
431 }
432
/* Return the number of Ethernet ports currently attached. */
uint8_t
rte_eth_dev_count(void)
{
	return nb_ports;
}
438
439 static enum rte_eth_dev_type
440 rte_eth_dev_get_device_type(uint8_t port_id)
441 {
442         if (!rte_eth_dev_is_valid_port(port_id))
443                 return RTE_ETH_DEV_UNKNOWN;
444         return rte_eth_devices[port_id].dev_type;
445 }
446
447 static int
448 rte_eth_dev_get_addr_by_port(uint8_t port_id, struct rte_pci_addr *addr)
449 {
450         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
451
452         if (addr == NULL) {
453                 PMD_DEBUG_TRACE("Null pointer is specified\n");
454                 return -EINVAL;
455         }
456
457         *addr = rte_eth_devices[port_id].pci_dev->addr;
458         return 0;
459 }
460
461 static int
462 rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
463 {
464         char *tmp;
465
466         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
467
468         if (name == NULL) {
469                 PMD_DEBUG_TRACE("Null pointer is specified\n");
470                 return -EINVAL;
471         }
472
473         /* shouldn't check 'rte_eth_devices[i].data',
474          * because it might be overwritten by VDEV PMD */
475         tmp = rte_eth_dev_data[port_id].name;
476         strcpy(name, tmp);
477         return 0;
478 }
479
480 static int
481 rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
482 {
483         int i;
484
485         if (name == NULL) {
486                 PMD_DEBUG_TRACE("Null pointer is specified\n");
487                 return -EINVAL;
488         }
489
490         *port_id = RTE_MAX_ETHPORTS;
491
492         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
493
494                 if (!strncmp(name,
495                         rte_eth_dev_data[i].name, strlen(name))) {
496
497                         *port_id = i;
498
499                         return 0;
500                 }
501         }
502         return -ENODEV;
503 }
504
505 static int
506 rte_eth_dev_get_port_by_addr(const struct rte_pci_addr *addr, uint8_t *port_id)
507 {
508         int i;
509         struct rte_pci_device *pci_dev = NULL;
510
511         if (addr == NULL) {
512                 PMD_DEBUG_TRACE("Null pointer is specified\n");
513                 return -EINVAL;
514         }
515
516         *port_id = RTE_MAX_ETHPORTS;
517
518         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
519
520                 pci_dev = rte_eth_devices[i].pci_dev;
521
522                 if (pci_dev &&
523                         !rte_eal_compare_pci_addr(&pci_dev->addr, addr)) {
524
525                         *port_id = i;
526
527                         return 0;
528                 }
529         }
530         return -ENODEV;
531 }
532
/*
 * Check whether a port may be detached.
 * Returns 0 when detachable, -EINVAL for an invalid port, -ENOTSUP when
 * the kernel driver does not support detaching, and non-zero (1) when
 * the device lacks the RTE_ETH_DEV_DETACHABLE flag.
 */
static int
rte_eth_dev_is_detachable(uint8_t port_id)
{
	uint32_t dev_flags;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return -EINVAL;
	}

	/* only UIO-style drivers (or none) support detach; VFIO and any
	 * unknown driver are rejected */
	switch (rte_eth_devices[port_id].data->kdrv) {
	case RTE_KDRV_IGB_UIO:
	case RTE_KDRV_UIO_GENERIC:
	case RTE_KDRV_NIC_UIO:
	case RTE_KDRV_NONE:
		break;
	case RTE_KDRV_VFIO:
	default:
		return -ENOTSUP;
	}
	dev_flags = rte_eth_devices[port_id].data->dev_flags;
	return !(dev_flags & RTE_ETH_DEV_DETACHABLE);
}
556
/* attach the new physical device, then store port_id of the device */
static int
rte_eth_dev_attach_pdev(struct rte_pci_addr *addr, uint8_t *port_id)
{
	if ((addr == NULL) || (port_id == NULL))
		goto err;

	/* re-construct pci_device_list */
	if (rte_eal_pci_scan())
		goto err;
	/* Invoke probe func of the driver can handle the new device. */
	if (rte_eal_pci_probe_one(addr))
		goto err;

	/* resolve the port id the probe assigned to this address */
	if (rte_eth_dev_get_port_by_addr(addr, port_id))
		goto err;

	return 0;
err:
	RTE_LOG(ERR, EAL, "Driver, cannot attach the device\n");
	return -1;
}
579
/* detach the new physical device, then store pci_addr of the device */
static int
rte_eth_dev_detach_pdev(uint8_t port_id, struct rte_pci_addr *addr)
{
	struct rte_pci_addr freed_addr;
	struct rte_pci_addr vp;

	if (addr == NULL)
		goto err;

	/* check whether the driver supports detach feature, or not */
	if (rte_eth_dev_is_detachable(port_id))
		goto err;

	/* get pci address by port id */
	if (rte_eth_dev_get_addr_by_port(port_id, &freed_addr))
		goto err;

	/* Zeroed pci addr means the port comes from virtual device */
	vp.domain = vp.bus = vp.devid = vp.function = 0;
	if (rte_eal_compare_pci_addr(&vp, &freed_addr) == 0)
		goto err;

	/* invoke devuninit func of the pci driver,
	 * also remove the device from pci_device_list */
	if (rte_eal_pci_detach(&freed_addr))
		goto err;

	/* report the address of the device that was freed */
	*addr = freed_addr;
	return 0;
err:
	RTE_LOG(ERR, EAL, "Driver, cannot detach the device\n");
	return -1;
}
614
615 /* attach the new virtual device, then store port_id of the device */
616 static int
617 rte_eth_dev_attach_vdev(const char *vdevargs, uint8_t *port_id)
618 {
619         char *name = NULL, *args = NULL;
620         int ret = -1;
621
622         if ((vdevargs == NULL) || (port_id == NULL))
623                 goto end;
624
625         /* parse vdevargs, then retrieve device name and args */
626         if (rte_eal_parse_devargs_str(vdevargs, &name, &args))
627                 goto end;
628
629         /* walk around dev_driver_list to find the driver of the device,
630          * then invoke probe function of the driver.
631          * rte_eal_vdev_init() updates port_id allocated after
632          * initialization.
633          */
634         if (rte_eal_vdev_init(name, args))
635                 goto end;
636
637         if (rte_eth_dev_get_port_by_name(name, port_id))
638                 goto end;
639
640         ret = 0;
641 end:
642         if (name)
643                 free(name);
644         if (args)
645                 free(args);
646
647         if (ret < 0)
648                 RTE_LOG(ERR, EAL, "Driver, cannot attach the device\n");
649         return ret;
650 }
651
652 /* detach the new virtual device, then store the name of the device */
653 static int
654 rte_eth_dev_detach_vdev(uint8_t port_id, char *vdevname)
655 {
656         char name[RTE_ETH_NAME_MAX_LEN];
657
658         if (vdevname == NULL)
659                 goto err;
660
661         /* check whether the driver supports detach feature, or not */
662         if (rte_eth_dev_is_detachable(port_id))
663                 goto err;
664
665         /* get device name by port id */
666         if (rte_eth_dev_get_name_by_port(port_id, name))
667                 goto err;
668         /* walk around dev_driver_list to find the driver of the device,
669          * then invoke uninit function of the driver */
670         if (rte_eal_vdev_uninit(name))
671                 goto err;
672
673         strncpy(vdevname, name, sizeof(name));
674         return 0;
675 err:
676         RTE_LOG(ERR, EAL, "Driver, cannot detach the device\n");
677         return -1;
678 }
679
680 /* attach the new device, then store port_id of the device */
681 int
682 rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
683 {
684         struct rte_pci_addr addr;
685
686         if ((devargs == NULL) || (port_id == NULL))
687                 return -EINVAL;
688
689         if (eal_parse_pci_DomBDF(devargs, &addr) == 0)
690                 return rte_eth_dev_attach_pdev(&addr, port_id);
691         else
692                 return rte_eth_dev_attach_vdev(devargs, port_id);
693 }
694
695 /* detach the device, then store the name of the device */
696 int
697 rte_eth_dev_detach(uint8_t port_id, char *name)
698 {
699         struct rte_pci_addr addr;
700         int ret;
701
702         if (name == NULL)
703                 return -EINVAL;
704
705         if (rte_eth_dev_get_device_type(port_id) == RTE_ETH_DEV_PCI) {
706                 ret = rte_eth_dev_get_addr_by_port(port_id, &addr);
707                 if (ret < 0)
708                         return ret;
709
710                 ret = rte_eth_dev_detach_pdev(port_id, &addr);
711                 if (ret == 0)
712                         snprintf(name, RTE_ETH_NAME_MAX_LEN,
713                                 "%04x:%02x:%02x.%d",
714                                 addr.domain, addr.bus,
715                                 addr.devid, addr.function);
716
717                 return ret;
718         } else
719                 return rte_eth_dev_detach_vdev(port_id, name);
720 }
721
/*
 * (Re)size the per-port RX queue pointer array to @nb_queues entries.
 * On first configuration the array is zero-allocated; on reconfiguration
 * queues above the new count are released via the PMD hook, the array is
 * reallocated, and any newly added slots are zeroed.
 * Returns 0 on success, -ENOMEM on allocation failure, -ENOTSUP when a
 * shrink is requested but the PMD has no rx_queue_release.
 */
static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	void **rxq;
	unsigned i;

	if (dev->data->rx_queues == NULL) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else { /* re-configure */
		FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		/* release queues that fall outside the new count */
		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);
		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (rxq == NULL)
			/* NOTE(review): on realloc failure dev->data->rx_queues
			 * keeps the old array although the excess queues were
			 * already released -- confirm callers treat -ENOMEM as
			 * fatal for the port */
			return -(ENOMEM);
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			/* new slots must start out NULL (not yet set up) */
			memset(rxq + old_nb_queues, 0,
				sizeof(rxq[0]) * new_qs);
		}

		dev->data->rx_queues = rxq;

	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}
761
762 int
763 rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
764 {
765         struct rte_eth_dev *dev;
766
767         /* This function is only safe when called from the primary process
768          * in a multi-process setup*/
769         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
770
771         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
772
773         dev = &rte_eth_devices[port_id];
774         if (rx_queue_id >= dev->data->nb_rx_queues) {
775                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
776                 return -EINVAL;
777         }
778
779         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
780
781         if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
782                 PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
783                         " already started\n",
784                         rx_queue_id, port_id);
785                 return 0;
786         }
787
788         return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
789
790 }
791
792 int
793 rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
794 {
795         struct rte_eth_dev *dev;
796
797         /* This function is only safe when called from the primary process
798          * in a multi-process setup*/
799         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
800
801         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
802
803         dev = &rte_eth_devices[port_id];
804         if (rx_queue_id >= dev->data->nb_rx_queues) {
805                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
806                 return -EINVAL;
807         }
808
809         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
810
811         if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
812                 PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
813                         " already stopped\n",
814                         rx_queue_id, port_id);
815                 return 0;
816         }
817
818         return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
819
820 }
821
822 int
823 rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
824 {
825         struct rte_eth_dev *dev;
826
827         /* This function is only safe when called from the primary process
828          * in a multi-process setup*/
829         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
830
831         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
832
833         dev = &rte_eth_devices[port_id];
834         if (tx_queue_id >= dev->data->nb_tx_queues) {
835                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
836                 return -EINVAL;
837         }
838
839         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
840
841         if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
842                 PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
843                         " already started\n",
844                         tx_queue_id, port_id);
845                 return 0;
846         }
847
848         return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
849
850 }
851
852 int
853 rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
854 {
855         struct rte_eth_dev *dev;
856
857         /* This function is only safe when called from the primary process
858          * in a multi-process setup*/
859         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
860
861         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
862
863         dev = &rte_eth_devices[port_id];
864         if (tx_queue_id >= dev->data->nb_tx_queues) {
865                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
866                 return -EINVAL;
867         }
868
869         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
870
871         if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
872                 PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
873                         " already stopped\n",
874                         tx_queue_id, port_id);
875                 return 0;
876         }
877
878         return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
879
880 }
881
/*
 * (Re)size the per-port TX queue pointer array to @nb_queues entries.
 * Mirrors rte_eth_dev_rx_queue_config(): first configuration allocates a
 * zeroed array; reconfiguration releases excess queues, reallocates, and
 * zeroes newly added slots.
 * Returns 0 on success, -ENOMEM on allocation failure, -ENOTSUP when a
 * shrink is requested but the PMD has no tx_queue_release.
 */
static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	void **txq;
	unsigned i;

	if (dev->data->tx_queues == NULL) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
						   sizeof(dev->data->tx_queues[0]) * nb_queues,
						   RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else { /* re-configure */
		FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		/* release queues that fall outside the new count */
		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);
		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
				  RTE_CACHE_LINE_SIZE);
		if (txq == NULL)
			/* NOTE(review): on realloc failure dev->data->tx_queues
			 * keeps the old array although the excess queues were
			 * already released -- confirm callers treat -ENOMEM as
			 * fatal for the port */
			return -ENOMEM;
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			/* new slots must start out NULL (not yet set up) */
			memset(txq + old_nb_queues, 0,
			       sizeof(txq[0]) * new_qs);
		}

		dev->data->tx_queues = txq;

	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}
921
/*
 * Configure an Ethernet device.
 *
 * Validates the requested RX/TX queue counts against the build-time
 * RTE_MAX_QUEUES_PER_PORT limit and the driver-reported maxima, copies
 * *dev_conf into dev->data->dev_conf, sanity-checks the link-state
 * interrupt and jumbo-frame settings, (re)allocates the RX/TX queue
 * arrays, and finally invokes the driver's dev_configure callback.
 * On a failure after queue allocation the queue arrays are freed again.
 *
 * Must be called from the primary process while the port is stopped.
 *
 * @return 0 on success, a negative errno-style value on error.
 */
int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	/* Queue counts are first bounded by the build-time maximum. */
	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		PMD_DEBUG_TRACE(
			"Number of RX queues requested (%u) is greater than max supported(%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		return -EINVAL;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		PMD_DEBUG_TRACE(
			"Number of TX queues requested (%u) is greater than max supported(%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
	if (nb_rx_q > dev_info.max_rx_queues) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
				port_id, nb_rx_q, dev_info.max_rx_queues);
		return -EINVAL;
	}
	/* Zero queues in either direction is rejected. */
	if (nb_rx_q == 0) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
		return -EINVAL;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
				port_id, nb_tx_q, dev_info.max_tx_queues);
		return -EINVAL;
	}
	if (nb_tx_q == 0) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
		return -EINVAL;
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * If link state interrupt is enabled, check that the
	 * device supports it.
	 */
	if ((dev_conf->intr_conf.lsc == 1) &&
		(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
			PMD_DEBUG_TRACE("driver %s does not support lsc\n",
					dev->data->drv_name);
			return -EINVAL;
	}

	/*
	 * If jumbo frames are enabled, check that the maximum RX packet
	 * length is supported by the configured device.
	 */
	if (dev_conf->rxmode.jumbo_frame == 1) {
		if (dev_conf->rxmode.max_rx_pkt_len >
		    dev_info.max_rx_pktlen) {
			PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" > max valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)dev_info.max_rx_pktlen);
			return -EINVAL;
		} else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
			PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" < min valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)ETHER_MIN_LEN);
			return -EINVAL;
		}
	} else {
		/* Without jumbo frames, an out-of-range max_rx_pkt_len is
		 * silently clamped to the standard Ethernet frame size
		 * (only in the cached copy; *dev_conf is untouched). */
		if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
			dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
			/* Use default value */
			dev->data->dev_conf.rxmode.max_rx_pkt_len =
							ETHER_MAX_LEN;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
				port_id, diag);
		return diag;
	}

	diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
				port_id, diag);
		/* Undo the RX queue array allocation. */
		rte_eth_dev_rx_queue_config(dev, 0);
		return diag;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
				port_id, diag);
		/* Roll back both queue arrays on driver failure. */
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		return diag;
	}

	return 0;
}
1059
/*
 * Re-apply to the hardware the configuration cached in the device
 * data: unicast MAC addresses, promiscuous mode and all-multicast
 * mode.  Called from rte_eth_dev_start() after the driver's start.
 * Best-effort: failures are logged, never returned.
 */
static void
rte_eth_dev_config_restore(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct ether_addr addr;
	uint16_t i;
	uint32_t pool = 0;

	dev = &rte_eth_devices[port_id];

	rte_eth_dev_info_get(port_id, &dev_info);

	/* with SR-IOV active, addresses target the default VMDq pool */
	if (RTE_ETH_DEV_SRIOV(dev).active)
		pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

	/* replay MAC address configuration */
	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = dev->data->mac_addrs[i];

		/* skip zero address (empty slot) */
		if (is_zero_ether_addr(&addr))
			continue;

		/* add address to the hardware, but only if this slot is
		 * selected for the chosen pool */
		if  (*dev->dev_ops->mac_addr_add &&
			(dev->data->mac_pool_sel[i] & (1ULL << pool)))
			(*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
		else {
			PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
					port_id);
			/* exit the loop but not return an error */
			break;
		}
	}

	/* replay promiscuous configuration */
	if (rte_eth_promiscuous_get(port_id) == 1)
		rte_eth_promiscuous_enable(port_id);
	else if (rte_eth_promiscuous_get(port_id) == 0)
		rte_eth_promiscuous_disable(port_id);

	/* replay all multicast configuration */
	if (rte_eth_allmulticast_get(port_id) == 1)
		rte_eth_allmulticast_enable(port_id);
	else if (rte_eth_allmulticast_get(port_id) == 0)
		rte_eth_allmulticast_disable(port_id);
}
1108
/*
 * Start an Ethernet device.
 *
 * Invokes the driver's dev_start callback, marks the device started,
 * replays the configuration cached in the device data (MAC addresses,
 * promiscuous / all-multicast modes) and, when link-state interrupts
 * are not configured, performs one no-wait link-status update so the
 * cached link state is not stale.
 *
 * @return 0 on success (including when the device was already
 * started), or the negative error returned by the driver.
 */
int
rte_eth_dev_start(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	int diag;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	/* starting twice is tolerated: log it and report success */
	if (dev->data->dev_started != 0) {
		PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
			" already started\n",
			port_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	rte_eth_dev_config_restore(port_id);

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
		(*dev->dev_ops->link_update)(dev, 0);
	}
	return 0;
}
1146
1147 void
1148 rte_eth_dev_stop(uint8_t port_id)
1149 {
1150         struct rte_eth_dev *dev;
1151
1152         /* This function is only safe when called from the primary process
1153          * in a multi-process setup*/
1154         PROC_PRIMARY_OR_RET();
1155
1156         VALID_PORTID_OR_RET(port_id);
1157         dev = &rte_eth_devices[port_id];
1158
1159         FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1160
1161         if (dev->data->dev_started == 0) {
1162                 PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
1163                         " already stopped\n",
1164                         port_id);
1165                 return;
1166         }
1167
1168         dev->data->dev_started = 0;
1169         (*dev->dev_ops->dev_stop)(dev);
1170 }
1171
1172 int
1173 rte_eth_dev_set_link_up(uint8_t port_id)
1174 {
1175         struct rte_eth_dev *dev;
1176
1177         /* This function is only safe when called from the primary process
1178          * in a multi-process setup*/
1179         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1180
1181         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1182
1183         dev = &rte_eth_devices[port_id];
1184
1185         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1186         return (*dev->dev_ops->dev_set_link_up)(dev);
1187 }
1188
1189 int
1190 rte_eth_dev_set_link_down(uint8_t port_id)
1191 {
1192         struct rte_eth_dev *dev;
1193
1194         /* This function is only safe when called from the primary process
1195          * in a multi-process setup*/
1196         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1197
1198         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1199
1200         dev = &rte_eth_devices[port_id];
1201
1202         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1203         return (*dev->dev_ops->dev_set_link_down)(dev);
1204 }
1205
1206 void
1207 rte_eth_dev_close(uint8_t port_id)
1208 {
1209         struct rte_eth_dev *dev;
1210
1211         /* This function is only safe when called from the primary process
1212          * in a multi-process setup*/
1213         PROC_PRIMARY_OR_RET();
1214
1215         VALID_PORTID_OR_RET(port_id);
1216         dev = &rte_eth_devices[port_id];
1217
1218         FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1219         dev->data->dev_started = 0;
1220         (*dev->dev_ops->dev_close)(dev);
1221
1222         rte_free(dev->data->rx_queues);
1223         dev->data->rx_queues = NULL;
1224         rte_free(dev->data->tx_queues);
1225         dev->data->tx_queues = NULL;
1226 }
1227
1228 int
1229 rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
1230                        uint16_t nb_rx_desc, unsigned int socket_id,
1231                        const struct rte_eth_rxconf *rx_conf,
1232                        struct rte_mempool *mp)
1233 {
1234         int ret;
1235         uint32_t mbp_buf_size;
1236         struct rte_eth_dev *dev;
1237         struct rte_eth_dev_info dev_info;
1238
1239         /* This function is only safe when called from the primary process
1240          * in a multi-process setup*/
1241         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1242
1243         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1244
1245         dev = &rte_eth_devices[port_id];
1246         if (rx_queue_id >= dev->data->nb_rx_queues) {
1247                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
1248                 return -EINVAL;
1249         }
1250
1251         if (dev->data->dev_started) {
1252                 PMD_DEBUG_TRACE(
1253                     "port %d must be stopped to allow configuration\n", port_id);
1254                 return -EBUSY;
1255         }
1256
1257         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1258         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1259
1260         /*
1261          * Check the size of the mbuf data buffer.
1262          * This value must be provided in the private data of the memory pool.
1263          * First check that the memory pool has a valid private data.
1264          */
1265         rte_eth_dev_info_get(port_id, &dev_info);
1266         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1267                 PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
1268                                 mp->name, (int) mp->private_data_size,
1269                                 (int) sizeof(struct rte_pktmbuf_pool_private));
1270                 return -ENOSPC;
1271         }
1272         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1273
1274         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1275                 PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
1276                                 "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
1277                                 "=%d)\n",
1278                                 mp->name,
1279                                 (int)mbp_buf_size,
1280                                 (int)(RTE_PKTMBUF_HEADROOM +
1281                                       dev_info.min_rx_bufsize),
1282                                 (int)RTE_PKTMBUF_HEADROOM,
1283                                 (int)dev_info.min_rx_bufsize);
1284                 return -EINVAL;
1285         }
1286
1287         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1288                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1289                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1290
1291                 PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
1292                         "should be: <= %hu, = %hu, and a product of %hu\n",
1293                         nb_rx_desc,
1294                         dev_info.rx_desc_lim.nb_max,
1295                         dev_info.rx_desc_lim.nb_min,
1296                         dev_info.rx_desc_lim.nb_align);
1297                 return -EINVAL;
1298         }
1299
1300         if (rx_conf == NULL)
1301                 rx_conf = &dev_info.default_rxconf;
1302
1303         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1304                                               socket_id, rx_conf, mp);
1305         if (!ret) {
1306                 if (!dev->data->min_rx_buf_size ||
1307                     dev->data->min_rx_buf_size > mbp_buf_size)
1308                         dev->data->min_rx_buf_size = mbp_buf_size;
1309         }
1310
1311         return ret;
1312 }
1313
1314 int
1315 rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
1316                        uint16_t nb_tx_desc, unsigned int socket_id,
1317                        const struct rte_eth_txconf *tx_conf)
1318 {
1319         struct rte_eth_dev *dev;
1320         struct rte_eth_dev_info dev_info;
1321
1322         /* This function is only safe when called from the primary process
1323          * in a multi-process setup*/
1324         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1325
1326         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1327
1328         dev = &rte_eth_devices[port_id];
1329         if (tx_queue_id >= dev->data->nb_tx_queues) {
1330                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1331                 return -EINVAL;
1332         }
1333
1334         if (dev->data->dev_started) {
1335                 PMD_DEBUG_TRACE(
1336                     "port %d must be stopped to allow configuration\n", port_id);
1337                 return -EBUSY;
1338         }
1339
1340         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1341         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1342
1343         rte_eth_dev_info_get(port_id, &dev_info);
1344
1345         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1346             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1347             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1348                 PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
1349                                 "should be: <= %hu, = %hu, and a product of %hu\n",
1350                                 nb_tx_desc,
1351                                 dev_info.tx_desc_lim.nb_max,
1352                                 dev_info.tx_desc_lim.nb_min,
1353                                 dev_info.tx_desc_lim.nb_align);
1354                 return -EINVAL;
1355         }
1356
1357         if (tx_conf == NULL)
1358                 tx_conf = &dev_info.default_txconf;
1359
1360         return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
1361                                                socket_id, tx_conf);
1362 }
1363
1364 void
1365 rte_eth_promiscuous_enable(uint8_t port_id)
1366 {
1367         struct rte_eth_dev *dev;
1368
1369         VALID_PORTID_OR_RET(port_id);
1370         dev = &rte_eth_devices[port_id];
1371
1372         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1373         (*dev->dev_ops->promiscuous_enable)(dev);
1374         dev->data->promiscuous = 1;
1375 }
1376
1377 void
1378 rte_eth_promiscuous_disable(uint8_t port_id)
1379 {
1380         struct rte_eth_dev *dev;
1381
1382         VALID_PORTID_OR_RET(port_id);
1383         dev = &rte_eth_devices[port_id];
1384
1385         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1386         dev->data->promiscuous = 0;
1387         (*dev->dev_ops->promiscuous_disable)(dev);
1388 }
1389
1390 int
1391 rte_eth_promiscuous_get(uint8_t port_id)
1392 {
1393         struct rte_eth_dev *dev;
1394
1395         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1396
1397         dev = &rte_eth_devices[port_id];
1398         return dev->data->promiscuous;
1399 }
1400
1401 void
1402 rte_eth_allmulticast_enable(uint8_t port_id)
1403 {
1404         struct rte_eth_dev *dev;
1405
1406         VALID_PORTID_OR_RET(port_id);
1407         dev = &rte_eth_devices[port_id];
1408
1409         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1410         (*dev->dev_ops->allmulticast_enable)(dev);
1411         dev->data->all_multicast = 1;
1412 }
1413
1414 void
1415 rte_eth_allmulticast_disable(uint8_t port_id)
1416 {
1417         struct rte_eth_dev *dev;
1418
1419         VALID_PORTID_OR_RET(port_id);
1420         dev = &rte_eth_devices[port_id];
1421
1422         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1423         dev->data->all_multicast = 0;
1424         (*dev->dev_ops->allmulticast_disable)(dev);
1425 }
1426
1427 int
1428 rte_eth_allmulticast_get(uint8_t port_id)
1429 {
1430         struct rte_eth_dev *dev;
1431
1432         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1433
1434         dev = &rte_eth_devices[port_id];
1435         return dev->data->all_multicast;
1436 }
1437
/*
 * Atomically copy the device's link status into *link, using a single
 * 64-bit compare-and-set so a reader never observes a torn
 * rte_eth_link value while a writer updates it concurrently.
 *
 * NOTE(review): assumes struct rte_eth_link occupies exactly 64 bits
 * and is suitably aligned — confirm against the struct definition.
 *
 * Returns 0 on success, -1 if the CAS lost a race with a writer.
 */
static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	/* CAS with the destination's own current value: succeeds only
	 * if *dst was not modified in between, copying *src into it. */
	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
1451
1452 void
1453 rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
1454 {
1455         struct rte_eth_dev *dev;
1456
1457         VALID_PORTID_OR_RET(port_id);
1458         dev = &rte_eth_devices[port_id];
1459
1460         if (dev->data->dev_conf.intr_conf.lsc != 0)
1461                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1462         else {
1463                 FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1464                 (*dev->dev_ops->link_update)(dev, 1);
1465                 *eth_link = dev->data->dev_link;
1466         }
1467 }
1468
1469 void
1470 rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
1471 {
1472         struct rte_eth_dev *dev;
1473
1474         VALID_PORTID_OR_RET(port_id);
1475         dev = &rte_eth_devices[port_id];
1476
1477         if (dev->data->dev_conf.intr_conf.lsc != 0)
1478                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1479         else {
1480                 FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1481                 (*dev->dev_ops->link_update)(dev, 0);
1482                 *eth_link = dev->data->dev_link;
1483         }
1484 }
1485
1486 int
1487 rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
1488 {
1489         struct rte_eth_dev *dev;
1490
1491         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1492
1493         dev = &rte_eth_devices[port_id];
1494         memset(stats, 0, sizeof(*stats));
1495
1496         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1497         (*dev->dev_ops->stats_get)(dev, stats);
1498         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1499         return 0;
1500 }
1501
1502 void
1503 rte_eth_stats_reset(uint8_t port_id)
1504 {
1505         struct rte_eth_dev *dev;
1506
1507         VALID_PORTID_OR_RET(port_id);
1508         dev = &rte_eth_devices[port_id];
1509
1510         FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1511         (*dev->dev_ops->stats_reset)(dev);
1512 }
1513
/* retrieve ethdev extended statistics */
/*
 * Fill xstats[] with the generic ethdev statistics (global counters,
 * then per-RX-queue, then per-TX-queue counters), followed by any
 * driver-specific extended statistics appended by the xstats_get
 * callback.
 *
 * Returns the total number of statistics.  If n is smaller than that
 * total, the required size is returned and xstats[] is not (fully)
 * filled, so callers can size the array and retry.  A negative value
 * from the driver callback is propagated unchanged.
 */
int
rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
	unsigned n)
{
	struct rte_eth_stats eth_stats;
	struct rte_eth_dev *dev;
	unsigned count = 0, i, q;
	signed xcount = 0;
	uint64_t val, *stats_ptr;

	VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	/* Return generic statistics */
	count = RTE_NB_STATS + (dev->data->nb_rx_queues * RTE_NB_RXQ_STATS) +
		(dev->data->nb_tx_queues * RTE_NB_TXQ_STATS);

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL) {
		/* Retrieve the xstats from the driver at the end of the
		 * xstats struct.
		 */
		xcount = (*dev->dev_ops->xstats_get)(dev, &xstats[count],
			 (n > count) ? n - count : 0);

		if (xcount < 0)
			return xcount;
	}

	/* not enough room: report the size the caller needs */
	if (n < count + xcount)
		return count + xcount;

	/* now fill the xstats structure */
	count = 0;
	rte_eth_stats_get(port_id, &eth_stats);

	/* global stats */
	for (i = 0; i < RTE_NB_STATS; i++) {
		/* each table entry's offset locates its counter inside
		 * the rte_eth_stats structure */
		stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_stats_strings[i].offset);
		val = *stats_ptr;
		snprintf(xstats[count].name, sizeof(xstats[count].name),
			"%s", rte_stats_strings[i].name);
		xstats[count++].value = val;
	}

	/* per-rxq stats */
	for (q = 0; q < dev->data->nb_rx_queues; q++) {
		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
			/* per-queue counters are uint64_t arrays indexed
			 * by queue, hence the q * sizeof(uint64_t) term */
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_rxq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			snprintf(xstats[count].name, sizeof(xstats[count].name),
				"rx_q%u_%s", q,
				rte_rxq_stats_strings[i].name);
			xstats[count++].value = val;
		}
	}

	/* per-txq stats */
	for (q = 0; q < dev->data->nb_tx_queues; q++) {
		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_txq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			snprintf(xstats[count].name, sizeof(xstats[count].name),
				"tx_q%u_%s", q,
				rte_txq_stats_strings[i].name);
			xstats[count++].value = val;
		}
	}

	return count + xcount;
}
1592
1593 /* reset ethdev extended statistics */
1594 void
1595 rte_eth_xstats_reset(uint8_t port_id)
1596 {
1597         struct rte_eth_dev *dev;
1598
1599         VALID_PORTID_OR_RET(port_id);
1600         dev = &rte_eth_devices[port_id];
1601
1602         /* implemented by the driver */
1603         if (dev->dev_ops->xstats_reset != NULL) {
1604                 (*dev->dev_ops->xstats_reset)(dev);
1605                 return;
1606         }
1607
1608         /* fallback to default */
1609         rte_eth_stats_reset(port_id);
1610 }
1611
1612 static int
1613 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1614                 uint8_t is_rx)
1615 {
1616         struct rte_eth_dev *dev;
1617
1618         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1619
1620         dev = &rte_eth_devices[port_id];
1621
1622         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1623         return (*dev->dev_ops->queue_stats_mapping_set)
1624                         (dev, queue_id, stat_idx, is_rx);
1625 }
1626
1627
1628 int
1629 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1630                 uint8_t stat_idx)
1631 {
1632         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1633                         STAT_QMAP_TX);
1634 }
1635
1636
1637 int
1638 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1639                 uint8_t stat_idx)
1640 {
1641         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1642                         STAT_QMAP_RX);
1643 }
1644
1645
1646 void
1647 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1648 {
1649         struct rte_eth_dev *dev;
1650         const struct rte_eth_desc_lim lim = {
1651                 .nb_max = UINT16_MAX,
1652                 .nb_min = 0,
1653                 .nb_align = 1,
1654         };
1655
1656         VALID_PORTID_OR_RET(port_id);
1657         dev = &rte_eth_devices[port_id];
1658
1659         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
1660         dev_info->rx_desc_lim = lim;
1661         dev_info->tx_desc_lim = lim;
1662
1663         FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1664         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1665         dev_info->pci_dev = dev->pci_dev;
1666         dev_info->driver_name = dev->data->drv_name;
1667 }
1668
1669 void
1670 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1671 {
1672         struct rte_eth_dev *dev;
1673
1674         VALID_PORTID_OR_RET(port_id);
1675         dev = &rte_eth_devices[port_id];
1676         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1677 }
1678
1679
1680 int
1681 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1682 {
1683         struct rte_eth_dev *dev;
1684
1685         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1686
1687         dev = &rte_eth_devices[port_id];
1688         *mtu = dev->data->mtu;
1689         return 0;
1690 }
1691
1692 int
1693 rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
1694 {
1695         int ret;
1696         struct rte_eth_dev *dev;
1697
1698         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1699         dev = &rte_eth_devices[port_id];
1700         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
1701
1702         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
1703         if (!ret)
1704                 dev->data->mtu = mtu;
1705
1706         return ret;
1707 }
1708
1709 int
1710 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1711 {
1712         struct rte_eth_dev *dev;
1713
1714         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1715         dev = &rte_eth_devices[port_id];
1716         if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1717                 PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1718                 return -ENOSYS;
1719         }
1720
1721         if (vlan_id > 4095) {
1722                 PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1723                                 port_id, (unsigned) vlan_id);
1724                 return -EINVAL;
1725         }
1726         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1727
1728         return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1729 }
1730
1731 int
1732 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1733 {
1734         struct rte_eth_dev *dev;
1735
1736         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1737         dev = &rte_eth_devices[port_id];
1738         if (rx_queue_id >= dev->data->nb_rx_queues) {
1739                 PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", port_id);
1740                 return -EINVAL;
1741         }
1742
1743         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1744         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1745
1746         return 0;
1747 }
1748
1749 int
1750 rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
1751 {
1752         struct rte_eth_dev *dev;
1753
1754         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1755         dev = &rte_eth_devices[port_id];
1756         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1757         (*dev->dev_ops->vlan_tpid_set)(dev, tpid);
1758
1759         return 0;
1760 }
1761
/*
 * Apply the VLAN offload settings encoded in offload_mask (strip /
 * filter / extend bits).  Only the settings that actually differ from
 * the current rxmode configuration are forwarded to the driver; the
 * cached rxmode flags are updated to the requested values first.
 *
 * @return 0 if nothing changed or the driver was updated,
 *         -ENODEV for an invalid port, -ENOTSUP if the driver lacks
 *         the vlan_offload_set callback.
 */
int
rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
{
	struct rte_eth_dev *dev;
	int ret = 0;
	int mask = 0;	/* bits of the settings that changed */
	int cur, org = 0;

	VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	/*check which option changed by application*/
	cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
	if (cur != org) {
		dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
		mask |= ETH_VLAN_STRIP_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
	if (cur != org) {
		dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
		mask |= ETH_VLAN_FILTER_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
	if (cur != org) {
		dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
		mask |= ETH_VLAN_EXTEND_MASK;
	}

	/*no change*/
	if (mask == 0)
		return ret;

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
	(*dev->dev_ops->vlan_offload_set)(dev, mask);

	return ret;
}
1804
1805 int
1806 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1807 {
1808         struct rte_eth_dev *dev;
1809         int ret = 0;
1810
1811         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1812         dev = &rte_eth_devices[port_id];
1813
1814         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1815                 ret |= ETH_VLAN_STRIP_OFFLOAD;
1816
1817         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1818                 ret |= ETH_VLAN_FILTER_OFFLOAD;
1819
1820         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1821                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
1822
1823         return ret;
1824 }
1825
/* Set the port-based VLAN id (PVID) inserted on untagged TX packets. */
int
rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
        /* Driver callback returns nothing; reaching it means success. */
        (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);

        return 0;
}
1838
/* Read the current link flow control configuration into *fc_conf. */
int
rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
        /* Zero the caller's struct first so fields the driver does not
         * fill in come back well-defined. */
        memset(fc_conf, 0, sizeof(*fc_conf));
        return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
}
1850
/* Configure link-level (802.3x pause frame) flow control on a port. */
int
rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        /* send_xon is a boolean flag; only 0 and 1 are meaningful. */
        if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
                PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
                return -EINVAL;
        }

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
        return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
}
1866
1867 int
1868 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
1869 {
1870         struct rte_eth_dev *dev;
1871
1872         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1873         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
1874                 PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
1875                 return -EINVAL;
1876         }
1877
1878         dev = &rte_eth_devices[port_id];
1879         /* High water, low water validation are device specific */
1880         if  (*dev->dev_ops->priority_flow_ctrl_set)
1881                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
1882         return -ENOTSUP;
1883 }
1884
1885 static int
1886 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
1887                         uint16_t reta_size)
1888 {
1889         uint16_t i, num;
1890
1891         if (!reta_conf)
1892                 return -EINVAL;
1893
1894         if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
1895                 PMD_DEBUG_TRACE("Invalid reta size, should be %u aligned\n",
1896                                                         RTE_RETA_GROUP_SIZE);
1897                 return -EINVAL;
1898         }
1899
1900         num = reta_size / RTE_RETA_GROUP_SIZE;
1901         for (i = 0; i < num; i++) {
1902                 if (reta_conf[i].mask)
1903                         return 0;
1904         }
1905
1906         return -EINVAL;
1907 }
1908
1909 static int
1910 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
1911                          uint16_t reta_size,
1912                          uint8_t max_rxq)
1913 {
1914         uint16_t i, idx, shift;
1915
1916         if (!reta_conf)
1917                 return -EINVAL;
1918
1919         if (max_rxq == 0) {
1920                 PMD_DEBUG_TRACE("No receive queue is available\n");
1921                 return -EINVAL;
1922         }
1923
1924         for (i = 0; i < reta_size; i++) {
1925                 idx = i / RTE_RETA_GROUP_SIZE;
1926                 shift = i % RTE_RETA_GROUP_SIZE;
1927                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
1928                         (reta_conf[idx].reta[shift] >= max_rxq)) {
1929                         PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
1930                                 "the maximum rxq index: %u\n", idx, shift,
1931                                 reta_conf[idx].reta[shift], max_rxq);
1932                         return -EINVAL;
1933                 }
1934         }
1935
1936         return 0;
1937 }
1938
/*
 * Program the RSS redirection table of a port after validating the group
 * masks and that every selected entry references an existing RX queue.
 */
int
rte_eth_dev_rss_reta_update(uint8_t port_id,
                            struct rte_eth_rss_reta_entry64 *reta_conf,
                            uint16_t reta_size)
{
        struct rte_eth_dev *dev;
        int ret;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        /* Check mask bits */
        ret = rte_eth_check_reta_mask(reta_conf, reta_size);
        if (ret < 0)
                return ret;

        dev = &rte_eth_devices[port_id];

        /* Check entry value */
        ret = rte_eth_check_reta_entry(reta_conf, reta_size,
                                dev->data->nb_rx_queues);
        if (ret < 0)
                return ret;

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
        return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
}
1964
1965 int
1966 rte_eth_dev_rss_reta_query(uint8_t port_id,
1967                            struct rte_eth_rss_reta_entry64 *reta_conf,
1968                            uint16_t reta_size)
1969 {
1970         struct rte_eth_dev *dev;
1971         int ret;
1972
1973         if (port_id >= nb_ports) {
1974                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1975                 return -ENODEV;
1976         }
1977
1978         /* Check mask bits */
1979         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1980         if (ret < 0)
1981                 return ret;
1982
1983         dev = &rte_eth_devices[port_id];
1984         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
1985         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
1986 }
1987
1988 int
1989 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
1990 {
1991         struct rte_eth_dev *dev;
1992         uint16_t rss_hash_protos;
1993
1994         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1995         rss_hash_protos = rss_conf->rss_hf;
1996         if ((rss_hash_protos != 0) &&
1997             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
1998                 PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
1999                                 rss_hash_protos);
2000                 return -EINVAL;
2001         }
2002         dev = &rte_eth_devices[port_id];
2003         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2004         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
2005 }
2006
/* Read the current RSS hash configuration of a port into *rss_conf. */
int
rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
                              struct rte_eth_rss_conf *rss_conf)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
        return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
}
2018
/*
 * Register a UDP port as carrying tunnelled traffic (e.g. VXLAN) so the
 * hardware can recognize and offload it.
 */
int
rte_eth_dev_udp_tunnel_add(uint8_t port_id,
                           struct rte_eth_udp_tunnel *udp_tunnel)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        if (udp_tunnel == NULL) {
                PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
                return -EINVAL;
        }

        if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
                PMD_DEBUG_TRACE("Invalid tunnel type\n");
                return -EINVAL;
        }

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_add, -ENOTSUP);
        return (*dev->dev_ops->udp_tunnel_add)(dev, udp_tunnel);
}
2040
2041 int
2042 rte_eth_dev_udp_tunnel_delete(uint8_t port_id,
2043                               struct rte_eth_udp_tunnel *udp_tunnel)
2044 {
2045         struct rte_eth_dev *dev;
2046
2047         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2048         dev = &rte_eth_devices[port_id];
2049
2050         if (udp_tunnel == NULL) {
2051                 PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2052                 return -EINVAL;
2053         }
2054
2055         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2056                 PMD_DEBUG_TRACE("Invalid tunnel type\n");
2057                 return -EINVAL;
2058         }
2059
2060         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_del, -ENOTSUP);
2061         return (*dev->dev_ops->udp_tunnel_del)(dev, udp_tunnel);
2062 }
2063
/* Turn on the port's physical identification LED. */
int
rte_eth_led_on(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
        return (*dev->dev_ops->dev_led_on)(dev);
}
2074
/* Turn off the port's physical identification LED. */
int
rte_eth_led_off(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
        return (*dev->dev_ops->dev_led_off)(dev);
}
2085
2086 /*
2087  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2088  * an empty spot.
2089  */
2090 static int
2091 get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2092 {
2093         struct rte_eth_dev_info dev_info;
2094         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2095         unsigned i;
2096
2097         rte_eth_dev_info_get(port_id, &dev_info);
2098
2099         for (i = 0; i < dev_info.max_mac_addrs; i++)
2100                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2101                         return i;
2102
2103         return -1;
2104 }
2105
2106 static const struct ether_addr null_mac_addr;
2107
/*
 * Add @addr to the port's MAC address table and associate it with @pool.
 *
 * If the address is already present only the pool bitmap is extended;
 * otherwise it is stored in the first free (all-zero) slot.  Returns 0 on
 * success, -EINVAL for a zero address or pool id >= ETH_64_POOLS, -ENOSPC
 * when the table is full, -ENODEV/-ENOTSUP for port/driver problems.
 */
int
rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
                        uint32_t pool)
{
        struct rte_eth_dev *dev;
        int index;
        uint64_t pool_mask;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);

        /* The all-zero address marks free slots, so it cannot be added. */
        if (is_zero_ether_addr(addr)) {
                PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
                        port_id);
                return -EINVAL;
        }
        if (pool >= ETH_64_POOLS) {
                PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
                return -EINVAL;
        }

        index = get_mac_addr_index(port_id, addr);
        if (index < 0) {
                /* Address not present yet: claim the first empty slot. */
                index = get_mac_addr_index(port_id, &null_mac_addr);
                if (index < 0) {
                        PMD_DEBUG_TRACE("port %d: MAC address array full\n",
                                port_id);
                        return -ENOSPC;
                }
        } else {
                pool_mask = dev->data->mac_pool_sel[index];

                /* Check if both MAC address and pool is already there, and do nothing */
                if (pool_mask & (1ULL << pool))
                        return 0;
        }

        /* Update NIC */
        (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);

        /* Update address in NIC data structure */
        ether_addr_copy(addr, &dev->data->mac_addrs[index]);

        /* Update pool bitmap in NIC data structure */
        dev->data->mac_pool_sel[index] |= (1ULL << pool);

        return 0;
}
2157
/*
 * Remove @addr from the port's MAC address table.
 *
 * Slot 0 holds the default MAC address and may not be removed here
 * (use rte_eth_dev_default_mac_addr_set() to change it).  Removing an
 * address that is not present is a silent no-op.
 */
int
rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
{
        struct rte_eth_dev *dev;
        int index;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);

        index = get_mac_addr_index(port_id, addr);
        if (index == 0) {
                PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
                return -EADDRINUSE;
        } else if (index < 0)
                return 0;  /* Do nothing if address wasn't found */

        /* Update NIC */
        (*dev->dev_ops->mac_addr_remove)(dev, index);

        /* Update address in NIC data structure */
        ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);

        /* reset pool bitmap */
        dev->data->mac_pool_sel[index] = 0;

        return 0;
}
2186
/*
 * Replace the port's default MAC address (slot 0) with @addr.
 * The address must be a valid, assigned (unicast, non-zero) address.
 */
int
rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        if (!is_valid_assigned_ether_addr(addr))
                return -EINVAL;

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);

        /* Update default address in NIC data structure */
        /* NOTE(review): the software copy is updated before the driver
         * call; the callback returns void so a device-level failure would
         * leave the copy out of sync -- confirm drivers cannot fail here. */
        ether_addr_copy(addr, &dev->data->mac_addrs[0]);

        (*dev->dev_ops->mac_addr_set)(dev, addr);

        return 0;
}
2207
2208 int
2209 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
2210                                 uint16_t rx_mode, uint8_t on)
2211 {
2212         uint16_t num_vfs;
2213         struct rte_eth_dev *dev;
2214         struct rte_eth_dev_info dev_info;
2215
2216         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2217
2218         dev = &rte_eth_devices[port_id];
2219         rte_eth_dev_info_get(port_id, &dev_info);
2220
2221         num_vfs = dev_info.max_vfs;
2222         if (vf > num_vfs) {
2223                 PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
2224                 return -EINVAL;
2225         }
2226
2227         if (rx_mode == 0) {
2228                 PMD_DEBUG_TRACE("set VF RX mode:mode mask ca not be zero\n");
2229                 return -EINVAL;
2230         }
2231         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
2232         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
2233 }
2234
2235 /*
2236  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2237  * an empty spot.
2238  */
2239 static int
2240 get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2241 {
2242         struct rte_eth_dev_info dev_info;
2243         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2244         unsigned i;
2245
2246         rte_eth_dev_info_get(port_id, &dev_info);
2247         if (!dev->data->hash_mac_addrs)
2248                 return -1;
2249
2250         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2251                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2252                         ETHER_ADDR_LEN) == 0)
2253                         return i;
2254
2255         return -1;
2256 }
2257
/*
 * Add (@on != 0) or remove (@on == 0) @addr in the port's unicast hash
 * table, mirroring the change in dev->data->hash_mac_addrs on success.
 * Returns 0 on success/no-op, -EINVAL for a zero address or for removing
 * an address that was never set, -ENOSPC when the table is full.
 */
int
rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
                                uint8_t on)
{
        int index;
        int ret;
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        if (is_zero_ether_addr(addr)) {
                PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
                        port_id);
                return -EINVAL;
        }

        index = get_hash_mac_addr_index(port_id, addr);
        /* Check if it's already there, and do nothing */
        if ((index >= 0) && (on))
                return 0;

        if (index < 0) {
                if (!on) {
                        /* Cannot remove what was never added. */
                        PMD_DEBUG_TRACE("port %d: the MAC address was not "
                                "set in UTA\n", port_id);
                        return -EINVAL;
                }

                /* Adding: claim the first empty slot. */
                index = get_hash_mac_addr_index(port_id, &null_mac_addr);
                if (index < 0) {
                        PMD_DEBUG_TRACE("port %d: MAC address array full\n",
                                        port_id);
                        return -ENOSPC;
                }
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
        ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
        if (ret == 0) {
                /* Update address in NIC data structure */
                if (on)
                        ether_addr_copy(addr,
                                        &dev->data->hash_mac_addrs[index]);
                else
                        ether_addr_copy(&null_mac_addr,
                                        &dev->data->hash_mac_addrs[index]);
        }

        return ret;
}
2309
/* Enable/disable the whole unicast hash table of a port at once. */
int
rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
        return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
}
2322
2323 int
2324 rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on)
2325 {
2326         uint16_t num_vfs;
2327         struct rte_eth_dev *dev;
2328         struct rte_eth_dev_info dev_info;
2329
2330         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2331
2332         dev = &rte_eth_devices[port_id];
2333         rte_eth_dev_info_get(port_id, &dev_info);
2334
2335         num_vfs = dev_info.max_vfs;
2336         if (vf > num_vfs) {
2337                 PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2338                 return -EINVAL;
2339         }
2340
2341         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2342         return (*dev->dev_ops->set_vf_rx)(dev, vf, on);
2343 }
2344
2345 int
2346 rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on)
2347 {
2348         uint16_t num_vfs;
2349         struct rte_eth_dev *dev;
2350         struct rte_eth_dev_info dev_info;
2351
2352         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2353
2354         dev = &rte_eth_devices[port_id];
2355         rte_eth_dev_info_get(port_id, &dev_info);
2356
2357         num_vfs = dev_info.max_vfs;
2358         if (vf > num_vfs) {
2359                 PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
2360                 return -EINVAL;
2361         }
2362
2363         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2364         return (*dev->dev_ops->set_vf_tx)(dev, vf, on);
2365 }
2366
/*
 * Enable (@vlan_on != 0) or disable filtering of @vlan_id for every VF
 * pool selected in @vf_mask (one bit per pool).
 */
int
rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
                               uint64_t vf_mask, uint8_t vlan_on)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];

        /* VLAN IDs are 12-bit values. */
        if (vlan_id > ETHER_MAX_VLAN_ID) {
                PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
                        vlan_id);
                return -EINVAL;
        }

        /* An empty pool mask would be a no-op; treat it as a caller bug. */
        if (vf_mask == 0) {
                PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
                return -EINVAL;
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
        return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
                                                   vf_mask, vlan_on);
}
2392
2393 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
2394                                         uint16_t tx_rate)
2395 {
2396         struct rte_eth_dev *dev;
2397         struct rte_eth_dev_info dev_info;
2398         struct rte_eth_link link;
2399
2400         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2401
2402         dev = &rte_eth_devices[port_id];
2403         rte_eth_dev_info_get(port_id, &dev_info);
2404         link = dev->data->dev_link;
2405
2406         if (queue_idx > dev_info.max_tx_queues) {
2407                 PMD_DEBUG_TRACE("set queue rate limit:port %d: "
2408                                 "invalid queue id=%d\n", port_id, queue_idx);
2409                 return -EINVAL;
2410         }
2411
2412         if (tx_rate > link.link_speed) {
2413                 PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
2414                                 "bigger than link speed= %d\n",
2415                         tx_rate, link.link_speed);
2416                 return -EINVAL;
2417         }
2418
2419         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2420         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2421 }
2422
2423 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
2424                                 uint64_t q_msk)
2425 {
2426         struct rte_eth_dev *dev;
2427         struct rte_eth_dev_info dev_info;
2428         struct rte_eth_link link;
2429
2430         if (q_msk == 0)
2431                 return 0;
2432
2433         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2434
2435         dev = &rte_eth_devices[port_id];
2436         rte_eth_dev_info_get(port_id, &dev_info);
2437         link = dev->data->dev_link;
2438
2439         if (vf > dev_info.max_vfs) {
2440                 PMD_DEBUG_TRACE("set VF rate limit:port %d: "
2441                                 "invalid vf id=%d\n", port_id, vf);
2442                 return -EINVAL;
2443         }
2444
2445         if (tx_rate > link.link_speed) {
2446                 PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
2447                                 "bigger than link speed= %d\n",
2448                                 tx_rate, link.link_speed);
2449                 return -EINVAL;
2450         }
2451
2452         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
2453         return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
2454 }
2455
2456 int
2457 rte_eth_mirror_rule_set(uint8_t port_id,
2458                         struct rte_eth_mirror_conf *mirror_conf,
2459                         uint8_t rule_id, uint8_t on)
2460 {
2461         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2462
2463         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2464         if (mirror_conf->rule_type == 0) {
2465                 PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2466                 return -EINVAL;
2467         }
2468
2469         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2470                 PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
2471                                 ETH_64_POOLS - 1);
2472                 return -EINVAL;
2473         }
2474
2475         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
2476              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
2477             (mirror_conf->pool_mask == 0)) {
2478                 PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
2479                 return -EINVAL;
2480         }
2481
2482         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
2483             mirror_conf->vlan.vlan_mask == 0) {
2484                 PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
2485                 return -EINVAL;
2486         }
2487
2488         dev = &rte_eth_devices[port_id];
2489         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2490
2491         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2492 }
2493
2494 int
2495 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2496 {
2497         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2498
2499         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2500
2501         dev = &rte_eth_devices[port_id];
2502         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2503
2504         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2505 }
2506
2507 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
/*
 * Debug (non-inline) build of rte_eth_rx_burst(): validates port, driver
 * RX function and queue id before dispatching, returning 0 packets on any
 * error instead of crashing.
 */
uint16_t
rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
                 struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, 0);

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
        if (queue_id >= dev->data->nb_rx_queues) {
                PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
                return 0;
        }
        return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
                                                rx_pkts, nb_pkts);
}
2525
/*
 * Debug (non-inline) build of rte_eth_tx_burst(): validates port, driver
 * TX function and queue id before dispatching, returning 0 packets sent
 * on any error.
 */
uint16_t
rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
                 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, 0);

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
        if (queue_id >= dev->data->nb_tx_queues) {
                PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
                return 0;
        }
        return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
                                                tx_pkts, nb_pkts);
}
2544
2545 uint32_t
2546 rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
2547 {
2548         struct rte_eth_dev *dev;
2549
2550         VALID_PORTID_OR_ERR_RET(port_id, 0);
2551
2552         dev = &rte_eth_devices[port_id];
2553         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, 0);
2554         return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
2555 }
2556
2557 int
2558 rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
2559 {
2560         struct rte_eth_dev *dev;
2561
2562         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2563
2564         dev = &rte_eth_devices[port_id];
2565         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
2566         return (*dev->dev_ops->rx_descriptor_done)(dev->data->rx_queues[queue_id],
2567                                                    offset);
2568 }
2569 #endif
2570
/*
 * Register @cb_fn/@cb_arg for @event on a port.
 *
 * Under the global callback spinlock: if an identical registration already
 * exists it is reused, otherwise a new zeroed entry is allocated and
 * appended.  Returns 0, -EINVAL for a NULL function or bad port, -ENOMEM
 * when allocation fails.
 */
int
rte_eth_dev_callback_register(uint8_t port_id,
                        enum rte_eth_event_type event,
                        rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_callback *user_cb;

        if (!cb_fn)
                return -EINVAL;

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        rte_spinlock_lock(&rte_eth_dev_cb_lock);

        /* Look for an existing identical registration first. */
        TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
                if (user_cb->cb_fn == cb_fn &&
                        user_cb->cb_arg == cb_arg &&
                        user_cb->event == event) {
                        break;
                }
        }

        /* create a new callback. */
        if (user_cb == NULL)
                user_cb = rte_zmalloc("INTR_USER_CALLBACK",
                                      sizeof(struct rte_eth_dev_callback), 0);
        if (user_cb != NULL) {
                user_cb->cb_fn = cb_fn;
                user_cb->cb_arg = cb_arg;
                user_cb->event = event;
                TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
        }

        rte_spinlock_unlock(&rte_eth_dev_cb_lock);
        return (user_cb == NULL) ? -ENOMEM : 0;
}
2609
/*
 * Unregister callbacks matching @cb_fn and @event.
 *
 * An entry also matches when its stored cb_arg equals @cb_arg or is the
 * wildcard (void *)-1.  Entries currently executing inside
 * _rte_eth_dev_callback_process() (active != 0) are left in place and the
 * function returns -EAGAIN; otherwise 0.
 */
int
rte_eth_dev_callback_unregister(uint8_t port_id,
                        enum rte_eth_event_type event,
                        rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
        int ret;
        struct rte_eth_dev *dev;
        struct rte_eth_dev_callback *cb, *next;

        if (!cb_fn)
                return -EINVAL;

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        rte_spinlock_lock(&rte_eth_dev_cb_lock);

        ret = 0;
        /* Manual iteration (not TAILQ_FOREACH) because entries may be
         * removed while walking; 'next' is fetched before any removal. */
        for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

                next = TAILQ_NEXT(cb, next);

                if (cb->cb_fn != cb_fn || cb->event != event ||
                                (cb->cb_arg != (void *)-1 &&
                                cb->cb_arg != cb_arg))
                        continue;

                /*
                 * if this callback is not executing right now,
                 * then remove it.
                 */
                if (cb->active == 0) {
                        TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
                        rte_free(cb);
                } else {
                        ret = -EAGAIN;
                }
        }

        rte_spinlock_unlock(&rte_eth_dev_cb_lock);
        return ret;
}
2652
/*
 * Invoke every user callback registered for @event on @dev.
 *
 * The spinlock is dropped around each user call so callbacks may re-enter
 * the ethdev API; a local copy of the entry is used across that window,
 * and the 'active' flag makes rte_eth_dev_callback_unregister() return
 * -EAGAIN instead of freeing an entry that is mid-execution.
 */
void
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
        enum rte_eth_event_type event)
{
        struct rte_eth_dev_callback *cb_lst;
        struct rte_eth_dev_callback dev_cb;

        rte_spinlock_lock(&rte_eth_dev_cb_lock);
        TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
                if (cb_lst->cb_fn == NULL || cb_lst->event != event)
                        continue;
                /* Copy the entry, then release the lock for the user call. */
                dev_cb = *cb_lst;
                cb_lst->active = 1;
                rte_spinlock_unlock(&rte_eth_dev_cb_lock);
                dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
                                                dev_cb.cb_arg);
                rte_spinlock_lock(&rte_eth_dev_cb_lock);
                cb_lst->active = 0;
        }
        rte_spinlock_unlock(&rte_eth_dev_cb_lock);
}
2674
2675 int
2676 rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
2677 {
2678         uint32_t vec;
2679         struct rte_eth_dev *dev;
2680         struct rte_intr_handle *intr_handle;
2681         uint16_t qid;
2682         int rc;
2683
2684         if (!rte_eth_dev_is_valid_port(port_id)) {
2685                 PMD_DEBUG_TRACE("Invalid port_id=%u\n", port_id);
2686                 return -ENODEV;
2687         }
2688
2689         dev = &rte_eth_devices[port_id];
2690         intr_handle = &dev->pci_dev->intr_handle;
2691         if (!intr_handle->intr_vec) {
2692                 PMD_DEBUG_TRACE("RX Intr vector unset\n");
2693                 return -EPERM;
2694         }
2695
2696         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
2697                 vec = intr_handle->intr_vec[qid];
2698                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2699                 if (rc && rc != -EEXIST) {
2700                         PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2701                                         " op %d epfd %d vec %u\n",
2702                                         port_id, qid, op, epfd, vec);
2703                 }
2704         }
2705
2706         return 0;
2707 }
2708
2709 const struct rte_memzone *
2710 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
2711                          uint16_t queue_id, size_t size, unsigned align,
2712                          int socket_id)
2713 {
2714         char z_name[RTE_MEMZONE_NAMESIZE];
2715         const struct rte_memzone *mz;
2716
2717         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
2718                  dev->driver->pci_drv.name, ring_name,
2719                  dev->data->port_id, queue_id);
2720
2721         mz = rte_memzone_lookup(z_name);
2722         if (mz)
2723                 return mz;
2724
2725         if (is_xen_dom0_supported())
2726                 return rte_memzone_reserve_bounded(z_name, size, socket_id,
2727                                                    0, align, RTE_PGSIZE_2M);
2728         else
2729                 return rte_memzone_reserve_aligned(z_name, size, socket_id,
2730                                                    0, align);
2731 }
2732
2733 int
2734 rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
2735                           int epfd, int op, void *data)
2736 {
2737         uint32_t vec;
2738         struct rte_eth_dev *dev;
2739         struct rte_intr_handle *intr_handle;
2740         int rc;
2741
2742         if (!rte_eth_dev_is_valid_port(port_id)) {
2743                 PMD_DEBUG_TRACE("Invalid port_id=%u\n", port_id);
2744                 return -ENODEV;
2745         }
2746
2747         dev = &rte_eth_devices[port_id];
2748         if (queue_id >= dev->data->nb_rx_queues) {
2749                 PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
2750                 return -EINVAL;
2751         }
2752
2753         intr_handle = &dev->pci_dev->intr_handle;
2754         if (!intr_handle->intr_vec) {
2755                 PMD_DEBUG_TRACE("RX Intr vector unset\n");
2756                 return -EPERM;
2757         }
2758
2759         vec = intr_handle->intr_vec[queue_id];
2760         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2761         if (rc && rc != -EEXIST) {
2762                 PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2763                                 " op %d epfd %d vec %u\n",
2764                                 port_id, queue_id, op, epfd, vec);
2765                 return rc;
2766         }
2767
2768         return 0;
2769 }
2770
2771 int
2772 rte_eth_dev_rx_intr_enable(uint8_t port_id,
2773                            uint16_t queue_id)
2774 {
2775         struct rte_eth_dev *dev;
2776
2777         if (!rte_eth_dev_is_valid_port(port_id)) {
2778                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2779                 return -ENODEV;
2780         }
2781
2782         dev = &rte_eth_devices[port_id];
2783
2784         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
2785         return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
2786 }
2787
2788 int
2789 rte_eth_dev_rx_intr_disable(uint8_t port_id,
2790                             uint16_t queue_id)
2791 {
2792         struct rte_eth_dev *dev;
2793
2794         if (!rte_eth_dev_is_valid_port(port_id)) {
2795                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2796                 return -ENODEV;
2797         }
2798
2799         dev = &rte_eth_devices[port_id];
2800
2801         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
2802         return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
2803 }
2804
2805 #ifdef RTE_NIC_BYPASS
2806 int rte_eth_dev_bypass_init(uint8_t port_id)
2807 {
2808         struct rte_eth_dev *dev;
2809
2810         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2811
2812         dev = &rte_eth_devices[port_id];
2813         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
2814         (*dev->dev_ops->bypass_init)(dev);
2815         return 0;
2816 }
2817
2818 int
2819 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
2820 {
2821         struct rte_eth_dev *dev;
2822
2823         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2824
2825         dev = &rte_eth_devices[port_id];
2826         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2827         (*dev->dev_ops->bypass_state_show)(dev, state);
2828         return 0;
2829 }
2830
2831 int
2832 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
2833 {
2834         struct rte_eth_dev *dev;
2835
2836         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2837
2838         dev = &rte_eth_devices[port_id];
2839         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
2840         (*dev->dev_ops->bypass_state_set)(dev, new_state);
2841         return 0;
2842 }
2843
2844 int
2845 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
2846 {
2847         struct rte_eth_dev *dev;
2848
2849         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2850
2851         dev = &rte_eth_devices[port_id];
2852         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2853         (*dev->dev_ops->bypass_event_show)(dev, event, state);
2854         return 0;
2855 }
2856
2857 int
2858 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
2859 {
2860         struct rte_eth_dev *dev;
2861
2862         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2863
2864         dev = &rte_eth_devices[port_id];
2865
2866         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
2867         (*dev->dev_ops->bypass_event_set)(dev, event, state);
2868         return 0;
2869 }
2870
2871 int
2872 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
2873 {
2874         struct rte_eth_dev *dev;
2875
2876         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2877
2878         dev = &rte_eth_devices[port_id];
2879
2880         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
2881         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
2882         return 0;
2883 }
2884
2885 int
2886 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
2887 {
2888         struct rte_eth_dev *dev;
2889
2890         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2891
2892         dev = &rte_eth_devices[port_id];
2893
2894         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
2895         (*dev->dev_ops->bypass_ver_show)(dev, ver);
2896         return 0;
2897 }
2898
2899 int
2900 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
2901 {
2902         struct rte_eth_dev *dev;
2903
2904         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2905
2906         dev = &rte_eth_devices[port_id];
2907
2908         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
2909         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
2910         return 0;
2911 }
2912
2913 int
2914 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
2915 {
2916         struct rte_eth_dev *dev;
2917
2918         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2919
2920         dev = &rte_eth_devices[port_id];
2921
2922         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
2923         (*dev->dev_ops->bypass_wd_reset)(dev);
2924         return 0;
2925 }
2926 #endif
2927
2928 int
2929 rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
2930 {
2931         struct rte_eth_dev *dev;
2932
2933         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2934
2935         dev = &rte_eth_devices[port_id];
2936         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2937         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
2938                                 RTE_ETH_FILTER_NOP, NULL);
2939 }
2940
2941 int
2942 rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
2943                        enum rte_filter_op filter_op, void *arg)
2944 {
2945         struct rte_eth_dev *dev;
2946
2947         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2948
2949         dev = &rte_eth_devices[port_id];
2950         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2951         return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
2952 }
2953
2954 void *
2955 rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
2956                 rte_rx_callback_fn fn, void *user_param)
2957 {
2958 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2959         rte_errno = ENOTSUP;
2960         return NULL;
2961 #endif
2962         /* check input parameters */
2963         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2964                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2965                 rte_errno = EINVAL;
2966                 return NULL;
2967         }
2968
2969         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2970
2971         if (cb == NULL) {
2972                 rte_errno = ENOMEM;
2973                 return NULL;
2974         }
2975
2976         cb->fn.rx = fn;
2977         cb->param = user_param;
2978
2979         /* Add the callbacks in fifo order. */
2980         struct rte_eth_rxtx_callback *tail =
2981                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
2982
2983         if (!tail) {
2984                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
2985
2986         } else {
2987                 while (tail->next)
2988                         tail = tail->next;
2989                 tail->next = cb;
2990         }
2991
2992         return cb;
2993 }
2994
2995 void *
2996 rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
2997                 rte_tx_callback_fn fn, void *user_param)
2998 {
2999 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3000         rte_errno = ENOTSUP;
3001         return NULL;
3002 #endif
3003         /* check input parameters */
3004         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3005                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3006                 rte_errno = EINVAL;
3007                 return NULL;
3008         }
3009
3010         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3011
3012         if (cb == NULL) {
3013                 rte_errno = ENOMEM;
3014                 return NULL;
3015         }
3016
3017         cb->fn.tx = fn;
3018         cb->param = user_param;
3019
3020         /* Add the callbacks in fifo order. */
3021         struct rte_eth_rxtx_callback *tail =
3022                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3023
3024         if (!tail) {
3025                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3026
3027         } else {
3028                 while (tail->next)
3029                         tail = tail->next;
3030                 tail->next = cb;
3031         }
3032
3033         return cb;
3034 }
3035
3036 int
3037 rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
3038                 struct rte_eth_rxtx_callback *user_cb)
3039 {
3040 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3041         return -ENOTSUP;
3042 #endif
3043         /* Check input parameters. */
3044         if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
3045                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3046                 return -EINVAL;
3047         }
3048
3049         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3050         struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
3051         struct rte_eth_rxtx_callback *prev_cb;
3052
3053         /* Reset head pointer and remove user cb if first in the list. */
3054         if (cb == user_cb) {
3055                 dev->post_rx_burst_cbs[queue_id] = user_cb->next;
3056                 return 0;
3057         }
3058
3059         /* Remove the user cb from the callback list. */
3060         do {
3061                 prev_cb = cb;
3062                 cb = cb->next;
3063
3064                 if (cb == user_cb) {
3065                         prev_cb->next = user_cb->next;
3066                         return 0;
3067                 }
3068
3069         } while (cb != NULL);
3070
3071         /* Callback wasn't found. */
3072         return -EINVAL;
3073 }
3074
3075 int
3076 rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
3077                 struct rte_eth_rxtx_callback *user_cb)
3078 {
3079 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3080         return -ENOTSUP;
3081 #endif
3082         /* Check input parameters. */
3083         if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
3084                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3085                 return -EINVAL;
3086         }
3087
3088         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3089         struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
3090         struct rte_eth_rxtx_callback *prev_cb;
3091
3092         /* Reset head pointer and remove user cb if first in the list. */
3093         if (cb == user_cb) {
3094                 dev->pre_tx_burst_cbs[queue_id] = user_cb->next;
3095                 return 0;
3096         }
3097
3098         /* Remove the user cb from the callback list. */
3099         do {
3100                 prev_cb = cb;
3101                 cb = cb->next;
3102
3103                 if (cb == user_cb) {
3104                         prev_cb->next = user_cb->next;
3105                         return 0;
3106                 }
3107
3108         } while (cb != NULL);
3109
3110         /* Callback wasn't found. */
3111         return -EINVAL;
3112 }
3113
3114 int
3115 rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3116         struct rte_eth_rxq_info *qinfo)
3117 {
3118         struct rte_eth_dev *dev;
3119
3120         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3121
3122         if (qinfo == NULL)
3123                 return -EINVAL;
3124
3125         dev = &rte_eth_devices[port_id];
3126         if (queue_id >= dev->data->nb_rx_queues) {
3127                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3128                 return -EINVAL;
3129         }
3130
3131         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3132
3133         memset(qinfo, 0, sizeof(*qinfo));
3134         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3135         return 0;
3136 }
3137
3138 int
3139 rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3140         struct rte_eth_txq_info *qinfo)
3141 {
3142         struct rte_eth_dev *dev;
3143
3144         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3145
3146         if (qinfo == NULL)
3147                 return -EINVAL;
3148
3149         dev = &rte_eth_devices[port_id];
3150         if (queue_id >= dev->data->nb_tx_queues) {
3151                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3152                 return -EINVAL;
3153         }
3154
3155         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3156
3157         memset(qinfo, 0, sizeof(*qinfo));
3158         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3159         return 0;
3160 }
3161
3162 int
3163 rte_eth_dev_set_mc_addr_list(uint8_t port_id,
3164                              struct ether_addr *mc_addr_set,
3165                              uint32_t nb_mc_addr)
3166 {
3167         struct rte_eth_dev *dev;
3168
3169         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3170
3171         dev = &rte_eth_devices[port_id];
3172         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3173         return dev->dev_ops->set_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
3174 }
3175
3176 int
3177 rte_eth_timesync_enable(uint8_t port_id)
3178 {
3179         struct rte_eth_dev *dev;
3180
3181         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3182         dev = &rte_eth_devices[port_id];
3183
3184         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3185         return (*dev->dev_ops->timesync_enable)(dev);
3186 }
3187
3188 int
3189 rte_eth_timesync_disable(uint8_t port_id)
3190 {
3191         struct rte_eth_dev *dev;
3192
3193         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3194         dev = &rte_eth_devices[port_id];
3195
3196         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3197         return (*dev->dev_ops->timesync_disable)(dev);
3198 }
3199
3200 int
3201 rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp,
3202                                    uint32_t flags)
3203 {
3204         struct rte_eth_dev *dev;
3205
3206         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3207         dev = &rte_eth_devices[port_id];
3208
3209         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3210         return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
3211 }
3212
3213 int
3214 rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp)
3215 {
3216         struct rte_eth_dev *dev;
3217
3218         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3219         dev = &rte_eth_devices[port_id];
3220
3221         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3222         return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
3223 }
3224
3225 int
3226 rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta)
3227 {
3228         struct rte_eth_dev *dev;
3229
3230         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3231         dev = &rte_eth_devices[port_id];
3232
3233         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
3234         return (*dev->dev_ops->timesync_adjust_time)(dev, delta);
3235 }
3236
3237 int
3238 rte_eth_timesync_read_time(uint8_t port_id, struct timespec *timestamp)
3239 {
3240         struct rte_eth_dev *dev;
3241
3242         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3243         dev = &rte_eth_devices[port_id];
3244
3245         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
3246         return (*dev->dev_ops->timesync_read_time)(dev, timestamp);
3247 }
3248
3249 int
3250 rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *timestamp)
3251 {
3252         struct rte_eth_dev *dev;
3253
3254         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3255         dev = &rte_eth_devices[port_id];
3256
3257         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
3258         return (*dev->dev_ops->timesync_write_time)(dev, timestamp);
3259 }
3260
3261 int
3262 rte_eth_dev_get_reg_length(uint8_t port_id)
3263 {
3264         struct rte_eth_dev *dev;
3265
3266         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3267
3268         dev = &rte_eth_devices[port_id];
3269         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg_length, -ENOTSUP);
3270         return (*dev->dev_ops->get_reg_length)(dev);
3271 }
3272
3273 int
3274 rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info)
3275 {
3276         struct rte_eth_dev *dev;
3277
3278         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3279
3280         dev = &rte_eth_devices[port_id];
3281         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
3282         return (*dev->dev_ops->get_reg)(dev, info);
3283 }
3284
3285 int
3286 rte_eth_dev_get_eeprom_length(uint8_t port_id)
3287 {
3288         struct rte_eth_dev *dev;
3289
3290         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3291
3292         dev = &rte_eth_devices[port_id];
3293         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
3294         return (*dev->dev_ops->get_eeprom_length)(dev);
3295 }
3296
3297 int
3298 rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
3299 {
3300         struct rte_eth_dev *dev;
3301
3302         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3303
3304         dev = &rte_eth_devices[port_id];
3305         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
3306         return (*dev->dev_ops->get_eeprom)(dev, info);
3307 }
3308
3309 int
3310 rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
3311 {
3312         struct rte_eth_dev *dev;
3313
3314         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3315
3316         dev = &rte_eth_devices[port_id];
3317         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
3318         return (*dev->dev_ops->set_eeprom)(dev, info);
3319 }
3320
3321 int
3322 rte_eth_dev_get_dcb_info(uint8_t port_id,
3323                              struct rte_eth_dcb_info *dcb_info)
3324 {
3325         struct rte_eth_dev *dev;
3326
3327         if (!rte_eth_dev_is_valid_port(port_id)) {
3328                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3329                 return -ENODEV;
3330         }
3331
3332         dev = &rte_eth_devices[port_id];
3333         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
3334
3335         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
3336         return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
3337 }
3338
3339 void
3340 rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev, struct rte_pci_device *pci_dev)
3341 {
3342         if ((eth_dev == NULL) || (pci_dev == NULL)) {
3343                 PMD_DEBUG_TRACE("NULL pointer eth_dev=%p pci_dev=%p\n",
3344                                 eth_dev, pci_dev);
3345                 return;
3346         }
3347
3348         eth_dev->data->dev_flags = 0;
3349         if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
3350                 eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
3351         if (pci_dev->driver->drv_flags & RTE_PCI_DRV_DETACHABLE)
3352                 eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
3353
3354         eth_dev->data->kdrv = pci_dev->kdrv;
3355         eth_dev->data->numa_node = pci_dev->numa_node;
3356         eth_dev->data->drv_name = pci_dev->driver->name;
3357 }