ethdev: allow full control from secondary process
[dpdk.git] / lib / librte_ether / rte_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_ether.h"
#include "rte_ethdev.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static struct rte_eth_dev_data *rte_eth_dev_data;
static uint8_t nb_ports;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
        {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
                rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
                sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
                sizeof(rte_txq_stats_strings[0]))


/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * a pointer to the callback's parameters, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};
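
/*
 * Illustrative sketch: a user callback matching rte_eth_dev_cb_fn, as it
 * would be registered with rte_eth_dev_callback_register().  The handler
 * name and body below are hypothetical.
 *
 *      static void
 *      lsc_event_handler(uint8_t port_id, enum rte_eth_event_type event,
 *                      void *cb_arg)
 *      {
 *              RTE_SET_USED(cb_arg);
 *              if (event == RTE_ETH_EVENT_INTR_LSC)
 *                      printf("link status change on port %u\n", port_id);
 *      }
 */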

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

enum {
        DEV_DETACHED = 0,
        DEV_ATTACHED
};

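/*
 * The per-port data array lives in a named memzone so that it is shared
 * between processes: the primary process reserves and zeroes it, while
 * secondary processes merely look it up and therefore operate on the very
 * same ports.
 */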
static void
rte_eth_dev_data_alloc(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
                                rte_socket_id(), flags);
        } else
                mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
        if (mz == NULL)
                rte_panic("Cannot allocate memzone for ethernet port data\n");

        rte_eth_dev_data = mz->addr;
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                memset(rte_eth_dev_data, 0,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

static uint8_t
rte_eth_dev_find_free_port(void)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].attached == DEV_DETACHED)
                        return i;
        }
        return RTE_MAX_ETHPORTS;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name, enum rte_eth_dev_type type)
{
        uint8_t port_id;
        struct rte_eth_dev *eth_dev;

        port_id = rte_eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
                return NULL;
        }

        if (rte_eth_dev_data == NULL)
                rte_eth_dev_data_alloc();

        if (rte_eth_dev_allocated(name) != NULL) {
                RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
                                name);
                return NULL;
        }

        eth_dev = &rte_eth_devices[port_id];
        eth_dev->data = &rte_eth_dev_data[port_id];
        snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
        eth_dev->data->port_id = port_id;
        eth_dev->attached = DEV_ATTACHED;
        eth_dev->dev_type = type;
        nb_ports++;
        return eth_dev;
}

static int
rte_eth_dev_create_unique_device_name(char *name, size_t size,
                struct rte_pci_device *pci_dev)
{
        int ret;

        ret = snprintf(name, size, "%d:%d.%d",
                        pci_dev->addr.bus, pci_dev->addr.devid,
                        pci_dev->addr.function);
        if (ret < 0)
                return ret;
        return 0;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        eth_dev->attached = DEV_DETACHED;
        nb_ports--;
        return 0;
}

static int
rte_eth_dev_init(struct rte_pci_driver *pci_drv,
                 struct rte_pci_device *pci_dev)
{
        struct eth_driver    *eth_drv;
        struct rte_eth_dev *eth_dev;
        char ethdev_name[RTE_ETH_NAME_MAX_LEN];

        int diag;

        eth_drv = (struct eth_driver *)pci_drv;

        /* Create unique Ethernet device name using PCI address */
        rte_eth_dev_create_unique_device_name(ethdev_name,
                        sizeof(ethdev_name), pci_dev);

        eth_dev = rte_eth_dev_allocate(ethdev_name, RTE_ETH_DEV_PCI);
        if (eth_dev == NULL)
                return -ENOMEM;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
                                  eth_drv->dev_private_size,
                                  RTE_CACHE_LINE_SIZE);
                if (eth_dev->data->dev_private == NULL)
                        rte_panic("Cannot allocate memzone for private port data\n");
        }
        eth_dev->pci_dev = pci_dev;
        eth_dev->driver = eth_drv;
        eth_dev->data->rx_mbuf_alloc_failed = 0;

        /* init user callbacks */
        TAILQ_INIT(&(eth_dev->link_intr_cbs));

        /*
         * Set the default MTU.
         */
        eth_dev->data->mtu = ETHER_MTU;

        /* Invoke PMD device initialization function */
        diag = (*eth_drv->eth_dev_init)(eth_dev);
        if (diag == 0)
                return 0;

        RTE_PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%x device_id=0x%x) failed\n",
                        pci_drv->name,
                        (unsigned) pci_dev->id.vendor_id,
                        (unsigned) pci_dev->id.device_id);
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);
        rte_eth_dev_release_port(eth_dev);
        return diag;
}

static int
rte_eth_dev_uninit(struct rte_pci_device *pci_dev)
{
        const struct eth_driver *eth_drv;
        struct rte_eth_dev *eth_dev;
        char ethdev_name[RTE_ETH_NAME_MAX_LEN];
        int ret;

        if (pci_dev == NULL)
                return -EINVAL;

        /* Create unique Ethernet device name using PCI address */
        rte_eth_dev_create_unique_device_name(ethdev_name,
                        sizeof(ethdev_name), pci_dev);

        eth_dev = rte_eth_dev_allocated(ethdev_name);
        if (eth_dev == NULL)
                return -ENODEV;

        eth_drv = (const struct eth_driver *)pci_dev->driver;

        /* Invoke PMD device uninit function */
        if (*eth_drv->eth_dev_uninit) {
                ret = (*eth_drv->eth_dev_uninit)(eth_dev);
                if (ret)
                        return ret;
        }

        /* free ether device */
        rte_eth_dev_release_port(eth_dev);

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);

        eth_dev->pci_dev = NULL;
        eth_dev->driver = NULL;
        eth_dev->data = NULL;

        return 0;
}

/**
 * Register an Ethernet [Poll Mode] driver.
 *
 * Function invoked by the initialization function of an Ethernet driver
 * to simultaneously register itself as a PCI driver and as an Ethernet
 * Poll Mode Driver.
 * Invokes the rte_eal_pci_register() function to register the *pci_drv*
 * structure embedded in the *eth_drv* structure, after having stored the
 * address of the rte_eth_dev_init() function in the *devinit* field of
 * the *pci_drv* structure.
 * During the PCI probing phase, the rte_eth_dev_init() function is
 * invoked for each PCI [Ethernet device] matching the embedded PCI
 * identifiers provided by the driver.
 */
void
rte_eth_driver_register(struct eth_driver *eth_drv)
{
        eth_drv->pci_drv.devinit = rte_eth_dev_init;
        eth_drv->pci_drv.devuninit = rte_eth_dev_uninit;
        rte_eal_pci_register(&eth_drv->pci_drv);
}
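
/*
 * Usage sketch: how a PMD typically registers itself at startup.  The
 * driver, id table and adapter names here are hypothetical; the fields
 * follow struct eth_driver from rte_ethdev.h.
 *
 *      static struct eth_driver rte_foo_pmd = {
 *              .pci_drv = {
 *                      .name = "rte_foo_pmd",
 *                      .id_table = pci_id_foo_map,
 *              },
 *              .eth_dev_init = eth_foo_dev_init,
 *              .dev_private_size = sizeof(struct foo_adapter),
 *      };
 *
 *      rte_eth_driver_register(&rte_foo_pmd);
 */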

int
rte_eth_dev_is_valid_port(uint8_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            rte_eth_devices[port_id].attached != DEV_ATTACHED)
                return 0;
        else
                return 1;
}

int
rte_eth_dev_socket_id(uint8_t port_id)
{
        if (!rte_eth_dev_is_valid_port(port_id))
                return -1;
        return rte_eth_devices[port_id].data->numa_node;
}

uint8_t
rte_eth_dev_count(void)
{
        return nb_ports;
}

static enum rte_eth_dev_type
rte_eth_dev_get_device_type(uint8_t port_id)
{
        if (!rte_eth_dev_is_valid_port(port_id))
                return RTE_ETH_DEV_UNKNOWN;
        return rte_eth_devices[port_id].dev_type;
}

static int
rte_eth_dev_get_addr_by_port(uint8_t port_id, struct rte_pci_addr *addr)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (addr == NULL) {
                RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        *addr = rte_eth_devices[port_id].pci_dev->addr;
        return 0;
}

static int
rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
{
        char *tmp;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (name == NULL) {
                RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        /* don't check 'rte_eth_devices[i].data' here,
         * because it might be overwritten by a VDEV PMD */
        tmp = rte_eth_dev_data[port_id].name;
        strcpy(name, tmp);
        return 0;
}

static int
rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
{
        int i;

        if (name == NULL) {
                RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        *port_id = RTE_MAX_ETHPORTS;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (!strcmp(name, rte_eth_dev_data[i].name)) {
                        *port_id = i;
                        return 0;
                }
        }
        return -ENODEV;
}

static int
rte_eth_dev_get_port_by_addr(const struct rte_pci_addr *addr, uint8_t *port_id)
{
        int i;
        struct rte_pci_device *pci_dev = NULL;

        if (addr == NULL) {
                RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        *port_id = RTE_MAX_ETHPORTS;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                pci_dev = rte_eth_devices[i].pci_dev;

                if (pci_dev &&
                        !rte_eal_compare_pci_addr(&pci_dev->addr, addr)) {
                        *port_id = i;
                        return 0;
                }
        }
        return -ENODEV;
}

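/*
 * Return 0 when the port may be detached: the device must be bound to a
 * UIO-style kernel driver (or none at all) and must advertise the
 * RTE_ETH_DEV_DETACHABLE flag; any other case yields a non-zero value.
 */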
static int
rte_eth_dev_is_detachable(uint8_t port_id)
{
        uint32_t dev_flags;

        if (!rte_eth_dev_is_valid_port(port_id)) {
                RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return -EINVAL;
        }

        switch (rte_eth_devices[port_id].data->kdrv) {
        case RTE_KDRV_IGB_UIO:
        case RTE_KDRV_UIO_GENERIC:
        case RTE_KDRV_NIC_UIO:
        case RTE_KDRV_NONE:
                break;
        case RTE_KDRV_VFIO:
        default:
                return -ENOTSUP;
        }
        dev_flags = rte_eth_devices[port_id].data->dev_flags;
        return !(dev_flags & RTE_ETH_DEV_DETACHABLE);
}

/* attach the new physical device, then store port_id of the device */
static int
rte_eth_dev_attach_pdev(struct rte_pci_addr *addr, uint8_t *port_id)
{
        /* re-construct pci_device_list */
        if (rte_eal_pci_scan())
                goto err;
        /* invoke the probe function of the driver that can handle the new device */
        if (rte_eal_pci_probe_one(addr))
                goto err;

        if (rte_eth_dev_get_port_by_addr(addr, port_id))
                goto err;

        return 0;
err:
        return -1;
}

/* detach the physical device, then store the pci_addr of the device */
static int
rte_eth_dev_detach_pdev(uint8_t port_id, struct rte_pci_addr *addr)
{
        struct rte_pci_addr freed_addr;
        struct rte_pci_addr vp;

        /* get pci address by port id */
        if (rte_eth_dev_get_addr_by_port(port_id, &freed_addr))
                goto err;

        /* a zeroed pci addr means the port comes from a virtual device */
        vp.domain = vp.bus = vp.devid = vp.function = 0;
        if (rte_eal_compare_pci_addr(&vp, &freed_addr) == 0)
                goto err;

        /* invoke the devuninit func of the pci driver,
         * also remove the device from pci_device_list */
        if (rte_eal_pci_detach(&freed_addr))
                goto err;

        *addr = freed_addr;
        return 0;
err:
        return -1;
}

/* attach the new virtual device, then store port_id of the device */
static int
rte_eth_dev_attach_vdev(const char *vdevargs, uint8_t *port_id)
{
        char *name = NULL, *args = NULL;
        int ret = -1;

        /* parse vdevargs, then retrieve device name and args */
        if (rte_eal_parse_devargs_str(vdevargs, &name, &args))
                goto end;

        /* walk through dev_driver_list to find the driver of the device,
         * then invoke the probe function of the driver.
         * rte_eal_vdev_init() updates port_id allocated after
         * initialization.
         */
        if (rte_eal_vdev_init(name, args))
                goto end;

        if (rte_eth_dev_get_port_by_name(name, port_id))
                goto end;

        ret = 0;
end:
        free(name);
        free(args);

        return ret;
}

/* detach the virtual device, then store the name of the device */
static int
rte_eth_dev_detach_vdev(uint8_t port_id, char *vdevname)
{
        char name[RTE_ETH_NAME_MAX_LEN];

        /* get device name by port id */
        if (rte_eth_dev_get_name_by_port(port_id, name))
                goto err;
        /* walk through dev_driver_list to find the driver of the device,
         * then invoke the uninit function of the driver */
        if (rte_eal_vdev_uninit(name))
                goto err;

        strncpy(vdevname, name, sizeof(name));
        return 0;
err:
        return -1;
}

/* attach the new device, then store port_id of the device */
int
rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
{
        struct rte_pci_addr addr;
        int ret = -1;

        if ((devargs == NULL) || (port_id == NULL)) {
                ret = -EINVAL;
                goto err;
        }

        if (eal_parse_pci_DomBDF(devargs, &addr) == 0) {
                ret = rte_eth_dev_attach_pdev(&addr, port_id);
                if (ret < 0)
                        goto err;
        } else {
                ret = rte_eth_dev_attach_vdev(devargs, port_id);
                if (ret < 0)
                        goto err;
        }

        return 0;
err:
        RTE_LOG(ERR, EAL, "Driver cannot attach the device\n");
        return ret;
}
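
/*
 * Usage sketch (the device strings are examples only):
 *
 *      uint8_t port_id;
 *
 *      // a physical device is given by its PCI address
 *      if (rte_eth_dev_attach("0000:02:00.0", &port_id) != 0)
 *              handle_error();
 *
 *      // a virtual device is given by its name plus optional args
 *      if (rte_eth_dev_attach("eth_ring0", &port_id) != 0)
 *              handle_error();
 */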

/* detach the device, then store the name of the device */
int
rte_eth_dev_detach(uint8_t port_id, char *name)
{
        struct rte_pci_addr addr;
        int ret = -1;

        if (name == NULL) {
                ret = -EINVAL;
                goto err;
        }

        /* check whether the driver supports the detach feature */
        if (rte_eth_dev_is_detachable(port_id))
                goto err;

        if (rte_eth_dev_get_device_type(port_id) == RTE_ETH_DEV_PCI) {
                ret = rte_eth_dev_get_addr_by_port(port_id, &addr);
                if (ret < 0)
                        goto err;

                ret = rte_eth_dev_detach_pdev(port_id, &addr);
                if (ret < 0)
                        goto err;

                snprintf(name, RTE_ETH_NAME_MAX_LEN,
                        "%04x:%02x:%02x.%d",
                        addr.domain, addr.bus,
                        addr.devid, addr.function);
        } else {
                ret = rte_eth_dev_detach_vdev(port_id, name);
                if (ret < 0)
                        goto err;
        }

        return 0;

err:
        RTE_LOG(ERR, EAL, "Driver cannot detach the device\n");
        return ret;
}

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -(ENOMEM);
                }
        } else { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -(ENOMEM);
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

int
rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
                        " already started\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
}

int
rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
                        " already stopped\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
}

int
rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
                        " already started\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
}

int
rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
                        " already stopped\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -(ENOMEM);
                }
        } else { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        int diag;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_PMD_DEBUG_TRACE(
                        "Number of RX queues requested (%u) is greater than max supported (%d)\n",
                        nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_PMD_DEBUG_TRACE(
                        "Number of TX queues requested (%u) is greater than max supported (%d)\n",
                        nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                RTE_PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
        if (nb_rx_q > dev_info.max_rx_queues) {
                RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
                                port_id, nb_rx_q, dev_info.max_rx_queues);
                return -EINVAL;
        }
        if (nb_rx_q == 0) {
                RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
                return -EINVAL;
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
                                port_id, nb_tx_q, dev_info.max_tx_queues);
                return -EINVAL;
        }
        if (nb_tx_q == 0) {
                RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
                return -EINVAL;
        }

        /* Copy the dev_conf parameter into the dev structure */
        memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

        /*
         * If link state interrupt is enabled, check that the
         * device supports it.
         */
        if ((dev_conf->intr_conf.lsc == 1) &&
                (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
                RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
                                dev->data->drv_name);
                return -EINVAL;
        }

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (dev_conf->rxmode.jumbo_frame == 1) {
                if (dev_conf->rxmode.max_rx_pkt_len >
                    dev_info.max_rx_pktlen) {
                        RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " > max valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)dev_info.max_rx_pktlen);
                        return -EINVAL;
                } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
                        RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " < min valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)ETHER_MIN_LEN);
                        return -EINVAL;
                }
        } else {
                if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
                        dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
                        /* Use default value */
                        dev->data->dev_conf.rxmode.max_rx_pkt_len =
                                                        ETHER_MAX_LEN;
        }

        /*
         * Setup new number of RX/TX queues and reconfigure device.
         */
        diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
        if (diag != 0) {
                RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
                                port_id, diag);
                return diag;
        }

        diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
        if (diag != 0) {
                RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                return diag;
        }

        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                return diag;
        }

        return 0;
}
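
/*
 * Typical bring-up order, as a hedged sketch; the queue and descriptor
 * counts, port_conf and mb_pool are assumptions of the example:
 *
 *      struct rte_eth_conf port_conf = { ... };
 *
 *      if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) < 0)
 *              handle_error();
 *      if (rte_eth_rx_queue_setup(port_id, 0, 128,
 *                      rte_eth_dev_socket_id(port_id), NULL, mb_pool) < 0)
 *              handle_error();
 *      if (rte_eth_tx_queue_setup(port_id, 0, 512,
 *                      rte_eth_dev_socket_id(port_id), NULL) < 0)
 *              handle_error();
 *      if (rte_eth_dev_start(port_id) < 0)
 *              handle_error();
 */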

static void
rte_eth_dev_config_restore(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct ether_addr addr;
        uint16_t i;
        uint32_t pool = 0;

        dev = &rte_eth_devices[port_id];

        rte_eth_dev_info_get(port_id, &dev_info);

        if (RTE_ETH_DEV_SRIOV(dev).active)
                pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

        /* replay MAC address configuration */
        for (i = 0; i < dev_info.max_mac_addrs; i++) {
                addr = dev->data->mac_addrs[i];

                /* skip zero address */
                if (is_zero_ether_addr(&addr))
                        continue;

                /* add address to the hardware */
                if  (*dev->dev_ops->mac_addr_add &&
                        (dev->data->mac_pool_sel[i] & (1ULL << pool)))
                        (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
                else {
                        RTE_PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
                                        port_id);
                        /* exit the loop but do not return an error */
                        break;
                }
        }

        /* replay promiscuous configuration */
        if (rte_eth_promiscuous_get(port_id) == 1)
                rte_eth_promiscuous_enable(port_id);
        else if (rte_eth_promiscuous_get(port_id) == 0)
                rte_eth_promiscuous_disable(port_id);

        /* replay all multicast configuration */
        if (rte_eth_allmulticast_get(port_id) == 1)
                rte_eth_allmulticast_enable(port_id);
        else if (rte_eth_allmulticast_get(port_id) == 0)
                rte_eth_allmulticast_disable(port_id);
}

int
rte_eth_dev_start(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        int diag;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

        if (dev->data->dev_started != 0) {
                RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
                        " already started\n",
                        port_id);
                return 0;
        }

        diag = (*dev->dev_ops->dev_start)(dev);
        if (diag == 0)
                dev->data->dev_started = 1;
        else
                return diag;

        rte_eth_dev_config_restore(port_id);

        if (dev->data->dev_conf.intr_conf.lsc == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
                (*dev->dev_ops->link_update)(dev, 0);
        }
        return 0;
}

void
rte_eth_dev_stop(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

        if (dev->data->dev_started == 0) {
                RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
                        " already stopped\n",
                        port_id);
                return;
        }

        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_stop)(dev);
}

int
rte_eth_dev_set_link_up(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
        return (*dev->dev_ops->dev_set_link_up)(dev);
}

int
rte_eth_dev_set_link_down(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
        return (*dev->dev_ops->dev_set_link_down)(dev);
}

void
rte_eth_dev_close(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_close)(dev);

        rte_free(dev->data->rx_queues);
        dev->data->rx_queues = NULL;
        rte_free(dev->data->tx_queues);
        dev->data->tx_queues = NULL;
}

int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
                       uint16_t nb_rx_desc, unsigned int socket_id,
                       const struct rte_eth_rxconf *rx_conf,
                       struct rte_mempool *mp)
{
        int ret;
        uint32_t mbp_buf_size;
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        if (dev->data->dev_started) {
                RTE_PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

        /*
         * Check the size of the mbuf data buffer.
         * This value must be provided in the private data of the memory pool.
         * First check that the memory pool has valid private data.
         */
        rte_eth_dev_info_get(port_id, &dev_info);
        if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
                RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
                                mp->name, (int) mp->private_data_size,
                                (int) sizeof(struct rte_pktmbuf_pool_private));
                return -ENOSPC;
        }
        mbp_buf_size = rte_pktmbuf_data_room_size(mp);

        if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
                RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
                                "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
                                "=%d)\n",
                                mp->name,
                                (int)mbp_buf_size,
                                (int)(RTE_PKTMBUF_HEADROOM +
                                      dev_info.min_rx_bufsize),
                                (int)RTE_PKTMBUF_HEADROOM,
                                (int)dev_info.min_rx_bufsize);
                return -EINVAL;
        }

        if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
                        nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
                        nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
                RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
                        "should be: <= %hu, >= %hu, and a multiple of %hu\n",
                        nb_rx_desc,
                        dev_info.rx_desc_lim.nb_max,
                        dev_info.rx_desc_lim.nb_min,
                        dev_info.rx_desc_lim.nb_align);
                return -EINVAL;
        }

        if (rx_conf == NULL)
                rx_conf = &dev_info.default_rxconf;

        ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
                                              socket_id, rx_conf, mp);
        if (!ret) {
                if (!dev->data->min_rx_buf_size ||
                    dev->data->min_rx_buf_size > mbp_buf_size)
                        dev->data->min_rx_buf_size = mbp_buf_size;
        }

        return ret;
}

int
rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
                       uint16_t nb_tx_desc, unsigned int socket_id,
                       const struct rte_eth_txconf *tx_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        if (dev->data->dev_started) {
                RTE_PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);

        rte_eth_dev_info_get(port_id, &dev_info);

        if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
            nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
            nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
                RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
                                "should be: <= %hu, >= %hu, and a multiple of %hu\n",
                                nb_tx_desc,
                                dev_info.tx_desc_lim.nb_max,
                                dev_info.tx_desc_lim.nb_min,
                                dev_info.tx_desc_lim.nb_align);
                return -EINVAL;
        }

        if (tx_conf == NULL)
                tx_conf = &dev_info.default_txconf;

        return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
                                               socket_id, tx_conf);
}

void
rte_eth_promiscuous_enable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
        (*dev->dev_ops->promiscuous_enable)(dev);
        dev->data->promiscuous = 1;
}

void
rte_eth_promiscuous_disable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
        dev->data->promiscuous = 0;
        (*dev->dev_ops->promiscuous_disable)(dev);
}

int
rte_eth_promiscuous_get(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        return dev->data->promiscuous;
}

void
rte_eth_allmulticast_enable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
        (*dev->dev_ops->allmulticast_enable)(dev);
        dev->data->all_multicast = 1;
}

void
rte_eth_allmulticast_disable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
        dev->data->all_multicast = 0;
        (*dev->dev_ops->allmulticast_disable)(dev);
}

int
rte_eth_allmulticast_get(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        return dev->data->all_multicast;
}

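/*
 * Copy the device link status into *link as a single atomic 64-bit
 * operation, so a reader never observes a half-updated speed/duplex/status
 * combination while another thread rewrites dev_link; returns -1 when the
 * compare-and-set loses such a race.
 */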
static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
{
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &(dev->data->dev_link);

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

void
rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_conf.intr_conf.lsc != 0)
                rte_eth_dev_atomic_read_link_status(dev, eth_link);
        else {
                RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
                (*dev->dev_ops->link_update)(dev, 1);
                *eth_link = dev->data->dev_link;
        }
}

void
rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_conf.intr_conf.lsc != 0)
                rte_eth_dev_atomic_read_link_status(dev, eth_link);
        else {
                RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
                (*dev->dev_ops->link_update)(dev, 0);
                *eth_link = dev->data->dev_link;
        }
}

int
rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        memset(stats, 0, sizeof(*stats));

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
        (*dev->dev_ops->stats_get)(dev, stats);
        stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
        return 0;
}

void
rte_eth_stats_reset(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
        (*dev->dev_ops->stats_reset)(dev);
        dev->data->rx_mbuf_alloc_failed = 0;
}

/* retrieve ethdev extended statistics */
int
rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
        unsigned n)
{
        struct rte_eth_stats eth_stats;
        struct rte_eth_dev *dev;
        unsigned count = 0, i, q;
        signed xcount = 0;
        uint64_t val, *stats_ptr;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        /* Return generic statistics */
        count = RTE_NB_STATS + (dev->data->nb_rx_queues * RTE_NB_RXQ_STATS) +
                (dev->data->nb_tx_queues * RTE_NB_TXQ_STATS);

        /* implemented by the driver */
        if (dev->dev_ops->xstats_get != NULL) {
                /* Retrieve the xstats from the driver at the end of the
                 * xstats struct.
                 */
                xcount = (*dev->dev_ops->xstats_get)(dev, &xstats[count],
                         (n > count) ? n - count : 0);

                if (xcount < 0)
                        return xcount;
        }

        if (n < count + xcount)
                return count + xcount;

        /* now fill the xstats structure */
        count = 0;
        rte_eth_stats_get(port_id, &eth_stats);

        /* global stats */
        for (i = 0; i < RTE_NB_STATS; i++) {
                stats_ptr = RTE_PTR_ADD(&eth_stats,
                                        rte_stats_strings[i].offset);
                val = *stats_ptr;
                snprintf(xstats[count].name, sizeof(xstats[count].name),
                        "%s", rte_stats_strings[i].name);
                xstats[count++].value = val;
        }

        /* per-rxq stats */
        for (q = 0; q < dev->data->nb_rx_queues; q++) {
                for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
                        stats_ptr = RTE_PTR_ADD(&eth_stats,
                                        rte_rxq_stats_strings[i].offset +
                                        q * sizeof(uint64_t));
                        val = *stats_ptr;
                        snprintf(xstats[count].name, sizeof(xstats[count].name),
                                "rx_q%u_%s", q,
                                rte_rxq_stats_strings[i].name);
                        xstats[count++].value = val;
                }
        }

        /* per-txq stats */
        for (q = 0; q < dev->data->nb_tx_queues; q++) {
                for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
                        stats_ptr = RTE_PTR_ADD(&eth_stats,
                                        rte_txq_stats_strings[i].offset +
                                        q * sizeof(uint64_t));
                        val = *stats_ptr;
                        snprintf(xstats[count].name, sizeof(xstats[count].name),
                                "tx_q%u_%s", q,
                                rte_txq_stats_strings[i].name);
                        xstats[count++].value = val;
                }
        }

        return count + xcount;
}
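
/*
 * Usage sketch (the array size is an arbitrary example): when the supplied
 * array is too small, the function fills nothing and returns the required
 * number of entries, so a caller can retry with a bigger buffer:
 *
 *      struct rte_eth_xstats xs[256];
 *      int n = rte_eth_xstats_get(port_id, xs, 256);
 *
 *      if (n > 256) {
 *              // allocate n entries and call again
 *      }
 */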
1490
1491 /* reset ethdev extended statistics */
1492 void
1493 rte_eth_xstats_reset(uint8_t port_id)
1494 {
1495         struct rte_eth_dev *dev;
1496
1497         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1498         dev = &rte_eth_devices[port_id];
1499
1500         /* implemented by the driver */
1501         if (dev->dev_ops->xstats_reset != NULL) {
1502                 (*dev->dev_ops->xstats_reset)(dev);
1503                 return;
1504         }
1505
1506         /* fallback to default */
1507         rte_eth_stats_reset(port_id);
1508 }
1509
1510 static int
1511 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1512                 uint8_t is_rx)
1513 {
1514         struct rte_eth_dev *dev;
1515
1516         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1517
1518         dev = &rte_eth_devices[port_id];
1519
1520         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1521         return (*dev->dev_ops->queue_stats_mapping_set)
1522                         (dev, queue_id, stat_idx, is_rx);
1523 }
1524
1525
1526 int
1527 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1528                 uint8_t stat_idx)
1529 {
1530         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1531                         STAT_QMAP_TX);
1532 }
1533
1534
1535 int
1536 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1537                 uint8_t stat_idx)
1538 {
1539         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1540                         STAT_QMAP_RX);
1541 }
1542
1543
1544 void
1545 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1546 {
1547         struct rte_eth_dev *dev;
1548         const struct rte_eth_desc_lim lim = {
1549                 .nb_max = UINT16_MAX,
1550                 .nb_min = 0,
1551                 .nb_align = 1,
1552         };
1553
1554         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1555         dev = &rte_eth_devices[port_id];
1556
1557         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
1558         dev_info->rx_desc_lim = lim;
1559         dev_info->tx_desc_lim = lim;
1560
1561         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1562         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1563         dev_info->pci_dev = dev->pci_dev;
1564         dev_info->driver_name = dev->data->drv_name;
1565 }
1566
1567 void
1568 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1569 {
1570         struct rte_eth_dev *dev;
1571
1572         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1573         dev = &rte_eth_devices[port_id];
1574         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1575 }
1576
1577
1578 int
1579 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1580 {
1581         struct rte_eth_dev *dev;
1582
1583         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1584
1585         dev = &rte_eth_devices[port_id];
1586         *mtu = dev->data->mtu;
1587         return 0;
1588 }
1589
1590 int
1591 rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
1592 {
1593         int ret;
1594         struct rte_eth_dev *dev;
1595
1596         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1597         dev = &rte_eth_devices[port_id];
1598         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
1599
1600         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
1601         if (!ret)
1602                 dev->data->mtu = mtu;
1603
1604         return ret;
1605 }
1606
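/*
 * Usage sketch (illustrative; 9000 is an arbitrary jumbo value): the new
 * MTU is cached in dev->data only when the driver accepts it, so a
 * read-back reflects what was actually applied.
 *
 *        uint16_t mtu;
 *
 *        if (rte_eth_dev_set_mtu(port_id, 9000) == 0 &&
 *            rte_eth_dev_get_mtu(port_id, &mtu) == 0)
 *                printf("port %u MTU is now %u\n", port_id, mtu);
 */
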
1607 int
1608 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1609 {
1610         struct rte_eth_dev *dev;
1611
1612         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1613         dev = &rte_eth_devices[port_id];
1614         if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1615                 RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1616                 return -ENOSYS;
1617         }
1618
1619         if (vlan_id > 4095) {
1620                 RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1621                                 port_id, (unsigned) vlan_id);
1622                 return -EINVAL;
1623         }
1624         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1625
1626         return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1627 }
1628
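/*
 * Usage sketch (illustrative; VLAN 100 is arbitrary): the port must have
 * been configured with rxmode.hw_vlan_filter = 1, otherwise the call
 * above fails with -ENOSYS before reaching the driver.
 *
 *        if (rte_eth_dev_vlan_filter(port_id, 100, 1) != 0)
 *                printf("could not accept VLAN 100 on port %u\n", port_id);
 */
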
1629 int
1630 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1631 {
1632         struct rte_eth_dev *dev;
1633
1634         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1635         dev = &rte_eth_devices[port_id];
1636         if (rx_queue_id >= dev->data->nb_rx_queues) {
1637                 RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
1638                 return -EINVAL;
1639         }
1640
1641         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1642         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1643
1644         return 0;
1645 }
1646
1647 int
1648 rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
1649 {
1650         struct rte_eth_dev *dev;
1651
1652         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1653         dev = &rte_eth_devices[port_id];
1654         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1655         (*dev->dev_ops->vlan_tpid_set)(dev, tpid);
1656
1657         return 0;
1658 }
1659
1660 int
1661 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
1662 {
1663         struct rte_eth_dev *dev;
1664         int ret = 0;
1665         int mask = 0;
1666         int cur, org = 0;
1667
1668         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1669         dev = &rte_eth_devices[port_id];
1670
1671         /* check which options were changed by the application */
1672         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
1673         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
1674         if (cur != org) {
1675                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
1676                 mask |= ETH_VLAN_STRIP_MASK;
1677         }
1678
1679         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
1680         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
1681         if (cur != org) {
1682                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
1683                 mask |= ETH_VLAN_FILTER_MASK;
1684         }
1685
1686         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
1687         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
1688         if (cur != org) {
1689                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
1690                 mask |= ETH_VLAN_EXTEND_MASK;
1691         }
1692
1693         /* no change */
1694         if (mask == 0)
1695                 return ret;
1696
1697         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
1698         (*dev->dev_ops->vlan_offload_set)(dev, mask);
1699
1700         return ret;
1701 }
1702
1703 int
1704 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1705 {
1706         struct rte_eth_dev *dev;
1707         int ret = 0;
1708
1709         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1710         dev = &rte_eth_devices[port_id];
1711
1712         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1713                 ret |= ETH_VLAN_STRIP_OFFLOAD;
1714
1715         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1716                 ret |= ETH_VLAN_FILTER_OFFLOAD;
1717
1718         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1719                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
1720
1721         return ret;
1722 }
1723
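/*
 * Usage sketch (illustrative): since rte_eth_dev_set_vlan_offload()
 * expects the complete offload mask, toggling one flag is a
 * read-modify-write against rte_eth_dev_get_vlan_offload().
 *
 *        int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *        if (mask >= 0)
 *                (void)rte_eth_dev_set_vlan_offload(port_id,
 *                                mask | ETH_VLAN_STRIP_OFFLOAD);
 */
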
1724 int
1725 rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
1726 {
1727         struct rte_eth_dev *dev;
1728
1729         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1730         dev = &rte_eth_devices[port_id];
1731         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
1732         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
1733
1734         return 0;
1735 }
1736
1737 int
1738 rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1739 {
1740         struct rte_eth_dev *dev;
1741
1742         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1743         dev = &rte_eth_devices[port_id];
1744         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
1745         memset(fc_conf, 0, sizeof(*fc_conf));
1746         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
1747 }
1748
1749 int
1750 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1751 {
1752         struct rte_eth_dev *dev;
1753
1754         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1755         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
1756                 RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
1757                 return -EINVAL;
1758         }
1759
1760         dev = &rte_eth_devices[port_id];
1761         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
1762         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
1763 }
1764
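/*
 * Usage sketch (illustrative; the field values are arbitrary): a common
 * pattern is to read the current flow-control settings, adjust them and
 * write them back, letting the driver validate the watermarks.
 *
 *        struct rte_eth_fc_conf fc;
 *
 *        if (rte_eth_dev_flow_ctrl_get(port_id, &fc) == 0) {
 *                fc.mode = RTE_FC_FULL;
 *                fc.pause_time = 0xffff;
 *                (void)rte_eth_dev_flow_ctrl_set(port_id, &fc);
 *        }
 */
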
1765 int
1766 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
1767 {
1768         struct rte_eth_dev *dev;
1769
1770         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1771         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
1772                 RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
1773                 return -EINVAL;
1774         }
1775
1776         dev = &rte_eth_devices[port_id];
1777         /* High water and low water validation is device-specific */
1778         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->priority_flow_ctrl_set,
1779                                 -ENOTSUP);
1780         return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
1781 }
1782
1783 static int
1784 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
1785                         uint16_t reta_size)
1786 {
1787         uint16_t i, num;
1788
1789         if (!reta_conf)
1790                 return -EINVAL;
1791
1792         if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
1793                 RTE_PMD_DEBUG_TRACE("Invalid reta size, must be a multiple of %u\n",
1794                                                         RTE_RETA_GROUP_SIZE);
1795                 return -EINVAL;
1796         }
1797
1798         num = reta_size / RTE_RETA_GROUP_SIZE;
1799         for (i = 0; i < num; i++) {
1800                 if (reta_conf[i].mask)
1801                         return 0;
1802         }
1803
1804         return -EINVAL;
1805 }
1806
1807 static int
1808 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
1809                          uint16_t reta_size,
1810                          uint8_t max_rxq)
1811 {
1812         uint16_t i, idx, shift;
1813
1814         if (!reta_conf)
1815                 return -EINVAL;
1816
1817         if (max_rxq == 0) {
1818                 RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
1819                 return -EINVAL;
1820         }
1821
1822         for (i = 0; i < reta_size; i++) {
1823                 idx = i / RTE_RETA_GROUP_SIZE;
1824                 shift = i % RTE_RETA_GROUP_SIZE;
1825                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
1826                         (reta_conf[idx].reta[shift] >= max_rxq)) {
1827                         RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
1828                                 "the maximum rxq index: %u\n", idx, shift,
1829                                 reta_conf[idx].reta[shift], max_rxq);
1830                         return -EINVAL;
1831                 }
1832         }
1833
1834         return 0;
1835 }
1836
1837 int
1838 rte_eth_dev_rss_reta_update(uint8_t port_id,
1839                             struct rte_eth_rss_reta_entry64 *reta_conf,
1840                             uint16_t reta_size)
1841 {
1842         struct rte_eth_dev *dev;
1843         int ret;
1844
1845         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1846         /* Check mask bits */
1847         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1848         if (ret < 0)
1849                 return ret;
1850
1851         dev = &rte_eth_devices[port_id];
1852
1853         /* Check entry value */
1854         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
1855                                 dev->data->nb_rx_queues);
1856         if (ret < 0)
1857                 return ret;
1858
1859         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
1860         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
1861 }
1862
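/*
 * Usage sketch (illustrative; "reta_size" and "nb_q" are assumed to come
 * from dev_info): each rte_eth_rss_reta_entry64 covers RTE_RETA_GROUP_SIZE
 * (64) indirection entries, and bit i of "mask" selects whether reta[i] of
 * that group is written; reta_size must be a multiple of the group size.
 * Spreading the table round-robin over nb_q queues:
 *
 *        struct rte_eth_rss_reta_entry64 conf[reta_size / RTE_RETA_GROUP_SIZE];
 *        uint16_t i;
 *
 *        memset(conf, 0, sizeof(conf));
 *        for (i = 0; i < reta_size; i++) {
 *                conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *                                1ULL << (i % RTE_RETA_GROUP_SIZE);
 *                conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *                                i % nb_q;
 *        }
 *        (void)rte_eth_dev_rss_reta_update(port_id, conf, reta_size);
 */
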
1863 int
1864 rte_eth_dev_rss_reta_query(uint8_t port_id,
1865                            struct rte_eth_rss_reta_entry64 *reta_conf,
1866                            uint16_t reta_size)
1867 {
1868         struct rte_eth_dev *dev;
1869         int ret;
1870
1871         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1875
1876         /* Check mask bits */
1877         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1878         if (ret < 0)
1879                 return ret;
1880
1881         dev = &rte_eth_devices[port_id];
1882         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
1883         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
1884 }
1885
1886 int
1887 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
1888 {
1889         struct rte_eth_dev *dev;
1890         uint64_t rss_hash_protos;
1891
1892         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1893         rss_hash_protos = rss_conf->rss_hf;
1894         if ((rss_hash_protos != 0) &&
1895             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
1896                 RTE_PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%"PRIx64"\n",
1897                                 rss_hash_protos);
1898                 return -EINVAL;
1899         }
1900         dev = &rte_eth_devices[port_id];
1901         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
1902         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
1903 }
1904
1905 int
1906 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
1907                               struct rte_eth_rss_conf *rss_conf)
1908 {
1909         struct rte_eth_dev *dev;
1910
1911         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1912         dev = &rte_eth_devices[port_id];
1913         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
1914         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
1915 }
1916
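/*
 * Usage sketch (illustrative): restrict the RSS hash to TCP/IPv4 traffic.
 * Drivers generally treat a NULL rss_key as "leave the key unchanged",
 * though that behaviour is driver-specific.
 *
 *        struct rte_eth_rss_conf rss = {
 *                .rss_key = NULL,
 *                .rss_hf = ETH_RSS_NONFRAG_IPV4_TCP,
 *        };
 *
 *        (void)rte_eth_dev_rss_hash_update(port_id, &rss);
 */
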
1917 int
1918 rte_eth_dev_udp_tunnel_add(uint8_t port_id,
1919                            struct rte_eth_udp_tunnel *udp_tunnel)
1920 {
1921         struct rte_eth_dev *dev;
1922
1923         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1924         if (udp_tunnel == NULL) {
1925                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
1926                 return -EINVAL;
1927         }
1928
1929         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
1930                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
1931                 return -EINVAL;
1932         }
1933
1934         dev = &rte_eth_devices[port_id];
1935         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_add, -ENOTSUP);
1936         return (*dev->dev_ops->udp_tunnel_add)(dev, udp_tunnel);
1937 }
1938
1939 int
1940 rte_eth_dev_udp_tunnel_delete(uint8_t port_id,
1941                               struct rte_eth_udp_tunnel *udp_tunnel)
1942 {
1943         struct rte_eth_dev *dev;
1944
1945         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1946         dev = &rte_eth_devices[port_id];
1947
1948         if (udp_tunnel == NULL) {
1949                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
1950                 return -EINVAL;
1951         }
1952
1953         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
1954                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
1955                 return -EINVAL;
1956         }
1957
1958         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_del, -ENOTSUP);
1959         return (*dev->dev_ops->udp_tunnel_del)(dev, udp_tunnel);
1960 }
1961
1962 int
1963 rte_eth_led_on(uint8_t port_id)
1964 {
1965         struct rte_eth_dev *dev;
1966
1967         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1968         dev = &rte_eth_devices[port_id];
1969         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
1970         return (*dev->dev_ops->dev_led_on)(dev);
1971 }
1972
1973 int
1974 rte_eth_led_off(uint8_t port_id)
1975 {
1976         struct rte_eth_dev *dev;
1977
1978         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1979         dev = &rte_eth_devices[port_id];
1980         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
1981         return (*dev->dev_ops->dev_led_off)(dev);
1982 }
1983
1984 /*
1985  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
1986  * an empty spot.
1987  */
1988 static int
1989 get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
1990 {
1991         struct rte_eth_dev_info dev_info;
1992         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1993         unsigned i;
1994
1995         rte_eth_dev_info_get(port_id, &dev_info);
1996
1997         for (i = 0; i < dev_info.max_mac_addrs; i++)
1998                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
1999                         return i;
2000
2001         return -1;
2002 }
2003
2004 static const struct ether_addr null_mac_addr;
2005
2006 int
2007 rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
2008                         uint32_t pool)
2009 {
2010         struct rte_eth_dev *dev;
2011         int index;
2012         uint64_t pool_mask;
2013
2014         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2015         dev = &rte_eth_devices[port_id];
2016         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2017
2018         if (is_zero_ether_addr(addr)) {
2019                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2020                         port_id);
2021                 return -EINVAL;
2022         }
2023         if (pool >= ETH_64_POOLS) {
2024                 RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2025                 return -EINVAL;
2026         }
2027
2028         index = get_mac_addr_index(port_id, addr);
2029         if (index < 0) {
2030                 index = get_mac_addr_index(port_id, &null_mac_addr);
2031                 if (index < 0) {
2032                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2033                                 port_id);
2034                         return -ENOSPC;
2035                 }
2036         } else {
2037                 pool_mask = dev->data->mac_pool_sel[index];
2038
2039                 /* If both the MAC address and pool are already set, do nothing */
2040                 if (pool_mask & (1ULL << pool))
2041                         return 0;
2042         }
2043
2044         /* Update NIC */
2045         (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2046
2047         /* Update address in NIC data structure */
2048         ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2049
2050         /* Update pool bitmap in NIC data structure */
2051         dev->data->mac_pool_sel[index] |= (1ULL << pool);
2052
2053         return 0;
2054 }
2055
2056 int
2057 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
2058 {
2059         struct rte_eth_dev *dev;
2060         int index;
2061
2062         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2063         dev = &rte_eth_devices[port_id];
2064         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2065
2066         index = get_mac_addr_index(port_id, addr);
2067         if (index == 0) {
2068                 RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2069                 return -EADDRINUSE;
2070         } else if (index < 0)
2071                 return 0;  /* Do nothing if address wasn't found */
2072
2073         /* Update NIC */
2074         (*dev->dev_ops->mac_addr_remove)(dev, index);
2075
2076         /* Update address in NIC data structure */
2077         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2078
2079         /* reset pool bitmap */
2080         dev->data->mac_pool_sel[index] = 0;
2081
2082         return 0;
2083 }
2084
2085 int
2086 rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr)
2087 {
2088         struct rte_eth_dev *dev;
2089
2090         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2091
2092         if (!is_valid_assigned_ether_addr(addr))
2093                 return -EINVAL;
2094
2095         dev = &rte_eth_devices[port_id];
2096         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2097
2098         /* Update default address in NIC data structure */
2099         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
2100
2101         (*dev->dev_ops->mac_addr_set)(dev, addr);
2102
2103         return 0;
2104 }
2105
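/*
 * Usage sketch (illustrative; the address is an arbitrary locally
 * administered one): add a secondary unicast address to pool 0, then
 * remove it again. Removing index 0 (the default address) is rejected
 * with -EADDRINUSE, as implemented above.
 *
 *        struct ether_addr mac = {
 *                .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *        };
 *
 *        if (rte_eth_dev_mac_addr_add(port_id, &mac, 0) == 0)
 *                (void)rte_eth_dev_mac_addr_remove(port_id, &mac);
 */
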
2106 int
2107 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
2108                                 uint16_t rx_mode, uint8_t on)
2109 {
2110         uint16_t num_vfs;
2111         struct rte_eth_dev *dev;
2112         struct rte_eth_dev_info dev_info;
2113
2114         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2115
2116         dev = &rte_eth_devices[port_id];
2117         rte_eth_dev_info_get(port_id, &dev_info);
2118
2119         num_vfs = dev_info.max_vfs;
2120         if (vf >= num_vfs) {
2121                 RTE_PMD_DEBUG_TRACE("set VF RX mode: invalid VF id %d\n", vf);
2122                 return -EINVAL;
2123         }
2124
2125         if (rx_mode == 0) {
2126                 RTE_PMD_DEBUG_TRACE("set VF RX mode: mode mask cannot be zero\n");
2127                 return -EINVAL;
2128         }
2129         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
2130         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
2131 }
2132
2133 /*
2134  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2135  * an empty spot.
2136  */
2137 static int
2138 get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2139 {
2140         struct rte_eth_dev_info dev_info;
2141         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2142         unsigned i;
2143
2144         rte_eth_dev_info_get(port_id, &dev_info);
2145         if (!dev->data->hash_mac_addrs)
2146                 return -1;
2147
2148         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2149                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2150                         ETHER_ADDR_LEN) == 0)
2151                         return i;
2152
2153         return -1;
2154 }
2155
2156 int
2157 rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
2158                                 uint8_t on)
2159 {
2160         int index;
2161         int ret;
2162         struct rte_eth_dev *dev;
2163
2164         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2165
2166         dev = &rte_eth_devices[port_id];
2167         if (is_zero_ether_addr(addr)) {
2168                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2169                         port_id);
2170                 return -EINVAL;
2171         }
2172
2173         index = get_hash_mac_addr_index(port_id, addr);
2174         /* Check if it's already there, and do nothing */
2175         if ((index >= 0) && (on))
2176                 return 0;
2177
2178         if (index < 0) {
2179                 if (!on) {
2180                         RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
2181                                 "set in UTA\n", port_id);
2182                         return -EINVAL;
2183                 }
2184
2185                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2186                 if (index < 0) {
2187                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2188                                         port_id);
2189                         return -ENOSPC;
2190                 }
2191         }
2192
2193         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2194         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2195         if (ret == 0) {
2196                 /* Update address in NIC data structure */
2197                 if (on)
2198                         ether_addr_copy(addr,
2199                                         &dev->data->hash_mac_addrs[index]);
2200                 else
2201                         ether_addr_copy(&null_mac_addr,
2202                                         &dev->data->hash_mac_addrs[index]);
2203         }
2204
2205         return ret;
2206 }
2207
2208 int
2209 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2210 {
2211         struct rte_eth_dev *dev;
2212
2213         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2214
2215         dev = &rte_eth_devices[port_id];
2216
2217         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2218         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2219 }
2220
2221 int
2222 rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on)
2223 {
2224         uint16_t num_vfs;
2225         struct rte_eth_dev *dev;
2226         struct rte_eth_dev_info dev_info;
2227
2228         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2229
2230         dev = &rte_eth_devices[port_id];
2231         rte_eth_dev_info_get(port_id, &dev_info);
2232
2233         num_vfs = dev_info.max_vfs;
2234         if (vf >= num_vfs) {
2235                 RTE_PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2236                 return -EINVAL;
2237         }
2238
2239         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2240         return (*dev->dev_ops->set_vf_rx)(dev, vf, on);
2241 }
2242
2243 int
2244 rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on)
2245 {
2246         uint16_t num_vfs;
2247         struct rte_eth_dev *dev;
2248         struct rte_eth_dev_info dev_info;
2249
2250         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2251
2252         dev = &rte_eth_devices[port_id];
2253         rte_eth_dev_info_get(port_id, &dev_info);
2254
2255         num_vfs = dev_info.max_vfs;
2256         if (vf >= num_vfs) {
2257                 RTE_PMD_DEBUG_TRACE("set VF TX: invalid VF id=%d\n", vf);
2258                 return -EINVAL;
2259         }
2260
2261         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2262         return (*dev->dev_ops->set_vf_tx)(dev, vf, on);
2263 }
2264
2265 int
2266 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2267                                uint64_t vf_mask, uint8_t vlan_on)
2268 {
2269         struct rte_eth_dev *dev;
2270
2271         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2272
2273         dev = &rte_eth_devices[port_id];
2274
2275         if (vlan_id > ETHER_MAX_VLAN_ID) {
2276                 RTE_PMD_DEBUG_TRACE("VF VLAN filter: invalid VLAN id=%d\n",
2277                         vlan_id);
2278                 return -EINVAL;
2279         }
2280
2281         if (vf_mask == 0) {
2282                 RTE_PMD_DEBUG_TRACE("VF VLAN filter: pool_mask cannot be 0\n");
2283                 return -EINVAL;
2284         }
2285
2286         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2287         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2288                                                    vf_mask, vlan_on);
2289 }
2290
2291 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
2292                                         uint16_t tx_rate)
2293 {
2294         struct rte_eth_dev *dev;
2295         struct rte_eth_dev_info dev_info;
2296         struct rte_eth_link link;
2297
2298         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2299
2300         dev = &rte_eth_devices[port_id];
2301         rte_eth_dev_info_get(port_id, &dev_info);
2302         link = dev->data->dev_link;
2303
2304         if (queue_idx >= dev_info.max_tx_queues) {
2305                 RTE_PMD_DEBUG_TRACE("set queue rate limit: port %d: "
2306                                 "invalid queue id=%d\n", port_id, queue_idx);
2307                 return -EINVAL;
2308         }
2309
2310         if (tx_rate > link.link_speed) {
2311                 RTE_PMD_DEBUG_TRACE("set queue rate limit: invalid tx_rate=%d, "
2312                                 "greater than link speed %d\n",
2313                                 tx_rate, link.link_speed);
2314                 return -EINVAL;
2315         }
2316
2317         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2318         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2319 }
2320
2321 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
2322                                 uint64_t q_msk)
2323 {
2324         struct rte_eth_dev *dev;
2325         struct rte_eth_dev_info dev_info;
2326         struct rte_eth_link link;
2327
2328         if (q_msk == 0)
2329                 return 0;
2330
2331         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2332
2333         dev = &rte_eth_devices[port_id];
2334         rte_eth_dev_info_get(port_id, &dev_info);
2335         link = dev->data->dev_link;
2336
2337         if (vf >= dev_info.max_vfs) {
2338                 RTE_PMD_DEBUG_TRACE("set VF rate limit: port %d: "
2339                                 "invalid vf id=%d\n", port_id, vf);
2340                 return -EINVAL;
2341         }
2342
2343         if (tx_rate > link.link_speed) {
2344                 RTE_PMD_DEBUG_TRACE("set VF rate limit: invalid tx_rate=%d, "
2345                                 "greater than link speed %d\n",
2346                                 tx_rate, link.link_speed);
2347                 return -EINVAL;
2348         }
2349
2350         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
2351         return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
2352 }
2353
2354 int
2355 rte_eth_mirror_rule_set(uint8_t port_id,
2356                         struct rte_eth_mirror_conf *mirror_conf,
2357                         uint8_t rule_id, uint8_t on)
2358 {
2359         struct rte_eth_dev *dev;
2360
2361         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2362         if (mirror_conf->rule_type == 0) {
2363                 RTE_PMD_DEBUG_TRACE("mirror rule type cannot be 0.\n");
2364                 return -EINVAL;
2365         }
2366
2367         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2368                 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
2369                                 ETH_64_POOLS - 1);
2370                 return -EINVAL;
2371         }
2372
2373         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
2374              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
2375             (mirror_conf->pool_mask == 0)) {
2376                 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask cannot be 0.\n");
2377                 return -EINVAL;
2378         }
2379
2380         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
2381             mirror_conf->vlan.vlan_mask == 0) {
2382                 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask cannot be 0.\n");
2383                 return -EINVAL;
2384         }
2385
2386         dev = &rte_eth_devices[port_id];
2387         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2388
2389         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2390 }
2391
2392 int
2393 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2394 {
2395         struct rte_eth_dev *dev;
2396
2397         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2398
2399         dev = &rte_eth_devices[port_id];
2400         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2401
2402         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2403 }
2404
2405 int
2406 rte_eth_dev_callback_register(uint8_t port_id,
2407                         enum rte_eth_event_type event,
2408                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2409 {
2410         struct rte_eth_dev *dev;
2411         struct rte_eth_dev_callback *user_cb;
2412
2413         if (!cb_fn)
2414                 return -EINVAL;
2415
2416         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2417
2418         dev = &rte_eth_devices[port_id];
2419         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2420
2421         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
2422                 if (user_cb->cb_fn == cb_fn &&
2423                         user_cb->cb_arg == cb_arg &&
2424                         user_cb->event == event) {
2425                         break;
2426                 }
2427         }
2428
2429         /* create a new callback. */
2430         if (user_cb == NULL)
2431                 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
2432                                         sizeof(struct rte_eth_dev_callback), 0);
2433         if (user_cb != NULL) {
2434                 user_cb->cb_fn = cb_fn;
2435                 user_cb->cb_arg = cb_arg;
2436                 user_cb->event = event;
2437                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
2438         }
2439
2440         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2441         return (user_cb == NULL) ? -ENOMEM : 0;
2442 }
2443
2444 int
2445 rte_eth_dev_callback_unregister(uint8_t port_id,
2446                         enum rte_eth_event_type event,
2447                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2448 {
2449         int ret;
2450         struct rte_eth_dev *dev;
2451         struct rte_eth_dev_callback *cb, *next;
2452
2453         if (!cb_fn)
2454                 return -EINVAL;
2455
2456         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2457
2458         dev = &rte_eth_devices[port_id];
2459         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2460
2461         ret = 0;
2462         for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
2463
2464                 next = TAILQ_NEXT(cb, next);
2465
2466                 if (cb->cb_fn != cb_fn || cb->event != event ||
2467                                 (cb->cb_arg != (void *)-1 &&
2468                                 cb->cb_arg != cb_arg))
2469                         continue;
2470
2471                 /*
2472                  * if this callback is not executing right now,
2473                  * then remove it.
2474                  */
2475                 if (cb->active == 0) {
2476                         TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
2477                         rte_free(cb);
2478                 } else {
2479                         ret = -EAGAIN;
2480                 }
2481         }
2482
2483         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2484         return ret;
2485 }
2486
2487 void
2488 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
2489         enum rte_eth_event_type event)
2490 {
2491         struct rte_eth_dev_callback *cb_lst;
2492         struct rte_eth_dev_callback dev_cb;
2493
2494         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2495         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
2496                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
2497                         continue;
2498                 dev_cb = *cb_lst;
2499                 cb_lst->active = 1;
2500                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2501                 dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
2502                                                 dev_cb.cb_arg);
2503                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
2504                 cb_lst->active = 0;
2505         }
2506         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2507 }
2508
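/*
 * Usage sketch (illustrative): register a link-status-change handler.
 * _rte_eth_dev_callback_process() above invokes it from the interrupt
 * thread, so the handler must not block or touch the data path.
 *
 *        static void
 *        lsc_handler(uint8_t port_id, enum rte_eth_event_type event, void *arg)
 *        {
 *                RTE_SET_USED(arg);
 *                if (event == RTE_ETH_EVENT_INTR_LSC)
 *                        printf("port %u: link state changed\n", port_id);
 *        }
 *
 *        (void)rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                                            lsc_handler, NULL);
 */
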
2509 int
2510 rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
2511 {
2512         uint32_t vec;
2513         struct rte_eth_dev *dev;
2514         struct rte_intr_handle *intr_handle;
2515         uint16_t qid;
2516         int rc;
2517
2518         if (!rte_eth_dev_is_valid_port(port_id)) {
2519                 RTE_PMD_DEBUG_TRACE("Invalid port_id=%u\n", port_id);
2520                 return -ENODEV;
2521         }
2522
2523         dev = &rte_eth_devices[port_id];
2524         intr_handle = &dev->pci_dev->intr_handle;
2525         if (!intr_handle->intr_vec) {
2526                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2527                 return -EPERM;
2528         }
2529
2530         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
2531                 vec = intr_handle->intr_vec[qid];
2532                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2533                 if (rc && rc != -EEXIST) {
2534                         RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2535                                         " op %d epfd %d vec %u\n",
2536                                         port_id, qid, op, epfd, vec);
2537                 }
2538         }
2539
2540         return 0;
2541 }
2542
2543 const struct rte_memzone *
2544 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
2545                          uint16_t queue_id, size_t size, unsigned align,
2546                          int socket_id)
2547 {
2548         char z_name[RTE_MEMZONE_NAMESIZE];
2549         const struct rte_memzone *mz;
2550
2551         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
2552                  dev->driver->pci_drv.name, ring_name,
2553                  dev->data->port_id, queue_id);
2554
2555         mz = rte_memzone_lookup(z_name);
2556         if (mz)
2557                 return mz;
2558
2559         if (rte_xen_dom0_supported())
2560                 return rte_memzone_reserve_bounded(z_name, size, socket_id,
2561                                                    0, align, RTE_PGSIZE_2M);
2562         else
2563                 return rte_memzone_reserve_aligned(z_name, size, socket_id,
2564                                                    0, align);
2565 }
2566
2567 int
2568 rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
2569                           int epfd, int op, void *data)
2570 {
2571         uint32_t vec;
2572         struct rte_eth_dev *dev;
2573         struct rte_intr_handle *intr_handle;
2574         int rc;
2575
2576         if (!rte_eth_dev_is_valid_port(port_id)) {
2577                 RTE_PMD_DEBUG_TRACE("Invalid port_id=%u\n", port_id);
2578                 return -ENODEV;
2579         }
2580
2581         dev = &rte_eth_devices[port_id];
2582         if (queue_id >= dev->data->nb_rx_queues) {
2583                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
2584                 return -EINVAL;
2585         }
2586
2587         intr_handle = &dev->pci_dev->intr_handle;
2588         if (!intr_handle->intr_vec) {
2589                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2590                 return -EPERM;
2591         }
2592
2593         vec = intr_handle->intr_vec[queue_id];
2594         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2595         if (rc && rc != -EEXIST) {
2596                 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2597                                 " op %d epfd %d vec %u\n",
2598                                 port_id, queue_id, op, epfd, vec);
2599                 return rc;
2600         }
2601
2602         return 0;
2603 }
2604
2605 int
2606 rte_eth_dev_rx_intr_enable(uint8_t port_id,
2607                            uint16_t queue_id)
2608 {
2609         struct rte_eth_dev *dev;
2610
2611         if (!rte_eth_dev_is_valid_port(port_id)) {
2612                 RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2613                 return -ENODEV;
2614         }
2615
2616         dev = &rte_eth_devices[port_id];
2617
2618         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
2619         return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
2620 }
2621
2622 int
2623 rte_eth_dev_rx_intr_disable(uint8_t port_id,
2624                             uint16_t queue_id)
2625 {
2626         struct rte_eth_dev *dev;
2627
2628         if (!rte_eth_dev_is_valid_port(port_id)) {
2629                 RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2630                 return -ENODEV;
2631         }
2632
2633         dev = &rte_eth_devices[port_id];
2634
2635         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
2636         return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
2637 }
2638
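/*
 * Usage sketch (illustrative, modelled on the l3fwd-power pattern): an
 * lcore that sees no traffic can arm the queue interrupt and sleep on the
 * per-thread epoll instance, then disarm it once packets arrive.
 *
 *        struct rte_epoll_event ev;
 *
 *        (void)rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
 *                        RTE_EPOLL_PER_THREAD, RTE_INTR_EVENT_ADD, NULL);
 *        (void)rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *        (void)rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *        (void)rte_eth_dev_rx_intr_disable(port_id, queue_id);
 */
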
2639 #ifdef RTE_NIC_BYPASS
2640 int rte_eth_dev_bypass_init(uint8_t port_id)
2641 {
2642         struct rte_eth_dev *dev;
2643
2644         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2645
2646         dev = &rte_eth_devices[port_id];
2647         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
2648         (*dev->dev_ops->bypass_init)(dev);
2649         return 0;
2650 }
2651
2652 int
2653 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
2654 {
2655         struct rte_eth_dev *dev;
2656
2657         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2658
2659         dev = &rte_eth_devices[port_id];
2660         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2661         (*dev->dev_ops->bypass_state_show)(dev, state);
2662         return 0;
2663 }
2664
2665 int
2666 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
2667 {
2668         struct rte_eth_dev *dev;
2669
2670         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2671
2672         dev = &rte_eth_devices[port_id];
2673         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
2674         (*dev->dev_ops->bypass_state_set)(dev, new_state);
2675         return 0;
2676 }
2677
2678 int
2679 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
2680 {
2681         struct rte_eth_dev *dev;
2682
2683         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2684
2685         dev = &rte_eth_devices[port_id];
2686         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_show, -ENOTSUP);
2687         (*dev->dev_ops->bypass_event_show)(dev, event, state);
2688         return 0;
2689 }
2690
2691 int
2692 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
2693 {
2694         struct rte_eth_dev *dev;
2695
2696         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2697
2698         dev = &rte_eth_devices[port_id];
2699
2700         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
2701         (*dev->dev_ops->bypass_event_set)(dev, event, state);
2702         return 0;
2703 }
2704
2705 int
2706 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
2707 {
2708         struct rte_eth_dev *dev;
2709
2710         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2711
2712         dev = &rte_eth_devices[port_id];
2713
2714         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
2715         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
2716         return 0;
2717 }
2718
2719 int
2720 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
2721 {
2722         struct rte_eth_dev *dev;
2723
2724         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2725
2726         dev = &rte_eth_devices[port_id];
2727
2728         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
2729         (*dev->dev_ops->bypass_ver_show)(dev, ver);
2730         return 0;
2731 }
2732
2733 int
2734 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
2735 {
2736         struct rte_eth_dev *dev;
2737
2738         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2739
2740         dev = &rte_eth_devices[port_id];
2741
2742         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
2743         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
2744         return 0;
2745 }
2746
2747 int
2748 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
2749 {
2750         struct rte_eth_dev *dev;
2751
2752         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2753
2754         dev = &rte_eth_devices[port_id];
2755
2756         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
2757         (*dev->dev_ops->bypass_wd_reset)(dev);
2758         return 0;
2759 }
2760 #endif
2761
2762 int
2763 rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
2764 {
2765         struct rte_eth_dev *dev;
2766
2767         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2768
2769         dev = &rte_eth_devices[port_id];
2770         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2771         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
2772                                 RTE_ETH_FILTER_NOP, NULL);
2773 }
2774
2775 int
2776 rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
2777                        enum rte_filter_op filter_op, void *arg)
2778 {
2779         struct rte_eth_dev *dev;
2780
2781         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2782
2783         dev = &rte_eth_devices[port_id];
2784         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2785         return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
2786 }
2787
2788 void *
2789 rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
2790                 rte_rx_callback_fn fn, void *user_param)
2791 {
2792 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2793         rte_errno = ENOTSUP;
2794         return NULL;
2795 #endif
2796         /* check input parameters */
2797         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2798                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2799                 rte_errno = EINVAL;
2800                 return NULL;
2801         }
2802
2803         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2804
2805         if (cb == NULL) {
2806                 rte_errno = ENOMEM;
2807                 return NULL;
2808         }
2809
2810         cb->fn.rx = fn;
2811         cb->param = user_param;
2812
2813         /* Add the callbacks in fifo order. */
2814         struct rte_eth_rxtx_callback *tail =
2815                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
2816
2817         if (!tail) {
2818                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
2819
2820         } else {
2821                 while (tail->next)
2822                         tail = tail->next;
2823                 tail->next = cb;
2824         }
2825
2826         return cb;
2827 }
2828
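/*
 * Usage sketch (illustrative): a post-RX callback runs on every burst
 * before the application sees it; this one just counts packets. The
 * returned handle is what rte_eth_remove_rx_callback() expects later.
 *
 *        static uint64_t rx_count;
 *
 *        static uint16_t
 *        count_cb(uint8_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *                 uint16_t nb_pkts, uint16_t max_pkts, void *arg)
 *        {
 *                RTE_SET_USED(port); RTE_SET_USED(queue);
 *                RTE_SET_USED(pkts); RTE_SET_USED(max_pkts); RTE_SET_USED(arg);
 *                rx_count += nb_pkts;
 *                return nb_pkts;
 *        }
 *
 *        void *cb = rte_eth_add_rx_callback(port_id, 0, count_cb, NULL);
 */
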
2829 void *
2830 rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
2831                 rte_tx_callback_fn fn, void *user_param)
2832 {
2833 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2834         rte_errno = ENOTSUP;
2835         return NULL;
2836 #endif
2837         /* check input parameters */
2838         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2839                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
2840                 rte_errno = EINVAL;
2841                 return NULL;
2842         }
2843
2844         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2845
2846         if (cb == NULL) {
2847                 rte_errno = ENOMEM;
2848                 return NULL;
2849         }
2850
2851         cb->fn.tx = fn;
2852         cb->param = user_param;
2853
2854         /* Add the callbacks in fifo order. */
2855         struct rte_eth_rxtx_callback *tail =
2856                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
2857
2858         if (!tail) {
2859                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
2860
2861         } else {
2862                 while (tail->next)
2863                         tail = tail->next;
2864                 tail->next = cb;
2865         }
2866
2867         return cb;
2868 }
2869
2870 int
2871 rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
2872                 struct rte_eth_rxtx_callback *user_cb)
2873 {
2874 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2875         return -ENOTSUP;
2876 #endif
2877         /* Check input parameters. */
2878         if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
2879                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2880                 return -EINVAL;
2881         }
2882
2883         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2884         struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
2885         struct rte_eth_rxtx_callback *prev_cb;
2886
2887         /* Reset head pointer and remove user cb if first in the list. */
2888         if (cb == user_cb) {
2889                 dev->post_rx_burst_cbs[queue_id] = user_cb->next;
2890                 return 0;
2891         }
2892
2893         /* Remove the user cb from the callback list. */
2894         do {
2895                 prev_cb = cb;
2896                 cb = cb->next;
2897
2898                 if (cb == user_cb) {
2899                         prev_cb->next = user_cb->next;
2900                         return 0;
2901                 }
2902
2903         } while (cb != NULL);
2904
2905         /* Callback wasn't found. */
2906         return -EINVAL;
2907 }
2908
2909 int
2910 rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
2911                 struct rte_eth_rxtx_callback *user_cb)
2912 {
2913 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2914         return -ENOTSUP;
2915 #endif
2916         /* Check input parameters. */
2917         if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
2918                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
2919                 return -EINVAL;
2920         }
2921
2922         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2923         struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
2924         struct rte_eth_rxtx_callback *prev_cb;
2925
2926         /* Reset head pointer and remove user cb if first in the list. */
2927         if (cb == user_cb) {
2928                 dev->pre_tx_burst_cbs[queue_id] = user_cb->next;
2929                 return 0;
2930         }
2931
2932         /* Remove the user cb from the callback list. */
2933         do {
2934                 prev_cb = cb;
2935                 cb = cb->next;
2936
2937                 if (cb == user_cb) {
2938                         prev_cb->next = user_cb->next;
2939                         return 0;
2940                 }
2941
2942         } while (cb != NULL);
2943
2944         /* Callback wasn't found. */
2945         return -EINVAL;
2946 }
2947
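/*
 * Usage sketch (illustrative): removal only unlinks the callback; the
 * memory is deliberately not freed here because a data-path thread may
 * still be executing it. wait_for_datapath_quiescence() below is a
 * hypothetical placeholder for whatever synchronization the application
 * uses before releasing the handle.
 *
 *        if (rte_eth_remove_rx_callback(port_id, 0, cb) == 0) {
 *                wait_for_datapath_quiescence();
 *                rte_free(cb);
 *        }
 */
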
2948 int
2949 rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
2950         struct rte_eth_rxq_info *qinfo)
2951 {
2952         struct rte_eth_dev *dev;
2953
2954         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2955
2956         if (qinfo == NULL)
2957                 return -EINVAL;
2958
2959         dev = &rte_eth_devices[port_id];
2960         if (queue_id >= dev->data->nb_rx_queues) {
2961                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
2962                 return -EINVAL;
2963         }
2964
2965         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
2966
2967         memset(qinfo, 0, sizeof(*qinfo));
2968         (*dev->dev_ops->rxq_info_get)(dev, queue_id, qinfo);
2969         return 0;
2970 }
2971
2972 int
2973 rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
2974         struct rte_eth_txq_info *qinfo)
2975 {
2976         struct rte_eth_dev *dev;
2977
2978         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2979
2980         if (qinfo == NULL)
2981                 return -EINVAL;
2982
2983         dev = &rte_eth_devices[port_id];
2984         if (queue_id >= dev->data->nb_tx_queues) {
2985                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
2986                 return -EINVAL;
2987         }
2988
2989         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
2990
2991         memset(qinfo, 0, sizeof(*qinfo));
2992         (*dev->dev_ops->txq_info_get)(dev, queue_id, qinfo);
2993         return 0;
2994 }
2995
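/*
 * Usage sketch (illustrative): inspect how RX queue 0 was set up; the TX
 * variant works the same way with struct rte_eth_txq_info.
 *
 *        struct rte_eth_rxq_info qinfo;
 *
 *        if (rte_eth_rx_queue_info_get(port_id, 0, &qinfo) == 0)
 *                printf("rxq0: %u descriptors, mempool %s\n",
 *                        qinfo.nb_desc, qinfo.mp->name);
 */
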
2996 int
2997 rte_eth_dev_set_mc_addr_list(uint8_t port_id,
2998                              struct ether_addr *mc_addr_set,
2999                              uint32_t nb_mc_addr)
3000 {
3001         struct rte_eth_dev *dev;
3002
3003         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3004
3005         dev = &rte_eth_devices[port_id];
3006         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3007         return (*dev->dev_ops->set_mc_addr_list)(dev, mc_addr_set, nb_mc_addr);
3008 }
3009
3010 int
3011 rte_eth_timesync_enable(uint8_t port_id)
3012 {
3013         struct rte_eth_dev *dev;
3014
3015         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3016         dev = &rte_eth_devices[port_id];
3017
3018         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3019         return (*dev->dev_ops->timesync_enable)(dev);
3020 }
3021
3022 int
3023 rte_eth_timesync_disable(uint8_t port_id)
3024 {
3025         struct rte_eth_dev *dev;
3026
3027         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3028         dev = &rte_eth_devices[port_id];
3029
3030         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3031         return (*dev->dev_ops->timesync_disable)(dev);
3032 }
3033
3034 int
3035 rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp,
3036                                    uint32_t flags)
3037 {
3038         struct rte_eth_dev *dev;
3039
3040         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3041         dev = &rte_eth_devices[port_id];
3042
3043         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3044         return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
3045 }
3046
3047 int
3048 rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp)
3049 {
3050         struct rte_eth_dev *dev;
3051
3052         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3053         dev = &rte_eth_devices[port_id];
3054
3055         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3056         return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
3057 }
3058
3059 int
3060 rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta)
3061 {
3062         struct rte_eth_dev *dev;
3063
3064         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3065         dev = &rte_eth_devices[port_id];
3066
3067         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
3068         return (*dev->dev_ops->timesync_adjust_time)(dev, delta);
3069 }
3070
3071 int
3072 rte_eth_timesync_read_time(uint8_t port_id, struct timespec *timestamp)
3073 {
3074         struct rte_eth_dev *dev;
3075
3076         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3077         dev = &rte_eth_devices[port_id];
3078
3079         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
3080         return (*dev->dev_ops->timesync_read_time)(dev, timestamp);
3081 }
3082
3083 int
3084 rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *timestamp)
3085 {
3086         struct rte_eth_dev *dev;
3087
3088         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3089         dev = &rte_eth_devices[port_id];
3090
3091         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
3092         return (*dev->dev_ops->timesync_write_time)(dev, timestamp);
3093 }
3094
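/*
 * Usage sketch (illustrative; the -1000 ns delta is arbitrary): a
 * PTP-style servo enables timesync, samples the NIC clock and applies a
 * signed correction.
 *
 *        struct timespec ts;
 *
 *        if (rte_eth_timesync_enable(port_id) == 0 &&
 *            rte_eth_timesync_read_time(port_id, &ts) == 0)
 *                (void)rte_eth_timesync_adjust_time(port_id, -1000);
 */
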
3095 int
3096 rte_eth_dev_get_reg_length(uint8_t port_id)
3097 {
3098         struct rte_eth_dev *dev;
3099
3100         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3101
3102         dev = &rte_eth_devices[port_id];
3103         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg_length, -ENOTSUP);
3104         return (*dev->dev_ops->get_reg_length)(dev);
3105 }
3106
3107 int
3108 rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info)
3109 {
3110         struct rte_eth_dev *dev;
3111
3112         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3113
3114         dev = &rte_eth_devices[port_id];
3115         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
3116         return (*dev->dev_ops->get_reg)(dev, info);
3117 }
3118
3119 int
3120 rte_eth_dev_get_eeprom_length(uint8_t port_id)
3121 {
3122         struct rte_eth_dev *dev;
3123
3124         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3125
3126         dev = &rte_eth_devices[port_id];
3127         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
3128         return (*dev->dev_ops->get_eeprom_length)(dev);
3129 }
3130
3131 int
3132 rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
3133 {
3134         struct rte_eth_dev *dev;
3135
3136         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3137
3138         dev = &rte_eth_devices[port_id];
3139         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
3140         return (*dev->dev_ops->get_eeprom)(dev, info);
3141 }
3142
3143 int
3144 rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
3145 {
3146         struct rte_eth_dev *dev;
3147
3148         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3149
3150         dev = &rte_eth_devices[port_id];
3151         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
3152         return (*dev->dev_ops->set_eeprom)(dev, info);
3153 }
3154
3155 int
3156 rte_eth_dev_get_dcb_info(uint8_t port_id,
3157                              struct rte_eth_dcb_info *dcb_info)
3158 {
3159         struct rte_eth_dev *dev;
3160
3161         if (!rte_eth_dev_is_valid_port(port_id)) {
3162                 RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3163                 return -ENODEV;
3164         }
3165
3166         dev = &rte_eth_devices[port_id];
3167         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
3168
3169         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
3170         return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
3171 }
3172
3173 void
3174 rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev, struct rte_pci_device *pci_dev)
3175 {
3176         if ((eth_dev == NULL) || (pci_dev == NULL)) {
3177                 RTE_PMD_DEBUG_TRACE("NULL pointer eth_dev=%p pci_dev=%p\n",
3178                                 eth_dev, pci_dev);
3179                 return;
3180         }
3181
3182         eth_dev->data->dev_flags = 0;
3183         if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
3184                 eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
3185         if (pci_dev->driver->drv_flags & RTE_PCI_DRV_DETACHABLE)
3186                 eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
3187
3188         eth_dev->data->kdrv = pci_dev->kdrv;
3189         eth_dev->data->numa_node = pci_dev->numa_node;
3190         eth_dev->data->drv_name = pci_dev->driver->name;
3191 }