dpdk.git: lib/librte_ether/rte_ethdev.c (commit 0b9f701c9d7944a520242107178289227027fffa)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/types.h>
35 #include <sys/queue.h>
36 #include <ctype.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include <stdarg.h>
41 #include <errno.h>
42 #include <stdint.h>
43 #include <inttypes.h>
44 #include <netinet/in.h>
45
46 #include <rte_byteorder.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_interrupts.h>
50 #include <rte_pci.h>
51 #include <rte_memory.h>
52 #include <rte_memcpy.h>
53 #include <rte_memzone.h>
54 #include <rte_launch.h>
55 #include <rte_eal.h>
56 #include <rte_per_lcore.h>
57 #include <rte_lcore.h>
58 #include <rte_atomic.h>
59 #include <rte_branch_prediction.h>
60 #include <rte_common.h>
61 #include <rte_mempool.h>
62 #include <rte_malloc.h>
63 #include <rte_mbuf.h>
64 #include <rte_errno.h>
65 #include <rte_spinlock.h>
66 #include <rte_string_fns.h>
67
68 #include "rte_ether.h"
69 #include "rte_ethdev.h"
70
71 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
72 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
73 static struct rte_eth_dev_data *rte_eth_dev_data;
74 static uint8_t nb_ports;
75
76 /* spinlock for eth device callbacks */
77 static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
78
79 /* spinlock for add/remove rx callbacks */
80 static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
81
82 /* spinlock for add/remove tx callbacks */
83 static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
84
85 /* store statistics names and their offsets in the stats structure */
86 struct rte_eth_xstats_name_off {
87         char name[RTE_ETH_XSTATS_NAME_SIZE];
88         unsigned offset;
89 };
90
91 static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
92         {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
93         {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
94         {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
95         {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
96         {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
97         {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
98         {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
99                 rx_nombuf)},
100 };
101
102 #define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
103
104 static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
105         {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
106         {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
107         {"errors", offsetof(struct rte_eth_stats, q_errors)},
108 };
109
110 #define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
111                 sizeof(rte_rxq_stats_strings[0]))
112
113 static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
114         {"packets", offsetof(struct rte_eth_stats, q_opackets)},
115         {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
116 };
117 #define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
118                 sizeof(rte_txq_stats_strings[0]))
119
120
121 /**
122  * The user application callback description.
123  *
124  * It contains callback address to be registered by user application,
125  * the pointer to the parameters for callback, and the event type.
126  */
127 struct rte_eth_dev_callback {
128         TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
129         rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
130         void *cb_arg;                           /**< Parameter for callback */
131         enum rte_eth_event_type event;          /**< Interrupt event type */
132         uint32_t active;                        /**< Callback is executing */
133 };
134
135 enum {
136         STAT_QMAP_TX = 0,
137         STAT_QMAP_RX
138 };
139
140 enum {
141         DEV_DETACHED = 0,
142         DEV_ATTACHED
143 };
144
145 static void
146 rte_eth_dev_data_alloc(void)
147 {
148         const unsigned flags = 0;
149         const struct rte_memzone *mz;
150
151         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
152                 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
153                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
154                                 rte_socket_id(), flags);
155         } else
156                 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
157         if (mz == NULL)
158                 rte_panic("Cannot allocate memzone for ethernet port data\n");
159
160         rte_eth_dev_data = mz->addr;
161         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
162                 memset(rte_eth_dev_data, 0,
163                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
164 }
165
166 struct rte_eth_dev *
167 rte_eth_dev_allocated(const char *name)
168 {
169         unsigned i;
170
171         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
172                 if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
173                     strcmp(rte_eth_devices[i].data->name, name) == 0)
174                         return &rte_eth_devices[i];
175         }
176         return NULL;
177 }
178
179 static uint8_t
180 rte_eth_dev_find_free_port(void)
181 {
182         unsigned i;
183
184         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
185                 if (rte_eth_devices[i].attached == DEV_DETACHED)
186                         return i;
187         }
188         return RTE_MAX_ETHPORTS;
189 }
190
191 struct rte_eth_dev *
192 rte_eth_dev_allocate(const char *name, enum rte_eth_dev_type type)
193 {
194         uint8_t port_id;
195         struct rte_eth_dev *eth_dev;
196
197         port_id = rte_eth_dev_find_free_port();
198         if (port_id == RTE_MAX_ETHPORTS) {
199                 RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
200                 return NULL;
201         }
202
203         if (rte_eth_dev_data == NULL)
204                 rte_eth_dev_data_alloc();
205
206         if (rte_eth_dev_allocated(name) != NULL) {
207                 RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
208                                 name);
209                 return NULL;
210         }
211
212         eth_dev = &rte_eth_devices[port_id];
213         eth_dev->data = &rte_eth_dev_data[port_id];
214         snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
215         eth_dev->data->port_id = port_id;
216         eth_dev->attached = DEV_ATTACHED;
217         eth_dev->dev_type = type;
218         nb_ports++;
219         return eth_dev;
220 }
221
222 static int
223 rte_eth_dev_create_unique_device_name(char *name, size_t size,
224                 struct rte_pci_device *pci_dev)
225 {
226         int ret;
227
228         ret = snprintf(name, size, "%d:%d.%d",
229                         pci_dev->addr.bus, pci_dev->addr.devid,
230                         pci_dev->addr.function);
231         if (ret < 0)
232                 return ret;
233         return 0;
234 }
235
236 int
237 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
238 {
239         if (eth_dev == NULL)
240                 return -EINVAL;
241
242         eth_dev->attached = DEV_DETACHED;
243         nb_ports--;
244         return 0;
245 }
246
247 int
248 rte_eth_dev_pci_probe(struct rte_pci_driver *pci_drv,
249                       struct rte_pci_device *pci_dev)
250 {
251         struct eth_driver    *eth_drv;
252         struct rte_eth_dev *eth_dev;
253         char ethdev_name[RTE_ETH_NAME_MAX_LEN];
254
255         int diag;
256
257         eth_drv = (struct eth_driver *)pci_drv;
258
259         /* Create unique Ethernet device name using PCI address */
260         rte_eth_dev_create_unique_device_name(ethdev_name,
261                         sizeof(ethdev_name), pci_dev);
262
263         eth_dev = rte_eth_dev_allocate(ethdev_name, RTE_ETH_DEV_PCI);
264         if (eth_dev == NULL)
265                 return -ENOMEM;
266
267         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
268                 eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
269                                   eth_drv->dev_private_size,
270                                   RTE_CACHE_LINE_SIZE);
271                 if (eth_dev->data->dev_private == NULL)
272                         rte_panic("Cannot allocate memzone for private port data\n");
273         }
274         eth_dev->pci_dev = pci_dev;
275         eth_dev->driver = eth_drv;
276         eth_dev->data->rx_mbuf_alloc_failed = 0;
277
278         /* init user callbacks */
279         TAILQ_INIT(&(eth_dev->link_intr_cbs));
280
281         /*
282          * Set the default MTU.
283          */
284         eth_dev->data->mtu = ETHER_MTU;
285
286         /* Invoke PMD device initialization function */
287         diag = (*eth_drv->eth_dev_init)(eth_dev);
288         if (diag == 0)
289                 return 0;
290
291         RTE_PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%u device_id=0x%x) failed\n",
292                         pci_drv->name,
293                         (unsigned) pci_dev->id.vendor_id,
294                         (unsigned) pci_dev->id.device_id);
295         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
296                 rte_free(eth_dev->data->dev_private);
297         rte_eth_dev_release_port(eth_dev);
298         return diag;
299 }
300
301 int
302 rte_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
303 {
304         const struct eth_driver *eth_drv;
305         struct rte_eth_dev *eth_dev;
306         char ethdev_name[RTE_ETH_NAME_MAX_LEN];
307         int ret;
308
309         if (pci_dev == NULL)
310                 return -EINVAL;
311
312         /* Create unique Ethernet device name using PCI address */
313         rte_eth_dev_create_unique_device_name(ethdev_name,
314                         sizeof(ethdev_name), pci_dev);
315
316         eth_dev = rte_eth_dev_allocated(ethdev_name);
317         if (eth_dev == NULL)
318                 return -ENODEV;
319
320         eth_drv = (const struct eth_driver *)pci_dev->driver;
321
322         /* Invoke PMD device uninit function */
323         if (*eth_drv->eth_dev_uninit) {
324                 ret = (*eth_drv->eth_dev_uninit)(eth_dev);
325                 if (ret)
326                         return ret;
327         }
328
329         /* free ether device */
330         rte_eth_dev_release_port(eth_dev);
331
332         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
333                 rte_free(eth_dev->data->dev_private);
334
335         eth_dev->pci_dev = NULL;
336         eth_dev->driver = NULL;
337         eth_dev->data = NULL;
338
339         return 0;
340 }
341
342 int
343 rte_eth_dev_is_valid_port(uint8_t port_id)
344 {
345         if (port_id >= RTE_MAX_ETHPORTS ||
346             rte_eth_devices[port_id].attached != DEV_ATTACHED)
347                 return 0;
348         else
349                 return 1;
350 }
351
352 int
353 rte_eth_dev_socket_id(uint8_t port_id)
354 {
355         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
356         return rte_eth_devices[port_id].data->numa_node;
357 }
358
359 uint8_t
360 rte_eth_dev_count(void)
361 {
362         return nb_ports;
363 }
364
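/*
 * Usage sketch (illustrative, not part of the file): iterating over all
 * currently attached ports with the count returned above.
 *
 *        uint8_t port;
 *
 *        for (port = 0; port < rte_eth_dev_count(); port++)
 *                rte_eth_promiscuous_enable(port);
 */
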
365 static enum rte_eth_dev_type
366 rte_eth_dev_get_device_type(uint8_t port_id)
367 {
368         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, RTE_ETH_DEV_UNKNOWN);
369         return rte_eth_devices[port_id].dev_type;
370 }
371
372 static int
373 rte_eth_dev_get_addr_by_port(uint8_t port_id, struct rte_pci_addr *addr)
374 {
375         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
376
377         if (addr == NULL) {
378                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
379                 return -EINVAL;
380         }
381
382         *addr = rte_eth_devices[port_id].pci_dev->addr;
383         return 0;
384 }
385
386 int
387 rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
388 {
389         char *tmp;
390
391         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
392
393         if (name == NULL) {
394                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
395                 return -EINVAL;
396         }
397
398         /* shouldn't check 'rte_eth_devices[i].data',
399          * because it might be overwritten by VDEV PMD */
400         tmp = rte_eth_dev_data[port_id].name;
401         strcpy(name, tmp);
402         return 0;
403 }
404
405 int
406 rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
407 {
408         int i;
409
410         if (name == NULL) {
411                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
412                 return -EINVAL;
413         }
414
415         *port_id = RTE_MAX_ETHPORTS;
416
417         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
418
419                 if (!strncmp(name,
420                         rte_eth_dev_data[i].name, strlen(name))) {
421
422                         *port_id = i;
423
424                         return 0;
425                 }
426         }
427         return -ENODEV;
428 }
429
430 static int
431 rte_eth_dev_get_port_by_addr(const struct rte_pci_addr *addr, uint8_t *port_id)
432 {
433         int i;
434         struct rte_pci_device *pci_dev = NULL;
435
436         if (addr == NULL) {
437                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
438                 return -EINVAL;
439         }
440
441         *port_id = RTE_MAX_ETHPORTS;
442
443         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
444
445                 pci_dev = rte_eth_devices[i].pci_dev;
446
447                 if (pci_dev &&
448                         !rte_eal_compare_pci_addr(&pci_dev->addr, addr)) {
449
450                         *port_id = i;
451
452                         return 0;
453                 }
454         }
455         return -ENODEV;
456 }
457
458 static int
459 rte_eth_dev_is_detachable(uint8_t port_id)
460 {
461         uint32_t dev_flags;
462
463         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
464
465         switch (rte_eth_devices[port_id].data->kdrv) {
466         case RTE_KDRV_IGB_UIO:
467         case RTE_KDRV_UIO_GENERIC:
468         case RTE_KDRV_NIC_UIO:
469         case RTE_KDRV_NONE:
470                 break;
471         case RTE_KDRV_VFIO:
472         default:
473                 return -ENOTSUP;
474         }
475         dev_flags = rte_eth_devices[port_id].data->dev_flags;
476         if ((dev_flags & RTE_ETH_DEV_DETACHABLE) &&
477                 (!(dev_flags & RTE_ETH_DEV_BONDED_SLAVE)))
478                 return 0;
479         else
480                 return 1;
481 }
482
483 /* attach the new physical device, then store port_id of the device */
484 static int
485 rte_eth_dev_attach_pdev(struct rte_pci_addr *addr, uint8_t *port_id)
486 {
487         /* re-construct pci_device_list */
488         if (rte_eal_pci_scan())
489                 goto err;
490         /* Invoke probe func of the driver that can handle the new device. */
491         if (rte_eal_pci_probe_one(addr))
492                 goto err;
493
494         if (rte_eth_dev_get_port_by_addr(addr, port_id))
495                 goto err;
496
497         return 0;
498 err:
499         return -1;
500 }
501
502 /* detach the new physical device, then store pci_addr of the device */
503 static int
504 rte_eth_dev_detach_pdev(uint8_t port_id, struct rte_pci_addr *addr)
505 {
506         struct rte_pci_addr freed_addr;
507         struct rte_pci_addr vp;
508
509         /* get pci address by port id */
510         if (rte_eth_dev_get_addr_by_port(port_id, &freed_addr))
511                 goto err;
512
513         /* Zeroed pci addr means the port comes from virtual device */
514         vp.domain = vp.bus = vp.devid = vp.function = 0;
515         if (rte_eal_compare_pci_addr(&vp, &freed_addr) == 0)
516                 goto err;
517
518         /* invoke remove func of the pci driver,
519          * also remove the device from pci_device_list */
520         if (rte_eal_pci_detach(&freed_addr))
521                 goto err;
522
523         *addr = freed_addr;
524         return 0;
525 err:
526         return -1;
527 }
528
529 /* attach the new virtual device, then store port_id of the device */
530 static int
531 rte_eth_dev_attach_vdev(const char *vdevargs, uint8_t *port_id)
532 {
533         char *name = NULL, *args = NULL;
534         int ret = -1;
535
536         /* parse vdevargs, then retrieve device name and args */
537         if (rte_eal_parse_devargs_str(vdevargs, &name, &args))
538                 goto end;
539
540         /* walk through dev_driver_list to find the driver of the device,
541          * then invoke probe function of the driver.
542          * rte_eal_vdev_init() updates port_id allocated after
543          * initialization.
544          */
545         if (rte_eal_vdev_init(name, args))
546                 goto end;
547
548         if (rte_eth_dev_get_port_by_name(name, port_id))
549                 goto end;
550
551         ret = 0;
552 end:
553         free(name);
554         free(args);
555
556         return ret;
557 }
558
559 /* detach the new virtual device, then store the name of the device */
560 static int
561 rte_eth_dev_detach_vdev(uint8_t port_id, char *vdevname)
562 {
563         char name[RTE_ETH_NAME_MAX_LEN];
564
565         /* get device name by port id */
566         if (rte_eth_dev_get_name_by_port(port_id, name))
567                 goto err;
568         /* walk through dev_driver_list to find the driver of the device,
569          * then invoke uninit function of the driver */
570         if (rte_eal_vdev_uninit(name))
571                 goto err;
572
573         strncpy(vdevname, name, sizeof(name));
574         return 0;
575 err:
576         return -1;
577 }
578
579 /* attach the new device, then store port_id of the device */
580 int
581 rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
582 {
583         struct rte_pci_addr addr;
584         int ret = -1;
585
586         if ((devargs == NULL) || (port_id == NULL)) {
587                 ret = -EINVAL;
588                 goto err;
589         }
590
591         if (eal_parse_pci_DomBDF(devargs, &addr) == 0) {
592                 ret = rte_eth_dev_attach_pdev(&addr, port_id);
593                 if (ret < 0)
594                         goto err;
595         } else {
596                 ret = rte_eth_dev_attach_vdev(devargs, port_id);
597                 if (ret < 0)
598                         goto err;
599         }
600
601         return 0;
602 err:
603         RTE_LOG(ERR, EAL, "Driver, cannot attach the device\n");
604         return ret;
605 }
606
607 /* detach the device, then store the name of the device */
608 int
609 rte_eth_dev_detach(uint8_t port_id, char *name)
610 {
611         struct rte_pci_addr addr;
612         int ret = -1;
613
614         if (name == NULL) {
615                 ret = -EINVAL;
616                 goto err;
617         }
618
619         /* check whether the driver supports detach feature, or not */
620         if (rte_eth_dev_is_detachable(port_id))
621                 goto err;
622
623         if (rte_eth_dev_get_device_type(port_id) == RTE_ETH_DEV_PCI) {
624                 ret = rte_eth_dev_get_addr_by_port(port_id, &addr);
625                 if (ret < 0)
626                         goto err;
627
628                 ret = rte_eth_dev_detach_pdev(port_id, &addr);
629                 if (ret < 0)
630                         goto err;
631
632                 snprintf(name, RTE_ETH_NAME_MAX_LEN,
633                         "%04x:%02x:%02x.%d",
634                         addr.domain, addr.bus,
635                         addr.devid, addr.function);
636         } else {
637                 ret = rte_eth_dev_detach_vdev(port_id, name);
638                 if (ret < 0)
639                         goto err;
640         }
641
642         return 0;
643
644 err:
645         RTE_LOG(ERR, EAL, "Driver, cannot detach the device\n");
646         return ret;
647 }
648
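/*
 * Usage sketch (illustrative): hot-plugging a port with the attach/detach
 * helpers above. "eth_null0" is just an example devargs string; a PCI
 * DomBDF address such as "0000:01:00.0" works the same way.
 *
 *        uint8_t port_id;
 *        char name[RTE_ETH_NAME_MAX_LEN];
 *
 *        if (rte_eth_dev_attach("eth_null0", &port_id) < 0)
 *                rte_exit(EXIT_FAILURE, "cannot attach device\n");
 *        ... use the port ...
 *        if (rte_eth_dev_detach(port_id, name) < 0)
 *                rte_exit(EXIT_FAILURE, "cannot detach %s\n", name);
 */
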
649 static int
650 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
651 {
652         uint16_t old_nb_queues = dev->data->nb_rx_queues;
653         void **rxq;
654         unsigned i;
655
656         if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
657                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
658                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
659                                 RTE_CACHE_LINE_SIZE);
660                 if (dev->data->rx_queues == NULL) {
661                         dev->data->nb_rx_queues = 0;
662                         return -(ENOMEM);
663                 }
664         } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
665                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
666
667                 rxq = dev->data->rx_queues;
668
669                 for (i = nb_queues; i < old_nb_queues; i++)
670                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
671                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
672                                 RTE_CACHE_LINE_SIZE);
673                 if (rxq == NULL)
674                         return -(ENOMEM);
675                 if (nb_queues > old_nb_queues) {
676                         uint16_t new_qs = nb_queues - old_nb_queues;
677
678                         memset(rxq + old_nb_queues, 0,
679                                 sizeof(rxq[0]) * new_qs);
680                 }
681
682                 dev->data->rx_queues = rxq;
683
684         } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
685                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
686
687                 rxq = dev->data->rx_queues;
688
689                 for (i = nb_queues; i < old_nb_queues; i++)
690                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
691         }
692         dev->data->nb_rx_queues = nb_queues;
693         return 0;
694 }
695
696 int
697 rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
698 {
699         struct rte_eth_dev *dev;
700
701         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
702
703         dev = &rte_eth_devices[port_id];
704         if (rx_queue_id >= dev->data->nb_rx_queues) {
705                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
706                 return -EINVAL;
707         }
708
709         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
710
711         if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
712                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
713                         " already started\n",
714                         rx_queue_id, port_id);
715                 return 0;
716         }
717
718         return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
719
720 }
721
722 int
723 rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
724 {
725         struct rte_eth_dev *dev;
726
727         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
728
729         dev = &rte_eth_devices[port_id];
730         if (rx_queue_id >= dev->data->nb_rx_queues) {
731                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
732                 return -EINVAL;
733         }
734
735         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
736
737         if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
738                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
739                         " already stopped\n",
740                         rx_queue_id, port_id);
741                 return 0;
742         }
743
744         return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
745
746 }
747
748 int
749 rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
750 {
751         struct rte_eth_dev *dev;
752
753         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
754
755         dev = &rte_eth_devices[port_id];
756         if (tx_queue_id >= dev->data->nb_tx_queues) {
757                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
758                 return -EINVAL;
759         }
760
761         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
762
763         if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
764                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
765                         " already started\n",
766                         tx_queue_id, port_id);
767                 return 0;
768         }
769
770         return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
771
772 }
773
774 int
775 rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
776 {
777         struct rte_eth_dev *dev;
778
779         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
780
781         dev = &rte_eth_devices[port_id];
782         if (tx_queue_id >= dev->data->nb_tx_queues) {
783                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
784                 return -EINVAL;
785         }
786
787         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
788
789         if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
790                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
791                         " already stopped\n",
792                         tx_queue_id, port_id);
793                 return 0;
794         }
795
796         return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
797
798 }
799
800 static int
801 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
802 {
803         uint16_t old_nb_queues = dev->data->nb_tx_queues;
804         void **txq;
805         unsigned i;
806
807         if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
808                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
809                                                    sizeof(dev->data->tx_queues[0]) * nb_queues,
810                                                    RTE_CACHE_LINE_SIZE);
811                 if (dev->data->tx_queues == NULL) {
812                         dev->data->nb_tx_queues = 0;
813                         return -(ENOMEM);
814                 }
815         } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
816                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
817
818                 txq = dev->data->tx_queues;
819
820                 for (i = nb_queues; i < old_nb_queues; i++)
821                         (*dev->dev_ops->tx_queue_release)(txq[i]);
822                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
823                                   RTE_CACHE_LINE_SIZE);
824                 if (txq == NULL)
825                         return -ENOMEM;
826                 if (nb_queues > old_nb_queues) {
827                         uint16_t new_qs = nb_queues - old_nb_queues;
828
829                         memset(txq + old_nb_queues, 0,
830                                sizeof(txq[0]) * new_qs);
831                 }
832
833                 dev->data->tx_queues = txq;
834
835         } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
836                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
837
838                 txq = dev->data->tx_queues;
839
840                 for (i = nb_queues; i < old_nb_queues; i++)
841                         (*dev->dev_ops->tx_queue_release)(txq[i]);
842         }
843         dev->data->nb_tx_queues = nb_queues;
844         return 0;
845 }
846
847 uint32_t
848 rte_eth_speed_bitflag(uint32_t speed, int duplex)
849 {
850         switch (speed) {
851         case ETH_SPEED_NUM_10M:
852                 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
853         case ETH_SPEED_NUM_100M:
854                 return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
855         case ETH_SPEED_NUM_1G:
856                 return ETH_LINK_SPEED_1G;
857         case ETH_SPEED_NUM_2_5G:
858                 return ETH_LINK_SPEED_2_5G;
859         case ETH_SPEED_NUM_5G:
860                 return ETH_LINK_SPEED_5G;
861         case ETH_SPEED_NUM_10G:
862                 return ETH_LINK_SPEED_10G;
863         case ETH_SPEED_NUM_20G:
864                 return ETH_LINK_SPEED_20G;
865         case ETH_SPEED_NUM_25G:
866                 return ETH_LINK_SPEED_25G;
867         case ETH_SPEED_NUM_40G:
868                 return ETH_LINK_SPEED_40G;
869         case ETH_SPEED_NUM_50G:
870                 return ETH_LINK_SPEED_50G;
871         case ETH_SPEED_NUM_56G:
872                 return ETH_LINK_SPEED_56G;
873         case ETH_SPEED_NUM_100G:
874                 return ETH_LINK_SPEED_100G;
875         default:
876                 return 0;
877         }
878 }
879
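/*
 * Usage sketch (illustrative): forcing a fixed 10G full-duplex link in
 * rte_eth_conf using the translation above. ETH_LINK_SPEED_FIXED and
 * ETH_LINK_FULL_DUPLEX come from rte_ethdev.h.
 *
 *        struct rte_eth_conf conf = { 0 };
 *
 *        conf.link_speeds = ETH_LINK_SPEED_FIXED |
 *                rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX);
 */
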
880 int
881 rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
882                       const struct rte_eth_conf *dev_conf)
883 {
884         struct rte_eth_dev *dev;
885         struct rte_eth_dev_info dev_info;
886         int diag;
887
888         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
889
890         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
891                 RTE_PMD_DEBUG_TRACE(
892                         "Number of RX queues requested (%u) is greater than max supported(%d)\n",
893                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
894                 return -EINVAL;
895         }
896
897         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
898                 RTE_PMD_DEBUG_TRACE(
899                         "Number of TX queues requested (%u) is greater than max supported(%d)\n",
900                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
901                 return -EINVAL;
902         }
903
904         dev = &rte_eth_devices[port_id];
905
906         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
907         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
908
909         if (dev->data->dev_started) {
910                 RTE_PMD_DEBUG_TRACE(
911                     "port %d must be stopped to allow configuration\n", port_id);
912                 return -EBUSY;
913         }
914
915         /* Copy the dev_conf parameter into the dev structure */
916         memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
917
918         /*
919          * Check that the numbers of RX and TX queues are not greater
920          * than the maximum number of RX and TX queues supported by the
921          * configured device.
922          */
923         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
924
925         if (nb_rx_q == 0 && nb_tx_q == 0) {
926                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d both rx and tx queue cannot be 0\n", port_id);
927                 return -EINVAL;
928         }
929
930         if (nb_rx_q > dev_info.max_rx_queues) {
931                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
932                                 port_id, nb_rx_q, dev_info.max_rx_queues);
933                 return -EINVAL;
934         }
935
936         if (nb_tx_q > dev_info.max_tx_queues) {
937                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
938                                 port_id, nb_tx_q, dev_info.max_tx_queues);
939                 return -EINVAL;
940         }
941
942         /*
943          * If link state interrupt is enabled, check that the
944          * device supports it.
945          */
946         if ((dev_conf->intr_conf.lsc == 1) &&
947                 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
948                         RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
949                                         dev->data->drv_name);
950                         return -EINVAL;
951         }
952
953         /*
954          * If jumbo frames are enabled, check that the maximum RX packet
955          * length is supported by the configured device.
956          */
957         if (dev_conf->rxmode.jumbo_frame == 1) {
958                 if (dev_conf->rxmode.max_rx_pkt_len >
959                     dev_info.max_rx_pktlen) {
960                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
961                                 " > max valid value %u\n",
962                                 port_id,
963                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
964                                 (unsigned)dev_info.max_rx_pktlen);
965                         return -EINVAL;
966                 } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
967                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
968                                 " < min valid value %u\n",
969                                 port_id,
970                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
971                                 (unsigned)ETHER_MIN_LEN);
972                         return -EINVAL;
973                 }
974         } else {
975                 if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
976                         dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
977                         /* Use default value */
978                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
979                                                         ETHER_MAX_LEN;
980         }
981
982         /*
983          * Setup new number of RX/TX queues and reconfigure device.
984          */
985         diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
986         if (diag != 0) {
987                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
988                                 port_id, diag);
989                 return diag;
990         }
991
992         diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
993         if (diag != 0) {
994                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
995                                 port_id, diag);
996                 rte_eth_dev_rx_queue_config(dev, 0);
997                 return diag;
998         }
999
1000         diag = (*dev->dev_ops->dev_configure)(dev);
1001         if (diag != 0) {
1002                 RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
1003                                 port_id, diag);
1004                 rte_eth_dev_rx_queue_config(dev, 0);
1005                 rte_eth_dev_tx_queue_config(dev, 0);
1006                 return diag;
1007         }
1008
1009         return 0;
1010 }
1011
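/*
 * Usage sketch (illustrative): a minimal configuration that passes the
 * checks above, with one RX and one TX queue and jumbo frames enabled.
 * The 9000 byte frame length is an example value.
 *
 *        struct rte_eth_conf conf = { 0 };
 *        int ret;
 *
 *        conf.rxmode.jumbo_frame = 1;
 *        conf.rxmode.max_rx_pkt_len = 9000;
 *        ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
 *        if (ret < 0)
 *                rte_exit(EXIT_FAILURE, "cannot configure port %u\n", port_id);
 */
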
1012 static void
1013 rte_eth_dev_config_restore(uint8_t port_id)
1014 {
1015         struct rte_eth_dev *dev;
1016         struct rte_eth_dev_info dev_info;
1017         struct ether_addr addr;
1018         uint16_t i;
1019         uint32_t pool = 0;
1020
1021         dev = &rte_eth_devices[port_id];
1022
1023         rte_eth_dev_info_get(port_id, &dev_info);
1024
1025         if (RTE_ETH_DEV_SRIOV(dev).active)
1026                 pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;
1027
1028         /* replay MAC address configuration */
1029         for (i = 0; i < dev_info.max_mac_addrs; i++) {
1030                 addr = dev->data->mac_addrs[i];
1031
1032                 /* skip zero address */
1033                 if (is_zero_ether_addr(&addr))
1034                         continue;
1035
1036                 /* add address to the hardware */
1037                 if  (*dev->dev_ops->mac_addr_add &&
1038                         (dev->data->mac_pool_sel[i] & (1ULL << pool)))
1039                         (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
1040                 else {
1041                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
1042                                         port_id);
1043                         /* exit the loop but not return an error */
1044                         break;
1045                 }
1046         }
1047
1048         /* replay promiscuous configuration */
1049         if (rte_eth_promiscuous_get(port_id) == 1)
1050                 rte_eth_promiscuous_enable(port_id);
1051         else if (rte_eth_promiscuous_get(port_id) == 0)
1052                 rte_eth_promiscuous_disable(port_id);
1053
1054         /* replay all multicast configuration */
1055         if (rte_eth_allmulticast_get(port_id) == 1)
1056                 rte_eth_allmulticast_enable(port_id);
1057         else if (rte_eth_allmulticast_get(port_id) == 0)
1058                 rte_eth_allmulticast_disable(port_id);
1059 }
1060
1061 int
1062 rte_eth_dev_start(uint8_t port_id)
1063 {
1064         struct rte_eth_dev *dev;
1065         int diag;
1066
1067         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1068
1069         dev = &rte_eth_devices[port_id];
1070
1071         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1072
1073         if (dev->data->dev_started != 0) {
1074                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
1075                         " already started\n",
1076                         port_id);
1077                 return 0;
1078         }
1079
1080         diag = (*dev->dev_ops->dev_start)(dev);
1081         if (diag == 0)
1082                 dev->data->dev_started = 1;
1083         else
1084                 return diag;
1085
1086         rte_eth_dev_config_restore(port_id);
1087
1088         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1089                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1090                 (*dev->dev_ops->link_update)(dev, 0);
1091         }
1092         return 0;
1093 }
1094
1095 void
1096 rte_eth_dev_stop(uint8_t port_id)
1097 {
1098         struct rte_eth_dev *dev;
1099
1100         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1101         dev = &rte_eth_devices[port_id];
1102
1103         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1104
1105         if (dev->data->dev_started == 0) {
1106                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
1107                         " already stopped\n",
1108                         port_id);
1109                 return;
1110         }
1111
1112         dev->data->dev_started = 0;
1113         (*dev->dev_ops->dev_stop)(dev);
1114 }
1115
1116 int
1117 rte_eth_dev_set_link_up(uint8_t port_id)
1118 {
1119         struct rte_eth_dev *dev;
1120
1121         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1122
1123         dev = &rte_eth_devices[port_id];
1124
1125         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1126         return (*dev->dev_ops->dev_set_link_up)(dev);
1127 }
1128
1129 int
1130 rte_eth_dev_set_link_down(uint8_t port_id)
1131 {
1132         struct rte_eth_dev *dev;
1133
1134         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1135
1136         dev = &rte_eth_devices[port_id];
1137
1138         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1139         return (*dev->dev_ops->dev_set_link_down)(dev);
1140 }
1141
1142 void
1143 rte_eth_dev_close(uint8_t port_id)
1144 {
1145         struct rte_eth_dev *dev;
1146
1147         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1148         dev = &rte_eth_devices[port_id];
1149
1150         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1151         dev->data->dev_started = 0;
1152         (*dev->dev_ops->dev_close)(dev);
1153
1154         rte_free(dev->data->rx_queues);
1155         dev->data->rx_queues = NULL;
1156         rte_free(dev->data->tx_queues);
1157         dev->data->tx_queues = NULL;
1158 }
1159
1160 int
1161 rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
1162                        uint16_t nb_rx_desc, unsigned int socket_id,
1163                        const struct rte_eth_rxconf *rx_conf,
1164                        struct rte_mempool *mp)
1165 {
1166         int ret;
1167         uint32_t mbp_buf_size;
1168         struct rte_eth_dev *dev;
1169         struct rte_eth_dev_info dev_info;
1170
1171         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1172
1173         dev = &rte_eth_devices[port_id];
1174         if (rx_queue_id >= dev->data->nb_rx_queues) {
1175                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
1176                 return -EINVAL;
1177         }
1178
1179         if (dev->data->dev_started) {
1180                 RTE_PMD_DEBUG_TRACE(
1181                     "port %d must be stopped to allow configuration\n", port_id);
1182                 return -EBUSY;
1183         }
1184
1185         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1186         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1187
1188         /*
1189          * Check the size of the mbuf data buffer.
1190          * This value must be provided in the private data of the memory pool.
1191          * First check that the memory pool has a valid private data.
1192          */
1193         rte_eth_dev_info_get(port_id, &dev_info);
1194         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1195                 RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
1196                                 mp->name, (int) mp->private_data_size,
1197                                 (int) sizeof(struct rte_pktmbuf_pool_private));
1198                 return -ENOSPC;
1199         }
1200         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1201
1202         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1203                 RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
1204                                 "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
1205                                 "=%d)\n",
1206                                 mp->name,
1207                                 (int)mbp_buf_size,
1208                                 (int)(RTE_PKTMBUF_HEADROOM +
1209                                       dev_info.min_rx_bufsize),
1210                                 (int)RTE_PKTMBUF_HEADROOM,
1211                                 (int)dev_info.min_rx_bufsize);
1212                 return -EINVAL;
1213         }
1214
1215         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1216                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1217                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1218
1219                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
1220                         "should be: <= %hu, >= %hu, and a product of %hu\n",
1221                         nb_rx_desc,
1222                         dev_info.rx_desc_lim.nb_max,
1223                         dev_info.rx_desc_lim.nb_min,
1224                         dev_info.rx_desc_lim.nb_align);
1225                 return -EINVAL;
1226         }
1227
1228         if (rx_conf == NULL)
1229                 rx_conf = &dev_info.default_rxconf;
1230
1231         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1232                                               socket_id, rx_conf, mp);
1233         if (!ret) {
1234                 if (!dev->data->min_rx_buf_size ||
1235                     dev->data->min_rx_buf_size > mbp_buf_size)
1236                         dev->data->min_rx_buf_size = mbp_buf_size;
1237         }
1238
1239         return ret;
1240 }
1241
1242 int
1243 rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
1244                        uint16_t nb_tx_desc, unsigned int socket_id,
1245                        const struct rte_eth_txconf *tx_conf)
1246 {
1247         struct rte_eth_dev *dev;
1248         struct rte_eth_dev_info dev_info;
1249
1250         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1251
1252         dev = &rte_eth_devices[port_id];
1253         if (tx_queue_id >= dev->data->nb_tx_queues) {
1254                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1255                 return -EINVAL;
1256         }
1257
1258         if (dev->data->dev_started) {
1259                 RTE_PMD_DEBUG_TRACE(
1260                     "port %d must be stopped to allow configuration\n", port_id);
1261                 return -EBUSY;
1262         }
1263
1264         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1265         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1266
1267         rte_eth_dev_info_get(port_id, &dev_info);
1268
1269         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1270             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1271             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1272                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
1273                                 "should be: <= %hu, >= %hu, and a product of %hu\n",
1274                                 nb_tx_desc,
1275                                 dev_info.tx_desc_lim.nb_max,
1276                                 dev_info.tx_desc_lim.nb_min,
1277                                 dev_info.tx_desc_lim.nb_align);
1278                 return -EINVAL;
1279         }
1280
1281         if (tx_conf == NULL)
1282                 tx_conf = &dev_info.default_txconf;
1283
1284         return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
1285                                                socket_id, tx_conf);
1286 }
1287
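/*
 * Usage sketch (illustrative): the usual bring-up sequence built on the
 * queue setup functions above. "mbuf_pool" is assumed to be an existing
 * packet mbuf mempool; descriptor counts are example values.
 *
 *        if (rte_eth_rx_queue_setup(port_id, 0, 128,
 *                        rte_eth_dev_socket_id(port_id), NULL, mbuf_pool) < 0)
 *                rte_exit(EXIT_FAILURE, "rx queue setup failed\n");
 *        if (rte_eth_tx_queue_setup(port_id, 0, 512,
 *                        rte_eth_dev_socket_id(port_id), NULL) < 0)
 *                rte_exit(EXIT_FAILURE, "tx queue setup failed\n");
 *        if (rte_eth_dev_start(port_id) < 0)
 *                rte_exit(EXIT_FAILURE, "cannot start port %u\n", port_id);
 */
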
1288 void
1289 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
1290                 void *userdata __rte_unused)
1291 {
1292         unsigned i;
1293
1294         for (i = 0; i < unsent; i++)
1295                 rte_pktmbuf_free(pkts[i]);
1296 }
1297
1298 void
1299 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
1300                 void *userdata)
1301 {
1302         uint64_t *count = userdata;
1303         unsigned i;
1304
1305         for (i = 0; i < unsent; i++)
1306                 rte_pktmbuf_free(pkts[i]);
1307
1308         *count += unsent;
1309 }
1310
1311 int
1312 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
1313                 buffer_tx_error_fn cbfn, void *userdata)
1314 {
1315         buffer->error_callback = cbfn;
1316         buffer->error_userdata = userdata;
1317         return 0;
1318 }
1319
1320 int
1321 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
1322 {
1323         int ret = 0;
1324
1325         if (buffer == NULL)
1326                 return -EINVAL;
1327
1328         buffer->size = size;
1329         if (buffer->error_callback == NULL) {
1330                 ret = rte_eth_tx_buffer_set_err_callback(
1331                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
1332         }
1333
1334         return ret;
1335 }
1336
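/*
 * Usage sketch (illustrative): allocating a TX buffer and counting drops
 * with the callbacks above. The allocation name and the size of 32 packets
 * are example values.
 *
 *        static uint64_t dropped;
 *        struct rte_eth_dev_tx_buffer *buf;
 *
 *        buf = rte_zmalloc("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(32), 0);
 *        if (buf == NULL || rte_eth_tx_buffer_init(buf, 32) != 0)
 *                rte_exit(EXIT_FAILURE, "cannot create tx buffer\n");
 *        rte_eth_tx_buffer_set_err_callback(buf,
 *                rte_eth_tx_buffer_count_callback, &dropped);
 */
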
1337 void
1338 rte_eth_promiscuous_enable(uint8_t port_id)
1339 {
1340         struct rte_eth_dev *dev;
1341
1342         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1343         dev = &rte_eth_devices[port_id];
1344
1345         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1346         (*dev->dev_ops->promiscuous_enable)(dev);
1347         dev->data->promiscuous = 1;
1348 }
1349
1350 void
1351 rte_eth_promiscuous_disable(uint8_t port_id)
1352 {
1353         struct rte_eth_dev *dev;
1354
1355         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1356         dev = &rte_eth_devices[port_id];
1357
1358         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1359         dev->data->promiscuous = 0;
1360         (*dev->dev_ops->promiscuous_disable)(dev);
1361 }
1362
1363 int
1364 rte_eth_promiscuous_get(uint8_t port_id)
1365 {
1366         struct rte_eth_dev *dev;
1367
1368         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1369
1370         dev = &rte_eth_devices[port_id];
1371         return dev->data->promiscuous;
1372 }
1373
1374 void
1375 rte_eth_allmulticast_enable(uint8_t port_id)
1376 {
1377         struct rte_eth_dev *dev;
1378
1379         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1380         dev = &rte_eth_devices[port_id];
1381
1382         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1383         (*dev->dev_ops->allmulticast_enable)(dev);
1384         dev->data->all_multicast = 1;
1385 }
1386
1387 void
1388 rte_eth_allmulticast_disable(uint8_t port_id)
1389 {
1390         struct rte_eth_dev *dev;
1391
1392         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1393         dev = &rte_eth_devices[port_id];
1394
1395         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1396         dev->data->all_multicast = 0;
1397         (*dev->dev_ops->allmulticast_disable)(dev);
1398 }
1399
1400 int
1401 rte_eth_allmulticast_get(uint8_t port_id)
1402 {
1403         struct rte_eth_dev *dev;
1404
1405         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1406
1407         dev = &rte_eth_devices[port_id];
1408         return dev->data->all_multicast;
1409 }
1410
1411 static inline int
1412 rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
1413                                 struct rte_eth_link *link)
1414 {
1415         struct rte_eth_link *dst = link;
1416         struct rte_eth_link *src = &(dev->data->dev_link);
1417
1418         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1419                                         *(uint64_t *)src) == 0)
1420                 return -1;
1421
1422         return 0;
1423 }
1424
1425 void
1426 rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
1427 {
1428         struct rte_eth_dev *dev;
1429
1430         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1431         dev = &rte_eth_devices[port_id];
1432
1433         if (dev->data->dev_conf.intr_conf.lsc != 0)
1434                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1435         else {
1436                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1437                 (*dev->dev_ops->link_update)(dev, 1);
1438                 *eth_link = dev->data->dev_link;
1439         }
1440 }
1441
1442 void
1443 rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
1444 {
1445         struct rte_eth_dev *dev;
1446
1447         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1448         dev = &rte_eth_devices[port_id];
1449
1450         if (dev->data->dev_conf.intr_conf.lsc != 0)
1451                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1452         else {
1453                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1454                 (*dev->dev_ops->link_update)(dev, 0);
1455                 *eth_link = dev->data->dev_link;
1456         }
1457 }
1458
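/*
 * Usage sketch (illustrative): a non-blocking link status poll with the
 * call above; field names are those of struct rte_eth_link.
 *
 *        struct rte_eth_link link;
 *
 *        rte_eth_link_get_nowait(port_id, &link);
 *        if (link.link_status == ETH_LINK_UP)
 *                printf("port %u: %u Mbps\n", port_id, link.link_speed);
 */
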
1459 int
1460 rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
1461 {
1462         struct rte_eth_dev *dev;
1463
1464         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1465
1466         dev = &rte_eth_devices[port_id];
1467         memset(stats, 0, sizeof(*stats));
1468
1469         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1470         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1471         (*dev->dev_ops->stats_get)(dev, stats);
1472         return 0;
1473 }
1474
1475 void
1476 rte_eth_stats_reset(uint8_t port_id)
1477 {
1478         struct rte_eth_dev *dev;
1479
1480         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1481         dev = &rte_eth_devices[port_id];
1482
1483         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1484         (*dev->dev_ops->stats_reset)(dev);
1485         dev->data->rx_mbuf_alloc_failed = 0;
1486 }
1487
1488 static int
1489 get_xstats_count(uint8_t port_id)
1490 {
1491         struct rte_eth_dev *dev;
1492         int count;
1493
1494         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1495         dev = &rte_eth_devices[port_id];
1496         if (dev->dev_ops->xstats_get_names != NULL) {
1497                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
1498                 if (count < 0)
1499                         return count;
1500         } else
1501                 count = 0;
1502         count += RTE_NB_STATS;
1503         count += dev->data->nb_rx_queues * RTE_NB_RXQ_STATS;
1504         count += dev->data->nb_tx_queues * RTE_NB_TXQ_STATS;
1505         return count;
1506 }
1507
1508 int
1509 rte_eth_xstats_get_names(uint8_t port_id,
1510         struct rte_eth_xstat_name *xstats_names,
1511         unsigned size)
1512 {
1513         struct rte_eth_dev *dev;
1514         int cnt_used_entries;
1515         int cnt_expected_entries;
1516         int cnt_driver_entries;
1517         uint32_t idx, id_queue;
1518
1519         cnt_expected_entries = get_xstats_count(port_id);
1520         if (xstats_names == NULL || cnt_expected_entries < 0 ||
1521                         (int)size < cnt_expected_entries)
1522                 return cnt_expected_entries;
1523
1524         /* port_id checked in get_xstats_count() */
1525         dev = &rte_eth_devices[port_id];
1526         cnt_used_entries = 0;
1527
1528         for (idx = 0; idx < RTE_NB_STATS; idx++) {
1529                 snprintf(xstats_names[cnt_used_entries].name,
1530                         sizeof(xstats_names[0].name),
1531                         "%s", rte_stats_strings[idx].name);
1532                 cnt_used_entries++;
1533         }
1534         for (id_queue = 0; id_queue < dev->data->nb_rx_queues; id_queue++) {
1535                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
1536                         snprintf(xstats_names[cnt_used_entries].name,
1537                                 sizeof(xstats_names[0].name),
1538                                 "rx_q%u%s",
1539                                 id_queue, rte_rxq_stats_strings[idx].name);
1540                         cnt_used_entries++;
1541                 }
1542
1543         }
1544         for (id_queue = 0; id_queue < dev->data->nb_tx_queues; id_queue++) {
1545                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
1546                         snprintf(xstats_names[cnt_used_entries].name,
1547                                 sizeof(xstats_names[0].name),
1548                                 "tx_q%u%s",
1549                                 id_queue, rte_txq_stats_strings[idx].name);
1550                         cnt_used_entries++;
1551                 }
1552         }
1553
1554         if (dev->dev_ops->xstats_get_names != NULL) {
1555                 /* If there are any driver-specific xstats, append them
1556                  * to end of list.
1557                  */
1558                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
1559                         dev,
1560                         xstats_names + cnt_used_entries,
1561                         size - cnt_used_entries);
1562                 if (cnt_driver_entries < 0)
1563                         return cnt_driver_entries;
1564                 cnt_used_entries += cnt_driver_entries;
1565         }
1566
1567         return cnt_used_entries;
1568 }
1569
1570 /* retrieve ethdev extended statistics */
1571 int
1572 rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
1573         unsigned n)
1574 {
1575         struct rte_eth_stats eth_stats;
1576         struct rte_eth_dev *dev;
1577         unsigned count = 0, i, q;
1578         signed xcount = 0;
1579         uint64_t val, *stats_ptr;
1580
1581         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1582
1583         dev = &rte_eth_devices[port_id];
1584
1585         /* Return generic statistics */
1586         count = RTE_NB_STATS + (dev->data->nb_rx_queues * RTE_NB_RXQ_STATS) +
1587                 (dev->data->nb_tx_queues * RTE_NB_TXQ_STATS);
1588
1589         /* implemented by the driver */
1590         if (dev->dev_ops->xstats_get != NULL) {
1591                 /* Retrieve the xstats from the driver at the end of the
1592                  * xstats struct.
1593                  */
1594                 xcount = (*dev->dev_ops->xstats_get)(dev,
1595                                      xstats ? xstats + count : NULL,
1596                                      (n > count) ? n - count : 0);
1597
1598                 if (xcount < 0)
1599                         return xcount;
1600         }
1601
1602         if (n < count + xcount || xstats == NULL)
1603                 return count + xcount;
1604
1605         /* now fill the xstats structure */
1606         count = 0;
1607         rte_eth_stats_get(port_id, &eth_stats);
1608
1609         /* global stats */
1610         for (i = 0; i < RTE_NB_STATS; i++) {
1611                 stats_ptr = RTE_PTR_ADD(&eth_stats,
1612                                         rte_stats_strings[i].offset);
1613                 val = *stats_ptr;
1614                 xstats[count++].value = val;
1615         }
1616
1617         /* per-rxq stats */
1618         for (q = 0; q < dev->data->nb_rx_queues; q++) {
1619                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
1620                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1621                                         rte_rxq_stats_strings[i].offset +
1622                                         q * sizeof(uint64_t));
1623                         val = *stats_ptr;
1624                         xstats[count++].value = val;
1625                 }
1626         }
1627
1628         /* per-txq stats */
1629         for (q = 0; q < dev->data->nb_tx_queues; q++) {
1630                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
1631                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1632                                         rte_txq_stats_strings[i].offset +
1633                                         q * sizeof(uint64_t));
1634                         val = *stats_ptr;
1635                         xstats[count++].value = val;
1636                 }
1637         }
1638
1639         for (i = 0; i < count + xcount; i++)
1640                 xstats[i].id = i;
1641
1642         return count + xcount;
1643 }
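
/*
 * Illustrative usage sketch (not part of the library): dumping every
 * extended statistic of a port with the usual two-call pattern: the first
 * call sizes the arrays, the second fills them.  Needs <stdio.h>,
 * <stdlib.h> and <inttypes.h>, and assumes the statistics set does not
 * change between the calls.
 *
 *	struct rte_eth_xstat *xstats;
 *	struct rte_eth_xstat_name *names;
 *	int nb, i;
 *
 *	nb = rte_eth_xstats_get(port_id, NULL, 0);
 *	if (nb <= 0)
 *		return;
 *	xstats = malloc(nb * sizeof(*xstats));
 *	names = malloc(nb * sizeof(*names));
 *	if (xstats != NULL && names != NULL &&
 *	    rte_eth_xstats_get(port_id, xstats, nb) == nb &&
 *	    rte_eth_xstats_get_names(port_id, names, nb) == nb) {
 *		for (i = 0; i < nb; i++)
 *			printf("%s: %" PRIu64 "\n",
 *			       names[i].name, xstats[i].value);
 *	}
 *	free(xstats);
 *	free(names);
 */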
1644
1645 /* reset ethdev extended statistics */
1646 void
1647 rte_eth_xstats_reset(uint8_t port_id)
1648 {
1649         struct rte_eth_dev *dev;
1650
1651         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1652         dev = &rte_eth_devices[port_id];
1653
1654         /* implemented by the driver */
1655         if (dev->dev_ops->xstats_reset != NULL) {
1656                 (*dev->dev_ops->xstats_reset)(dev);
1657                 return;
1658         }
1659
1660         /* fallback to default */
1661         rte_eth_stats_reset(port_id);
1662 }
1663
1664 static int
1665 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1666                 uint8_t is_rx)
1667 {
1668         struct rte_eth_dev *dev;
1669
1670         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1671
1672         dev = &rte_eth_devices[port_id];
1673
1674         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1675         return (*dev->dev_ops->queue_stats_mapping_set)
1676                         (dev, queue_id, stat_idx, is_rx);
1677 }
1678
1679
1680 int
1681 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1682                 uint8_t stat_idx)
1683 {
1684         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1685                         STAT_QMAP_TX);
1686 }
1687
1688
1689 int
1690 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1691                 uint8_t stat_idx)
1692 {
1693         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1694                         STAT_QMAP_RX);
1695 }
1696
1697 void
1698 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1699 {
1700         struct rte_eth_dev *dev;
1701         const struct rte_eth_desc_lim lim = {
1702                 .nb_max = UINT16_MAX,
1703                 .nb_min = 0,
1704                 .nb_align = 1,
1705         };
1706
1707         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1708         dev = &rte_eth_devices[port_id];
1709
1710         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
1711         dev_info->rx_desc_lim = lim;
1712         dev_info->tx_desc_lim = lim;
1713
1714         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1715         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1716         dev_info->pci_dev = dev->pci_dev;
1717         dev_info->driver_name = dev->data->drv_name;
1718         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
1719         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
1720 }
1721
1722 int
1723 rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask,
1724                                  uint32_t *ptypes, int num)
1725 {
1726         int i, j;
1727         struct rte_eth_dev *dev;
1728         const uint32_t *all_ptypes;
1729
1730         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1731         dev = &rte_eth_devices[port_id];
1732         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
1733         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
1734
1735         if (!all_ptypes)
1736                 return 0;
1737
1738         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
1739                 if (all_ptypes[i] & ptype_mask) {
1740                         if (j < num)
1741                                 ptypes[j] = all_ptypes[i];
1742                         j++;
1743                 }
1744
1745         return j;
1746 }
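
/*
 * Illustrative usage sketch (not part of the library): listing the L3
 * packet types a port can classify, again with the two-call pattern.
 * RTE_PTYPE_L3_MASK is used only as an example mask.
 *
 *	uint32_t *ptypes;
 *	int num, i;
 *
 *	num = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L3_MASK,
 *					       NULL, 0);
 *	if (num <= 0)
 *		return;
 *	ptypes = malloc(num * sizeof(*ptypes));
 *	if (ptypes != NULL &&
 *	    rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L3_MASK,
 *					     ptypes, num) == num) {
 *		for (i = 0; i < num; i++)
 *			printf("supported ptype 0x%08x\n", ptypes[i]);
 *	}
 *	free(ptypes);
 */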
1747
1748 void
1749 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1750 {
1751         struct rte_eth_dev *dev;
1752
1753         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1754         dev = &rte_eth_devices[port_id];
1755         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1756 }
1757
1758
1759 int
1760 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1761 {
1762         struct rte_eth_dev *dev;
1763
1764         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1765
1766         dev = &rte_eth_devices[port_id];
1767         *mtu = dev->data->mtu;
1768         return 0;
1769 }
1770
1771 int
1772 rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
1773 {
1774         int ret;
1775         struct rte_eth_dev *dev;
1776
1777         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1778         dev = &rte_eth_devices[port_id];
1779         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
1780
1781         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
1782         if (!ret)
1783                 dev->data->mtu = mtu;
1784
1785         return ret;
1786 }
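
/*
 * Illustrative usage sketch (not part of the library): raising the MTU to a
 * jumbo-frame size if the driver accepts it.  The value 9000 is only an
 * example; drivers reject values outside their supported range.
 *
 *	uint16_t mtu;
 *
 *	if (rte_eth_dev_get_mtu(port_id, &mtu) == 0 && mtu < 9000) {
 *		if (rte_eth_dev_set_mtu(port_id, 9000) != 0)
 *			printf("port %u: jumbo MTU not accepted\n", port_id);
 *	}
 */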
1787
1788 int
1789 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1790 {
1791         struct rte_eth_dev *dev;
1792
1793         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1794         dev = &rte_eth_devices[port_id];
1795         if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1796                 RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1797                 return -ENOSYS;
1798         }
1799
1800         if (vlan_id > 4095) {
1801                 RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1802                                 port_id, (unsigned) vlan_id);
1803                 return -EINVAL;
1804         }
1805         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1806
1807         return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1808 }
1809
1810 int
1811 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1812 {
1813         struct rte_eth_dev *dev;
1814
1815         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1816         dev = &rte_eth_devices[port_id];
1817         if (rx_queue_id >= dev->data->nb_rx_queues) {
1818                 RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
1819                 return -EINVAL;
1820         }
1821
1822         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1823         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1824
1825         return 0;
1826 }
1827
1828 int
1829 rte_eth_dev_set_vlan_ether_type(uint8_t port_id,
1830                                 enum rte_vlan_type vlan_type,
1831                                 uint16_t tpid)
1832 {
1833         struct rte_eth_dev *dev;
1834
1835         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1836         dev = &rte_eth_devices[port_id];
1837         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1838
1839         return (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, tpid);
1840 }
1841
1842 int
1843 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
1844 {
1845         struct rte_eth_dev *dev;
1846         int ret = 0;
1847         int mask = 0;
1848         int cur, org = 0;
1849
1850         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1851         dev = &rte_eth_devices[port_id];
1852
1853         /* Check which options were changed by the application. */
1854         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
1855         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
1856         if (cur != org) {
1857                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
1858                 mask |= ETH_VLAN_STRIP_MASK;
1859         }
1860
1861         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
1862         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
1863         if (cur != org) {
1864                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
1865                 mask |= ETH_VLAN_FILTER_MASK;
1866         }
1867
1868         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
1869         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
1870         if (cur != org) {
1871                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
1872                 mask |= ETH_VLAN_EXTEND_MASK;
1873         }
1874
1875         /* no change */
1876         if (mask == 0)
1877                 return ret;
1878
1879         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
1880         (*dev->dev_ops->vlan_offload_set)(dev, mask);
1881
1882         return ret;
1883 }
1884
1885 int
1886 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1887 {
1888         struct rte_eth_dev *dev;
1889         int ret = 0;
1890
1891         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1892         dev = &rte_eth_devices[port_id];
1893
1894         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1895                 ret |= ETH_VLAN_STRIP_OFFLOAD;
1896
1897         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1898                 ret |= ETH_VLAN_FILTER_OFFLOAD;
1899
1900         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1901                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
1902
1903         return ret;
1904 }
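
/*
 * Illustrative usage sketch (not part of the library): enabling VLAN
 * stripping without disturbing the other VLAN offload flags, using the
 * read-modify-write pattern the two functions above support.
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	if (mask >= 0) {
 *		mask |= ETH_VLAN_STRIP_OFFLOAD;
 *		if (rte_eth_dev_set_vlan_offload(port_id, mask) != 0)
 *			printf("port %u: VLAN strip not supported\n",
 *			       port_id);
 *	}
 */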
1905
1906 int
1907 rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
1908 {
1909         struct rte_eth_dev *dev;
1910
1911         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1912         dev = &rte_eth_devices[port_id];
1913         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
1914         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
1915
1916         return 0;
1917 }
1918
1919 int
1920 rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1921 {
1922         struct rte_eth_dev *dev;
1923
1924         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1925         dev = &rte_eth_devices[port_id];
1926         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
1927         memset(fc_conf, 0, sizeof(*fc_conf));
1928         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
1929 }
1930
1931 int
1932 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1933 {
1934         struct rte_eth_dev *dev;
1935
1936         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1937         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
1938                 RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
1939                 return -EINVAL;
1940         }
1941
1942         dev = &rte_eth_devices[port_id];
1943         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
1944         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
1945 }
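
/*
 * Illustrative usage sketch (not part of the library): switching a port to
 * full (rx + tx) flow control while keeping the thresholds reported by the
 * driver.
 *
 *	struct rte_eth_fc_conf fc_conf;
 *
 *	if (rte_eth_dev_flow_ctrl_get(port_id, &fc_conf) == 0) {
 *		fc_conf.mode = RTE_FC_FULL;
 *		if (rte_eth_dev_flow_ctrl_set(port_id, &fc_conf) != 0)
 *			printf("port %u: flow control update failed\n",
 *			       port_id);
 *	}
 */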
1946
1947 int
1948 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
1949 {
1950         struct rte_eth_dev *dev;
1951
1952         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1953         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
1954                 RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
1955                 return -EINVAL;
1956         }
1957
1958         dev = &rte_eth_devices[port_id];
1959         /* High water, low water validation is device-specific. */
1960         if (*dev->dev_ops->priority_flow_ctrl_set)
1961                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
1962         return -ENOTSUP;
1963 }
1964
1965 static int
1966 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
1967                         uint16_t reta_size)
1968 {
1969         uint16_t i, num;
1970
1971         if (!reta_conf)
1972                 return -EINVAL;
1973
1974         if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
1975                 RTE_PMD_DEBUG_TRACE("Invalid reta size, should be %u aligned\n",
1976                                                         RTE_RETA_GROUP_SIZE);
1977                 return -EINVAL;
1978         }
1979
1980         num = reta_size / RTE_RETA_GROUP_SIZE;
1981         for (i = 0; i < num; i++) {
1982                 if (reta_conf[i].mask)
1983                         return 0;
1984         }
1985
1986         return -EINVAL;
1987 }
1988
1989 static int
1990 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
1991                          uint16_t reta_size,
1992                          uint16_t max_rxq)
1993 {
1994         uint16_t i, idx, shift;
1995
1996         if (!reta_conf)
1997                 return -EINVAL;
1998
1999         if (max_rxq == 0) {
2000                 RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
2001                 return -EINVAL;
2002         }
2003
2004         for (i = 0; i < reta_size; i++) {
2005                 idx = i / RTE_RETA_GROUP_SIZE;
2006                 shift = i % RTE_RETA_GROUP_SIZE;
2007                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2008                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2009                         RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
2010                                 "the number of rx queues: %u\n", idx, shift,
2011                                 reta_conf[idx].reta[shift], max_rxq);
2012                         return -EINVAL;
2013                 }
2014         }
2015
2016         return 0;
2017 }
2018
2019 int
2020 rte_eth_dev_rss_reta_update(uint8_t port_id,
2021                             struct rte_eth_rss_reta_entry64 *reta_conf,
2022                             uint16_t reta_size)
2023 {
2024         struct rte_eth_dev *dev;
2025         int ret;
2026
2027         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2028         /* Check mask bits */
2029         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2030         if (ret < 0)
2031                 return ret;
2032
2033         dev = &rte_eth_devices[port_id];
2034
2035         /* Check entry value */
2036         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2037                                 dev->data->nb_rx_queues);
2038         if (ret < 0)
2039                 return ret;
2040
2041         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2042         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
2043 }
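
/*
 * Illustrative usage sketch (not part of the library): spreading traffic
 * evenly over the first "nb_queues" rx queues by programming the whole
 * redirection table.  The table size would normally come from
 * rte_eth_dev_info_get(); the sizes used here are assumptions for the
 * example, and memset() needs <string.h>.
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[8];
 *	uint16_t reta_size = 8 * RTE_RETA_GROUP_SIZE;
 *	uint16_t nb_queues = 4;
 *	uint16_t i, idx, shift;
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	for (i = 0; i < reta_size; i++) {
 *		idx = i / RTE_RETA_GROUP_SIZE;
 *		shift = i % RTE_RETA_GROUP_SIZE;
 *		reta_conf[idx].mask |= 1ULL << shift;
 *		reta_conf[idx].reta[shift] = i % nb_queues;
 *	}
 *	if (rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size) != 0)
 *		printf("port %u: RETA update failed\n", port_id);
 */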
2044
2045 int
2046 rte_eth_dev_rss_reta_query(uint8_t port_id,
2047                            struct rte_eth_rss_reta_entry64 *reta_conf,
2048                            uint16_t reta_size)
2049 {
2050         struct rte_eth_dev *dev;
2051         int ret;
2052
2053         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2054
2055         /* Check mask bits */
2056         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2057         if (ret < 0)
2058                 return ret;
2059
2060         dev = &rte_eth_devices[port_id];
2061         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2062         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
2063 }
2064
2065 int
2066 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
2067 {
2068         struct rte_eth_dev *dev;
2069         uint16_t rss_hash_protos;
2070
2071         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2072         rss_hash_protos = rss_conf->rss_hf;
2073         if ((rss_hash_protos != 0) &&
2074             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
2075                 RTE_PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
2076                                 rss_hash_protos);
2077                 return -EINVAL;
2078         }
2079         dev = &rte_eth_devices[port_id];
2080         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2081         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
2082 }
2083
2084 int
2085 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
2086                               struct rte_eth_rss_conf *rss_conf)
2087 {
2088         struct rte_eth_dev *dev;
2089
2090         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2091         dev = &rte_eth_devices[port_id];
2092         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2093         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
2094 }
2095
2096 int
2097 rte_eth_dev_udp_tunnel_port_add(uint8_t port_id,
2098                                 struct rte_eth_udp_tunnel *udp_tunnel)
2099 {
2100         struct rte_eth_dev *dev;
2101
2102         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2103         if (udp_tunnel == NULL) {
2104                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2105                 return -EINVAL;
2106         }
2107
2108         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2109                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2110                 return -EINVAL;
2111         }
2112
2113         dev = &rte_eth_devices[port_id];
2114         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
2115         return (*dev->dev_ops->udp_tunnel_port_add)(dev, udp_tunnel);
2116 }
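
/*
 * Illustrative usage sketch (not part of the library): registering UDP port
 * 4789 (the IANA-assigned VXLAN port) so that the NIC recognises VXLAN
 * traffic.
 *
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *
 *	if (rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel) != 0)
 *		printf("port %u: could not add VXLAN port\n", port_id);
 */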
2117
2118 int
2119 rte_eth_dev_udp_tunnel_port_delete(uint8_t port_id,
2120                                    struct rte_eth_udp_tunnel *udp_tunnel)
2121 {
2122         struct rte_eth_dev *dev;
2123
2124         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2125         dev = &rte_eth_devices[port_id];
2126
2127         if (udp_tunnel == NULL) {
2128                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2129                 return -EINVAL;
2130         }
2131
2132         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2133                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2134                 return -EINVAL;
2135         }
2136
2137         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
2138         return (*dev->dev_ops->udp_tunnel_port_del)(dev, udp_tunnel);
2139 }
2140
2141 int
2142 rte_eth_led_on(uint8_t port_id)
2143 {
2144         struct rte_eth_dev *dev;
2145
2146         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2147         dev = &rte_eth_devices[port_id];
2148         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2149         return (*dev->dev_ops->dev_led_on)(dev);
2150 }
2151
2152 int
2153 rte_eth_led_off(uint8_t port_id)
2154 {
2155         struct rte_eth_dev *dev;
2156
2157         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2158         dev = &rte_eth_devices[port_id];
2159         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2160         return (*dev->dev_ops->dev_led_off)(dev);
2161 }
2162
2163 /*
2164  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2165  * an empty spot.
2166  */
2167 static int
2168 get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2169 {
2170         struct rte_eth_dev_info dev_info;
2171         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2172         unsigned i;
2173
2174         rte_eth_dev_info_get(port_id, &dev_info);
2175
2176         for (i = 0; i < dev_info.max_mac_addrs; i++)
2177                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2178                         return i;
2179
2180         return -1;
2181 }
2182
2183 static const struct ether_addr null_mac_addr;
2184
2185 int
2186 rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
2187                         uint32_t pool)
2188 {
2189         struct rte_eth_dev *dev;
2190         int index;
2191         uint64_t pool_mask;
2192
2193         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2194         dev = &rte_eth_devices[port_id];
2195         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2196
2197         if (is_zero_ether_addr(addr)) {
2198                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2199                         port_id);
2200                 return -EINVAL;
2201         }
2202         if (pool >= ETH_64_POOLS) {
2203                 RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2204                 return -EINVAL;
2205         }
2206
2207         index = get_mac_addr_index(port_id, addr);
2208         if (index < 0) {
2209                 index = get_mac_addr_index(port_id, &null_mac_addr);
2210                 if (index < 0) {
2211                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2212                                 port_id);
2213                         return -ENOSPC;
2214                 }
2215         } else {
2216                 pool_mask = dev->data->mac_pool_sel[index];
2217
2218                 /* If both the MAC address and pool are already there, do nothing. */
2219                 if (pool_mask & (1ULL << pool))
2220                         return 0;
2221         }
2222
2223         /* Update NIC */
2224         (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2225
2226         /* Update address in NIC data structure */
2227         ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2228
2229         /* Update pool bitmap in NIC data structure */
2230         dev->data->mac_pool_sel[index] |= (1ULL << pool);
2231
2232         return 0;
2233 }
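
/*
 * Illustrative usage sketch (not part of the library): adding a secondary
 * unicast address to pool 0.  The locally administered address used below
 * is an arbitrary example value.
 *
 *	struct ether_addr addr = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *	};
 *
 *	if (rte_eth_dev_mac_addr_add(port_id, &addr, 0) != 0)
 *		printf("port %u: could not add MAC address\n", port_id);
 */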
2234
2235 int
2236 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
2237 {
2238         struct rte_eth_dev *dev;
2239         int index;
2240
2241         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2242         dev = &rte_eth_devices[port_id];
2243         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2244
2245         index = get_mac_addr_index(port_id, addr);
2246         if (index == 0) {
2247                 RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2248                 return -EADDRINUSE;
2249         } else if (index < 0)
2250                 return 0;  /* Do nothing if address wasn't found */
2251
2252         /* Update NIC */
2253         (*dev->dev_ops->mac_addr_remove)(dev, index);
2254
2255         /* Update address in NIC data structure */
2256         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2257
2258         /* reset pool bitmap */
2259         dev->data->mac_pool_sel[index] = 0;
2260
2261         return 0;
2262 }
2263
2264 int
2265 rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr)
2266 {
2267         struct rte_eth_dev *dev;
2268
2269         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2270
2271         if (!is_valid_assigned_ether_addr(addr))
2272                 return -EINVAL;
2273
2274         dev = &rte_eth_devices[port_id];
2275         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2276
2277         /* Update default address in NIC data structure */
2278         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
2279
2280         (*dev->dev_ops->mac_addr_set)(dev, addr);
2281
2282         return 0;
2283 }
2284
2285 int
2286 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
2287                                 uint16_t rx_mode, uint8_t on)
2288 {
2289         uint16_t num_vfs;
2290         struct rte_eth_dev *dev;
2291         struct rte_eth_dev_info dev_info;
2292
2293         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2294
2295         dev = &rte_eth_devices[port_id];
2296         rte_eth_dev_info_get(port_id, &dev_info);
2297
2298         num_vfs = dev_info.max_vfs;
2299         if (vf >= num_vfs) {
2300                 RTE_PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
2301                 return -EINVAL;
2302         }
2303
2304         if (rx_mode == 0) {
2305                 RTE_PMD_DEBUG_TRACE("set VF RX mode:mode mask can not be zero\n");
2306                 return -EINVAL;
2307         }
2308         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
2309         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
2310 }
2311
2312 /*
2313  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2314  * an empty spot.
2315  */
2316 static int
2317 get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2318 {
2319         struct rte_eth_dev_info dev_info;
2320         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2321         unsigned i;
2322
2323         rte_eth_dev_info_get(port_id, &dev_info);
2324         if (!dev->data->hash_mac_addrs)
2325                 return -1;
2326
2327         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2328                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2329                         ETHER_ADDR_LEN) == 0)
2330                         return i;
2331
2332         return -1;
2333 }
2334
2335 int
2336 rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
2337                                 uint8_t on)
2338 {
2339         int index;
2340         int ret;
2341         struct rte_eth_dev *dev;
2342
2343         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2344
2345         dev = &rte_eth_devices[port_id];
2346         if (is_zero_ether_addr(addr)) {
2347                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2348                         port_id);
2349                 return -EINVAL;
2350         }
2351
2352         index = get_hash_mac_addr_index(port_id, addr);
2353         /* Check if it's already there, and do nothing */
2354         if ((index >= 0) && (on))
2355                 return 0;
2356
2357         if (index < 0) {
2358                 if (!on) {
2359                         RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
2360                                 "set in UTA\n", port_id);
2361                         return -EINVAL;
2362                 }
2363
2364                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2365                 if (index < 0) {
2366                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2367                                         port_id);
2368                         return -ENOSPC;
2369                 }
2370         }
2371
2372         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2373         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2374         if (ret == 0) {
2375                 /* Update address in NIC data structure */
2376                 if (on)
2377                         ether_addr_copy(addr,
2378                                         &dev->data->hash_mac_addrs[index]);
2379                 else
2380                         ether_addr_copy(&null_mac_addr,
2381                                         &dev->data->hash_mac_addrs[index]);
2382         }
2383
2384         return ret;
2385 }
2386
2387 int
2388 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2389 {
2390         struct rte_eth_dev *dev;
2391
2392         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2393
2394         dev = &rte_eth_devices[port_id];
2395
2396         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2397         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2398 }
2399
2400 int
2401 rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on)
2402 {
2403         uint16_t num_vfs;
2404         struct rte_eth_dev *dev;
2405         struct rte_eth_dev_info dev_info;
2406
2407         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2408
2409         dev = &rte_eth_devices[port_id];
2410         rte_eth_dev_info_get(port_id, &dev_info);
2411
2412         num_vfs = dev_info.max_vfs;
2413         if (vf >= num_vfs) {
2414                 RTE_PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2415                 return -EINVAL;
2416         }
2417
2418         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2419         return (*dev->dev_ops->set_vf_rx)(dev, vf, on);
2420 }
2421
2422 int
2423 rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on)
2424 {
2425         uint16_t num_vfs;
2426         struct rte_eth_dev *dev;
2427         struct rte_eth_dev_info dev_info;
2428
2429         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2430
2431         dev = &rte_eth_devices[port_id];
2432         rte_eth_dev_info_get(port_id, &dev_info);
2433
2434         num_vfs = dev_info.max_vfs;
2435         if (vf >= num_vfs) {
2436                 RTE_PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
2437                 return -EINVAL;
2438         }
2439
2440         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2441         return (*dev->dev_ops->set_vf_tx)(dev, vf, on);
2442 }
2443
2444 int
2445 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2446                                uint64_t vf_mask, uint8_t vlan_on)
2447 {
2448         struct rte_eth_dev *dev;
2449
2450         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2451
2452         dev = &rte_eth_devices[port_id];
2453
2454         if (vlan_id > ETHER_MAX_VLAN_ID) {
2455                 RTE_PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
2456                         vlan_id);
2457                 return -EINVAL;
2458         }
2459
2460         if (vf_mask == 0) {
2461                 RTE_PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
2462                 return -EINVAL;
2463         }
2464
2465         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2466         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2467                                                    vf_mask, vlan_on);
2468 }
2469
2470 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
2471                                         uint16_t tx_rate)
2472 {
2473         struct rte_eth_dev *dev;
2474         struct rte_eth_dev_info dev_info;
2475         struct rte_eth_link link;
2476
2477         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2478
2479         dev = &rte_eth_devices[port_id];
2480         rte_eth_dev_info_get(port_id, &dev_info);
2481         link = dev->data->dev_link;
2482
2483         if (queue_idx >= dev_info.max_tx_queues) {
2484                 RTE_PMD_DEBUG_TRACE("set queue rate limit:port %d: "
2485                                 "invalid queue id=%d\n", port_id, queue_idx);
2486                 return -EINVAL;
2487         }
2488
2489         if (tx_rate > link.link_speed) {
2490                 RTE_PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
2491                                 "bigger than link speed= %d\n",
2492                         tx_rate, link.link_speed);
2493                 return -EINVAL;
2494         }
2495
2496         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2497         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2498 }
2499
2500 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
2501                                 uint64_t q_msk)
2502 {
2503         struct rte_eth_dev *dev;
2504         struct rte_eth_dev_info dev_info;
2505         struct rte_eth_link link;
2506
2507         if (q_msk == 0)
2508                 return 0;
2509
2510         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2511
2512         dev = &rte_eth_devices[port_id];
2513         rte_eth_dev_info_get(port_id, &dev_info);
2514         link = dev->data->dev_link;
2515
2516         if (vf >= dev_info.max_vfs) {
2517                 RTE_PMD_DEBUG_TRACE("set VF rate limit:port %d: "
2518                                 "invalid vf id=%d\n", port_id, vf);
2519                 return -EINVAL;
2520         }
2521
2522         if (tx_rate > link.link_speed) {
2523                 RTE_PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
2524                                 "bigger than link speed= %d\n",
2525                                 tx_rate, link.link_speed);
2526                 return -EINVAL;
2527         }
2528
2529         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
2530         return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
2531 }
2532
2533 int
2534 rte_eth_mirror_rule_set(uint8_t port_id,
2535                         struct rte_eth_mirror_conf *mirror_conf,
2536                         uint8_t rule_id, uint8_t on)
2537 {
2538         struct rte_eth_dev *dev;
2539
2540         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2541         if (mirror_conf->rule_type == 0) {
2542                 RTE_PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2543                 return -EINVAL;
2544         }
2545
2546         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2547                 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
2548                                 ETH_64_POOLS - 1);
2549                 return -EINVAL;
2550         }
2551
2552         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
2553              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
2554             (mirror_conf->pool_mask == 0)) {
2555                 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
2556                 return -EINVAL;
2557         }
2558
2559         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
2560             mirror_conf->vlan.vlan_mask == 0) {
2561                 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
2562                 return -EINVAL;
2563         }
2564
2565         dev = &rte_eth_devices[port_id];
2566         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2567
2568         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2569 }
2570
2571 int
2572 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2573 {
2574         struct rte_eth_dev *dev;
2575
2576         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2577
2578         dev = &rte_eth_devices[port_id];
2579         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2580
2581         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2582 }
2583
2584 int
2585 rte_eth_dev_callback_register(uint8_t port_id,
2586                         enum rte_eth_event_type event,
2587                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2588 {
2589         struct rte_eth_dev *dev;
2590         struct rte_eth_dev_callback *user_cb;
2591
2592         if (!cb_fn)
2593                 return -EINVAL;
2594
2595         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2596
2597         dev = &rte_eth_devices[port_id];
2598         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2599
2600         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
2601                 if (user_cb->cb_fn == cb_fn &&
2602                         user_cb->cb_arg == cb_arg &&
2603                         user_cb->event == event) {
2604                         break;
2605                 }
2606         }
2607
2608         /* create a new callback. */
2609         if (user_cb == NULL)
2610                 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
2611                                         sizeof(struct rte_eth_dev_callback), 0);
2612         if (user_cb != NULL) {
2613                 user_cb->cb_fn = cb_fn;
2614                 user_cb->cb_arg = cb_arg;
2615                 user_cb->event = event;
2616                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
2617         }
2618
2619         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2620         return (user_cb == NULL) ? -ENOMEM : 0;
2621 }
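
/*
 * Illustrative usage sketch (not part of the library): registering a
 * handler for link status change interrupts.  "lsc_event_cb" is a name
 * invented for this example; the port must have been configured with
 * intr_conf.lsc enabled for the event to fire.
 *
 *	static void
 *	lsc_event_cb(uint8_t port_id, enum rte_eth_event_type event,
 *		     void *cb_arg)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		if (event == RTE_ETH_EVENT_INTR_LSC)
 *			printf("port %u: link status changed\n", port_id);
 *	}
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      lsc_event_cb, NULL);
 */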
2622
2623 int
2624 rte_eth_dev_callback_unregister(uint8_t port_id,
2625                         enum rte_eth_event_type event,
2626                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2627 {
2628         int ret;
2629         struct rte_eth_dev *dev;
2630         struct rte_eth_dev_callback *cb, *next;
2631
2632         if (!cb_fn)
2633                 return -EINVAL;
2634
2635         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2636
2637         dev = &rte_eth_devices[port_id];
2638         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2639
2640         ret = 0;
2641         for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
2642
2643                 next = TAILQ_NEXT(cb, next);
2644
2645                 if (cb->cb_fn != cb_fn || cb->event != event ||
2646                                 (cb->cb_arg != (void *)-1 &&
2647                                 cb->cb_arg != cb_arg))
2648                         continue;
2649
2650                 /*
2651                  * if this callback is not executing right now,
2652                  * then remove it.
2653                  */
2654                 if (cb->active == 0) {
2655                         TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
2656                         rte_free(cb);
2657                 } else {
2658                         ret = -EAGAIN;
2659                 }
2660         }
2661
2662         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2663         return ret;
2664 }
2665
2666 void
2667 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
2668         enum rte_eth_event_type event)
2669 {
2670         struct rte_eth_dev_callback *cb_lst;
2671         struct rte_eth_dev_callback dev_cb;
2672
2673         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2674         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
2675                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
2676                         continue;
2677                 dev_cb = *cb_lst;
2678                 cb_lst->active = 1;
2679                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2680                 dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
2681                                                 dev_cb.cb_arg);
2682                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
2683                 cb_lst->active = 0;
2684         }
2685         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2686 }
2687
2688 int
2689 rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
2690 {
2691         uint32_t vec;
2692         struct rte_eth_dev *dev;
2693         struct rte_intr_handle *intr_handle;
2694         uint16_t qid;
2695         int rc;
2696
2697         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2698
2699         dev = &rte_eth_devices[port_id];
2700         intr_handle = &dev->pci_dev->intr_handle;
2701         if (!intr_handle->intr_vec) {
2702                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2703                 return -EPERM;
2704         }
2705
2706         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
2707                 vec = intr_handle->intr_vec[qid];
2708                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2709                 if (rc && rc != -EEXIST) {
2710                         RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2711                                         " op %d epfd %d vec %u\n",
2712                                         port_id, qid, op, epfd, vec);
2713                 }
2714         }
2715
2716         return 0;
2717 }
2718
2719 const struct rte_memzone *
2720 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
2721                          uint16_t queue_id, size_t size, unsigned align,
2722                          int socket_id)
2723 {
2724         char z_name[RTE_MEMZONE_NAMESIZE];
2725         const struct rte_memzone *mz;
2726
2727         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
2728                  dev->driver->pci_drv.name, ring_name,
2729                  dev->data->port_id, queue_id);
2730
2731         mz = rte_memzone_lookup(z_name);
2732         if (mz)
2733                 return mz;
2734
2735         if (rte_xen_dom0_supported())
2736                 return rte_memzone_reserve_bounded(z_name, size, socket_id,
2737                                                    0, align, RTE_PGSIZE_2M);
2738         else
2739                 return rte_memzone_reserve_aligned(z_name, size, socket_id,
2740                                                    0, align);
2741 }
2742
2743 int
2744 rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
2745                           int epfd, int op, void *data)
2746 {
2747         uint32_t vec;
2748         struct rte_eth_dev *dev;
2749         struct rte_intr_handle *intr_handle;
2750         int rc;
2751
2752         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2753
2754         dev = &rte_eth_devices[port_id];
2755         if (queue_id >= dev->data->nb_rx_queues) {
2756                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
2757                 return -EINVAL;
2758         }
2759
2760         intr_handle = &dev->pci_dev->intr_handle;
2761         if (!intr_handle->intr_vec) {
2762                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2763                 return -EPERM;
2764         }
2765
2766         vec = intr_handle->intr_vec[queue_id];
2767         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2768         if (rc && rc != -EEXIST) {
2769                 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2770                                 " op %d epfd %d vec %u\n",
2771                                 port_id, queue_id, op, epfd, vec);
2772                 return rc;
2773         }
2774
2775         return 0;
2776 }
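
/*
 * Illustrative usage sketch (not part of the library): putting rx queue 0
 * into interrupt mode and sleeping on it with the EAL epoll wrappers, as
 * interrupt-mode applications such as l3fwd-power do.  The queue number
 * and the infinite timeout are assumptions for this sketch.
 *
 *	struct rte_epoll_event ev;
 *
 *	rte_eth_dev_rx_intr_ctl_q(port_id, 0, RTE_EPOLL_PER_THREAD,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	rte_eth_dev_rx_intr_enable(port_id, 0);
 *	if (rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1) > 0)
 *		rte_eth_dev_rx_intr_disable(port_id, 0);
 *
 * Once woken, the application typically goes back to polling the queue with
 * rte_eth_rx_burst() until it is idle again.
 */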
2777
2778 int
2779 rte_eth_dev_rx_intr_enable(uint8_t port_id,
2780                            uint16_t queue_id)
2781 {
2782         struct rte_eth_dev *dev;
2783
2784         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2785
2786         dev = &rte_eth_devices[port_id];
2787
2788         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
2789         return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
2790 }
2791
2792 int
2793 rte_eth_dev_rx_intr_disable(uint8_t port_id,
2794                             uint16_t queue_id)
2795 {
2796         struct rte_eth_dev *dev;
2797
2798         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2799
2800         dev = &rte_eth_devices[port_id];
2801
2802         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
2803         return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
2804 }
2805
2806 #ifdef RTE_NIC_BYPASS
2807 int rte_eth_dev_bypass_init(uint8_t port_id)
2808 {
2809         struct rte_eth_dev *dev;
2810
2811         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2812
2813         dev = &rte_eth_devices[port_id];
2814         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
2815         (*dev->dev_ops->bypass_init)(dev);
2816         return 0;
2817 }
2818
2819 int
2820 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
2821 {
2822         struct rte_eth_dev *dev;
2823
2824         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2825
2826         dev = &rte_eth_devices[port_id];
2827         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2828         (*dev->dev_ops->bypass_state_show)(dev, state);
2829         return 0;
2830 }
2831
2832 int
2833 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
2834 {
2835         struct rte_eth_dev *dev;
2836
2837         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2838
2839         dev = &rte_eth_devices[port_id];
2840         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
2841         (*dev->dev_ops->bypass_state_set)(dev, new_state);
2842         return 0;
2843 }
2844
2845 int
2846 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
2847 {
2848         struct rte_eth_dev *dev;
2849
2850         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2851
2852         dev = &rte_eth_devices[port_id];
2853         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_show, -ENOTSUP);
2854         (*dev->dev_ops->bypass_event_show)(dev, event, state);
2855         return 0;
2856 }
2857
2858 int
2859 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
2860 {
2861         struct rte_eth_dev *dev;
2862
2863         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2864
2865         dev = &rte_eth_devices[port_id];
2866
2867         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
2868         (*dev->dev_ops->bypass_event_set)(dev, event, state);
2869         return 0;
2870 }
2871
2872 int
2873 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
2874 {
2875         struct rte_eth_dev *dev;
2876
2877         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2878
2879         dev = &rte_eth_devices[port_id];
2880
2881         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
2882         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
2883         return 0;
2884 }
2885
2886 int
2887 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
2888 {
2889         struct rte_eth_dev *dev;
2890
2891         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2892
2893         dev = &rte_eth_devices[port_id];
2894
2895         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
2896         (*dev->dev_ops->bypass_ver_show)(dev, ver);
2897         return 0;
2898 }
2899
2900 int
2901 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
2902 {
2903         struct rte_eth_dev *dev;
2904
2905         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2906
2907         dev = &rte_eth_devices[port_id];
2908
2909         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
2910         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
2911         return 0;
2912 }
2913
2914 int
2915 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
2916 {
2917         struct rte_eth_dev *dev;
2918
2919         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2920
2921         dev = &rte_eth_devices[port_id];
2922
2923         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
2924         (*dev->dev_ops->bypass_wd_reset)(dev);
2925         return 0;
2926 }
2927 #endif
2928
2929 int
2930 rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
2931 {
2932         struct rte_eth_dev *dev;
2933
2934         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2935
2936         dev = &rte_eth_devices[port_id];
2937         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2938         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
2939                                 RTE_ETH_FILTER_NOP, NULL);
2940 }
2941
2942 int
2943 rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
2944                        enum rte_filter_op filter_op, void *arg)
2945 {
2946         struct rte_eth_dev *dev;
2947
2948         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2949
2950         dev = &rte_eth_devices[port_id];
2951         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2952         return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
2953 }
2954
2955 void *
2956 rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
2957                 rte_rx_callback_fn fn, void *user_param)
2958 {
2959 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2960         rte_errno = ENOTSUP;
2961         return NULL;
2962 #endif
2963         /* check input parameters */
2964         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2965                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2966                 rte_errno = EINVAL;
2967                 return NULL;
2968         }
2969         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2970
2971         if (cb == NULL) {
2972                 rte_errno = ENOMEM;
2973                 return NULL;
2974         }
2975
2976         cb->fn.rx = fn;
2977         cb->param = user_param;
2978
2979         rte_spinlock_lock(&rte_eth_rx_cb_lock);
2980         /* Add the callbacks in fifo order. */
2981         struct rte_eth_rxtx_callback *tail =
2982                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
2983
2984         if (!tail) {
2985                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
2986
2987         } else {
2988                 while (tail->next)
2989                         tail = tail->next;
2990                 tail->next = cb;
2991         }
2992         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
2993
2994         return cb;
2995 }
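
/*
 * Illustrative usage sketch (not part of the library): attaching a packet
 * counter to rx queue 0.  "rx_count_cb" and "pkt_count" are names invented
 * for this example; RTE_ETHDEV_RXTX_CALLBACKS must be enabled at build
 * time.
 *
 *	static uint64_t pkt_count;
 *
 *	static uint16_t
 *	rx_count_cb(uint8_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *		    uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *	{
 *		RTE_SET_USED(port);
 *		RTE_SET_USED(queue);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(max_pkts);
 *		RTE_SET_USED(user_param);
 *		pkt_count += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	void *cb = rte_eth_add_rx_callback(port_id, 0, rx_count_cb, NULL);
 *
 * The returned pointer is later passed to rte_eth_remove_rx_callback() to
 * detach the callback again.
 */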
2996
2997 void *
2998 rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id,
2999                 rte_rx_callback_fn fn, void *user_param)
3000 {
3001 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3002         rte_errno = ENOTSUP;
3003         return NULL;
3004 #endif
3005         /* check input parameters */
3006         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3007                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3008                 rte_errno = EINVAL;
3009                 return NULL;
3010         }
3011
3012         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3013
3014         if (cb == NULL) {
3015                 rte_errno = ENOMEM;
3016                 return NULL;
3017         }
3018
3019         cb->fn.rx = fn;
3020         cb->param = user_param;
3021
3022         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3023         /* Add the callback at the first position. */
3024         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3025         rte_smp_wmb();
3026         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3027         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3028
3029         return cb;
3030 }
3031
3032 void *
3033 rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
3034                 rte_tx_callback_fn fn, void *user_param)
3035 {
3036 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3037         rte_errno = ENOTSUP;
3038         return NULL;
3039 #endif
3040         /* check input parameters */
3041         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3042                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3043                 rte_errno = EINVAL;
3044                 return NULL;
3045         }
3046
3047         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3048
3049         if (cb == NULL) {
3050                 rte_errno = ENOMEM;
3051                 return NULL;
3052         }
3053
3054         cb->fn.tx = fn;
3055         cb->param = user_param;
3056
3057         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3058         /* Add the callbacks in fifo order. */
3059         struct rte_eth_rxtx_callback *tail =
3060                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3061
3062         if (!tail) {
3063                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3064
3065         } else {
3066                 while (tail->next)
3067                         tail = tail->next;
3068                 tail->next = cb;
3069         }
3070         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3071
3072         return cb;
3073 }
3074
3075 int
3076 rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
3077                 struct rte_eth_rxtx_callback *user_cb)
3078 {
3079 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3080         return -ENOTSUP;
3081 #endif
3082         /* Check input parameters. */
3083         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3084         if (user_cb == NULL ||
3085                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
3086                 return -EINVAL;
3087
3088         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3089         struct rte_eth_rxtx_callback *cb;
3090         struct rte_eth_rxtx_callback **prev_cb;
3091         int ret = -EINVAL;
3092
3093         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3094         prev_cb = &dev->post_rx_burst_cbs[queue_id];
3095         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3096                 cb = *prev_cb;
3097                 if (cb == user_cb) {
3098                         /* Remove the user cb from the callback list. */
3099                         *prev_cb = cb->next;
3100                         ret = 0;
3101                         break;
3102                 }
3103         }
3104         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3105
3106         return ret;
3107 }
3108
3109 int
3110 rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
3111                 struct rte_eth_rxtx_callback *user_cb)
3112 {
3113 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3114         return -ENOTSUP;
3115 #endif
3116         /* Check input parameters. */
3117         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3118         if (user_cb == NULL ||
3119                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
3120                 return -EINVAL;
3121
3122         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3123         int ret = -EINVAL;
3124         struct rte_eth_rxtx_callback *cb;
3125         struct rte_eth_rxtx_callback **prev_cb;
3126
3127         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3128         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
3129         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3130                 cb = *prev_cb;
3131                 if (cb == user_cb) {
3132                         /* Remove the user cb from the callback list. */
3133                         *prev_cb = cb->next;
3134                         ret = 0;
3135                         break;
3136                 }
3137         }
3138         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3139
3140         return ret;
3141 }

int
rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
        struct rte_eth_rxq_info *qinfo)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        if (qinfo == NULL)
                return -EINVAL;

        dev = &rte_eth_devices[port_id];
        if (queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);

        memset(qinfo, 0, sizeof(*qinfo));
        dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
        return 0;
}

int
rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
        struct rte_eth_txq_info *qinfo)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        if (qinfo == NULL)
                return -EINVAL;

        dev = &rte_eth_devices[port_id];
        if (queue_id >= dev->data->nb_tx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);

        memset(qinfo, 0, sizeof(*qinfo));
        dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
        return 0;
}
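
/*
 * Illustrative sketch (not part of the library): querying the ring size
 * of an already configured RX queue.  Returns -ENOTSUP if the PMD does
 * not implement rxq_info_get; port and queue ids below are assumed to
 * be valid.
 *
 *     struct rte_eth_rxq_info rxq_info;
 *
 *     if (rte_eth_rx_queue_info_get(port_id, 0, &rxq_info) == 0)
 *             printf("rxq 0: %u descriptors\n", rxq_info.nb_desc);
 */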

int
rte_eth_dev_set_mc_addr_list(uint8_t port_id,
                             struct ether_addr *mc_addr_set,
                             uint32_t nb_mc_addr)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
        return dev->dev_ops->set_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
}
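
/*
 * Illustrative sketch (not part of the library): installing a small
 * multicast filter list.  The addresses are placeholders; passing
 * nb_mc_addr == 0 is the usual way to clear the list.
 *
 *     struct ether_addr mc_addrs[2] = {
 *             { .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
 *             { .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb } },
 *     };
 *
 *     int ret = rte_eth_dev_set_mc_addr_list(port_id, mc_addrs, 2);
 */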

int
rte_eth_timesync_enable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
        return (*dev->dev_ops->timesync_enable)(dev);
}

int
rte_eth_timesync_disable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
        return (*dev->dev_ops->timesync_disable)(dev);
}

int
rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp,
                                   uint32_t flags)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
        return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
}

int
rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
        return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
}

int
rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
        return (*dev->dev_ops->timesync_adjust_time)(dev, delta);
}

int
rte_eth_timesync_read_time(uint8_t port_id, struct timespec *timestamp)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
        return (*dev->dev_ops->timesync_read_time)(dev, timestamp);
}

int
rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *timestamp)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
        return (*dev->dev_ops->timesync_write_time)(dev, timestamp);
}
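
/*
 * Illustrative sketch (not part of the library): the IEEE 1588 helpers
 * above are thin wrappers around the PMD ops.  A typical sequence is to
 * enable timesync, read the device clock and apply a correction; the
 * adjustment value below (1000 ns) is just a placeholder.
 *
 *     struct timespec ts;
 *
 *     rte_eth_timesync_enable(port_id);
 *     if (rte_eth_timesync_read_time(port_id, &ts) == 0)
 *             printf("dev time %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
 *     rte_eth_timesync_adjust_time(port_id, 1000);
 */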

int
rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
        return (*dev->dev_ops->get_reg)(dev, info);
}

int
rte_eth_dev_get_eeprom_length(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
        return (*dev->dev_ops->get_eeprom_length)(dev);
}

int
rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
        return (*dev->dev_ops->get_eeprom)(dev, info);
}
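
/*
 * Illustrative sketch (not part of the library): reading the device
 * EEPROM by first asking the driver for its size.  Error handling and
 * the dump() helper are hypothetical, and magic handling is driver
 * specific (the PMD typically fills it on a get).
 *
 *     struct rte_dev_eeprom_info eeprom_info;
 *     int len = rte_eth_dev_get_eeprom_length(port_id);
 *
 *     if (len > 0) {
 *             memset(&eeprom_info, 0, sizeof(eeprom_info));
 *             eeprom_info.data = malloc(len);
 *             eeprom_info.length = len;
 *             if (rte_eth_dev_get_eeprom(port_id, &eeprom_info) == 0)
 *                     dump(eeprom_info.data, len);
 *     }
 */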

int
rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
        return (*dev->dev_ops->set_eeprom)(dev, info);
}

int
rte_eth_dev_get_dcb_info(uint8_t port_id,
                             struct rte_eth_dcb_info *dcb_info)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
        return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
}
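
/*
 * Illustrative sketch (not part of the library): reading back the DCB
 * configuration to find how many traffic classes the port runs with.
 *
 *     struct rte_eth_dcb_info dcb_info;
 *
 *     if (rte_eth_dev_get_dcb_info(port_id, &dcb_info) == 0)
 *             printf("%u traffic classes\n", dcb_info.nb_tcs);
 */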
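/*
 * rte_eth_copy_pci_info() is meant to be called from a PMD's init/probe
 * path, after the ethdev has been allocated, so that the generic ethdev
 * data reflects the underlying PCI device (NUMA node, kernel driver,
 * driver name and the LSC/detachable flags).
 */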
void
rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev, struct rte_pci_device *pci_dev)
{
        if ((eth_dev == NULL) || (pci_dev == NULL)) {
                RTE_PMD_DEBUG_TRACE("NULL pointer eth_dev=%p pci_dev=%p\n",
                                eth_dev, pci_dev);
                return;
        }

        eth_dev->data->dev_flags = 0;
        if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
                eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
        if (pci_dev->driver->drv_flags & RTE_PCI_DRV_DETACHABLE)
                eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

        eth_dev->data->kdrv = pci_dev->kdrv;
        eth_dev->data->numa_node = pci_dev->numa_node;
        eth_dev->data->drv_name = pci_dev->driver->name;
}

int
rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id,
                                    struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        if (l2_tunnel == NULL) {
                RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
                return -EINVAL;
        }

        if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
                RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
                return -EINVAL;
        }

        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
                                -ENOTSUP);
        return (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev, l2_tunnel);
}

int
rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
                                  struct rte_eth_l2_tunnel_conf *l2_tunnel,
                                  uint32_t mask,
                                  uint8_t en)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        if (l2_tunnel == NULL) {
                RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
                return -EINVAL;
        }

        if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
                RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
                return -EINVAL;
        }

        if (mask == 0) {
                RTE_PMD_DEBUG_TRACE("Mask should have a value.\n");
                return -EINVAL;
        }

        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
                                -ENOTSUP);
        return (*dev->dev_ops->l2_tunnel_offload_set)(dev, l2_tunnel, mask, en);
}
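
/*
 * Illustrative sketch (not part of the library): configuring the E-Tag
 * ether type and then enabling E-Tag handling on a port.  The tunnel
 * type, mask flag and 0x893f value are assumed to match the E-Tag
 * definitions in rte_ethdev.h/rte_eth_ctrl.h; port_id is assumed valid.
 *
 *     struct rte_eth_l2_tunnel_conf conf = {
 *             .l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG,
 *             .ether_type = 0x893f,
 *     };
 *
 *     rte_eth_dev_l2_tunnel_eth_type_conf(port_id, &conf);
 *     rte_eth_dev_l2_tunnel_offload_set(port_id, &conf,
 *                     ETH_L2_TUNNEL_ENABLE_MASK, 1);
 */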