ethdev: fix port lookup if none
[dpdk.git] lib/librte_ether/rte_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_ether.h"
#include "rte_ethdev.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static struct rte_eth_dev_data *rte_eth_dev_data;
static uint8_t eth_dev_last_created_port;
static uint8_t nb_ports;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
        {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
                rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
                sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
                sizeof(rte_txq_stats_strings[0]))


/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user application,
 * the pointer to the parameters for the callback, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

enum {
        DEV_DETACHED = 0,
        DEV_ATTACHED
};

static void
rte_eth_dev_data_alloc(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
                                rte_socket_id(), flags);
        } else
                mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
        if (mz == NULL)
                rte_panic("Cannot allocate memzone for ethernet port data\n");

        rte_eth_dev_data = mz->addr;
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                memset(rte_eth_dev_data, 0,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

static uint8_t
rte_eth_dev_find_free_port(void)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].attached == DEV_DETACHED)
                        return i;
        }
        return RTE_MAX_ETHPORTS;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
        uint8_t port_id;
        struct rte_eth_dev *eth_dev;

        port_id = rte_eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
                return NULL;
        }

        if (rte_eth_dev_data == NULL)
                rte_eth_dev_data_alloc();

        if (rte_eth_dev_allocated(name) != NULL) {
                RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
                                name);
                return NULL;
        }

        eth_dev = &rte_eth_devices[port_id];
        eth_dev->data = &rte_eth_dev_data[port_id];
        snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
        eth_dev->data->port_id = port_id;
        eth_dev->attached = DEV_ATTACHED;
        eth_dev_last_created_port = port_id;
        nb_ports++;
        return eth_dev;
}
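
/*
 * Usage sketch (illustrative, not part of this file): a PMD typically
 * pairs rte_eth_dev_allocate() with rte_eth_dev_release_port() on its
 * error path. The name "net_example0" is hypothetical.
 *
 *   struct rte_eth_dev *eth_dev = rte_eth_dev_allocate("net_example0");
 *   if (eth_dev == NULL)
 *           return -ENOMEM;
 *   ... set up eth_dev->dev_ops and private data; on failure:
 *   rte_eth_dev_release_port(eth_dev);
 */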

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        eth_dev->attached = DEV_DETACHED;
        nb_ports--;
        return 0;
}

int
rte_eth_dev_pci_probe(struct rte_pci_driver *pci_drv,
                      struct rte_pci_device *pci_dev)
{
        struct eth_driver    *eth_drv;
        struct rte_eth_dev *eth_dev;
        char ethdev_name[RTE_ETH_NAME_MAX_LEN];

        int diag;

        eth_drv = (struct eth_driver *)pci_drv;

        rte_eal_pci_device_name(&pci_dev->addr, ethdev_name,
                        sizeof(ethdev_name));

        eth_dev = rte_eth_dev_allocate(ethdev_name);
        if (eth_dev == NULL)
                return -ENOMEM;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
                                  eth_drv->dev_private_size,
                                  RTE_CACHE_LINE_SIZE);
                if (eth_dev->data->dev_private == NULL)
                        rte_panic("Cannot allocate memzone for private port data\n");
        }
        eth_dev->pci_dev = pci_dev;
        eth_dev->driver = eth_drv;
        eth_dev->data->rx_mbuf_alloc_failed = 0;

        /* init user callbacks */
        TAILQ_INIT(&(eth_dev->link_intr_cbs));

        /*
         * Set the default MTU.
         */
        eth_dev->data->mtu = ETHER_MTU;

        /* Invoke PMD device initialization function */
        diag = (*eth_drv->eth_dev_init)(eth_dev);
        if (diag == 0)
                return 0;

        RTE_PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%x device_id=0x%x) failed\n",
                        pci_drv->driver.name,
                        (unsigned) pci_dev->id.vendor_id,
                        (unsigned) pci_dev->id.device_id);
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);
        rte_eth_dev_release_port(eth_dev);
        return diag;
}

int
rte_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
        const struct eth_driver *eth_drv;
        struct rte_eth_dev *eth_dev;
        char ethdev_name[RTE_ETH_NAME_MAX_LEN];
        int ret;

        if (pci_dev == NULL)
                return -EINVAL;

        rte_eal_pci_device_name(&pci_dev->addr, ethdev_name,
                        sizeof(ethdev_name));

        eth_dev = rte_eth_dev_allocated(ethdev_name);
        if (eth_dev == NULL)
                return -ENODEV;

        eth_drv = (const struct eth_driver *)pci_dev->driver;

        /* Invoke PMD device uninit function */
        if (*eth_drv->eth_dev_uninit) {
                ret = (*eth_drv->eth_dev_uninit)(eth_dev);
                if (ret)
                        return ret;
        }

        /* free ether device */
        rte_eth_dev_release_port(eth_dev);

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);

        eth_dev->pci_dev = NULL;
        eth_dev->driver = NULL;
        eth_dev->data = NULL;

        return 0;
}

int
rte_eth_dev_is_valid_port(uint8_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            rte_eth_devices[port_id].attached != DEV_ATTACHED)
                return 0;
        else
                return 1;
}

int
rte_eth_dev_socket_id(uint8_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
        return rte_eth_devices[port_id].data->numa_node;
}

uint8_t
rte_eth_dev_count(void)
{
        return nb_ports;
}
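
/*
 * Usage sketch (illustrative): port ids are not guaranteed to be
 * contiguous once devices have been detached, so applications usually
 * probe each id with rte_eth_dev_is_valid_port() instead of assuming
 * ids 0..rte_eth_dev_count()-1 are all valid.
 *
 *   uint8_t pid;
 *   for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
 *           if (!rte_eth_dev_is_valid_port(pid))
 *                   continue;
 *           printf("port %u on socket %d\n", pid,
 *                   rte_eth_dev_socket_id(pid));
 *   }
 */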

int
rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
{
        char *tmp;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (name == NULL) {
                RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        /* shouldn't check 'rte_eth_devices[i].data',
         * because it might be overwritten by VDEV PMD */
        tmp = rte_eth_dev_data[port_id].name;
        strcpy(name, tmp);
        return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
{
        int i;

        if (name == NULL) {
                RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        if (!nb_ports)
                return -ENODEV;

        *port_id = RTE_MAX_ETHPORTS;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {

                if (!strncmp(name,
                        rte_eth_dev_data[i].name, strlen(name))) {

                        *port_id = i;

                        return 0;
                }
        }
        return -ENODEV;
}
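
/*
 * Usage sketch (illustrative): round trip between a port id and its
 * device name; the buffer must hold RTE_ETH_NAME_MAX_LEN bytes.
 *
 *   char name[RTE_ETH_NAME_MAX_LEN];
 *   uint8_t pid;
 *   if (rte_eth_dev_get_name_by_port(0, name) == 0 &&
 *       rte_eth_dev_get_port_by_name(name, &pid) == 0)
 *           printf("port 0 is %s (id %u)\n", name, pid);
 */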

static int
rte_eth_dev_is_detachable(uint8_t port_id)
{
        uint32_t dev_flags;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        switch (rte_eth_devices[port_id].data->kdrv) {
        case RTE_KDRV_IGB_UIO:
        case RTE_KDRV_UIO_GENERIC:
        case RTE_KDRV_NIC_UIO:
        case RTE_KDRV_NONE:
                break;
        case RTE_KDRV_VFIO:
        default:
                return -ENOTSUP;
        }
        dev_flags = rte_eth_devices[port_id].data->dev_flags;
        if ((dev_flags & RTE_ETH_DEV_DETACHABLE) &&
                (!(dev_flags & RTE_ETH_DEV_BONDED_SLAVE)))
                return 0;
        else
                return 1;
}

/* attach the new device, then store the port_id of the device */
int
rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
{
        int ret = -1;
        int current = rte_eth_dev_count();
        char *name = NULL;
        char *args = NULL;

        if ((devargs == NULL) || (port_id == NULL)) {
                ret = -EINVAL;
                goto err;
        }

        /* parse devargs, then retrieve device name and args */
        if (rte_eal_parse_devargs_str(devargs, &name, &args))
                goto err;

        ret = rte_eal_dev_attach(name, args);
        if (ret < 0)
                goto err;

        /* no point looking at the port count if no port exists */
        if (!rte_eth_dev_count()) {
                RTE_LOG(ERR, EAL, "No port found for device (%s)\n", name);
                ret = -1;
                goto err;
        }

        /* if nothing happened, there is a bug here, since some driver told us
         * it did attach a device, but did not create a port.
         */
        if (current == rte_eth_dev_count()) {
                ret = -1;
                goto err;
        }

        *port_id = eth_dev_last_created_port;
        ret = 0;

err:
        free(name);
        free(args);
        return ret;
}
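
/*
 * Usage sketch (illustrative): hot-plugging a device from a devargs
 * string; "net_null0" is a hypothetical virtual device example.
 *
 *   uint8_t pid;
 *   if (rte_eth_dev_attach("net_null0", &pid) == 0)
 *           printf("device attached as port %u\n", pid);
 */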

/* detach the device, then store the name of the device */
int
rte_eth_dev_detach(uint8_t port_id, char *name)
{
        int ret = -1;

        if (name == NULL) {
                ret = -EINVAL;
                goto err;
        }

        /* FIXME: move this to eal, once device flags are relocated there */
        if (rte_eth_dev_is_detachable(port_id))
                goto err;

        snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
                 "%s", rte_eth_devices[port_id].data->name);
        ret = rte_eal_dev_detach(name);
        if (ret < 0)
                goto err;

        return 0;

err:
        return ret;
}

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -(ENOMEM);
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;

        } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

int
rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
                        " already started\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return dev->dev_ops->rx_queue_start(dev, rx_queue_id);

}

int
rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
                        " already stopped\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);

}

int
rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
                        " already started\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return dev->dev_ops->tx_queue_start(dev, tx_queue_id);

}

int
rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
                        " already stopped\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);

}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;

        } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
        switch (speed) {
        case ETH_SPEED_NUM_10M:
                return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
        case ETH_SPEED_NUM_100M:
                return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
        case ETH_SPEED_NUM_1G:
                return ETH_LINK_SPEED_1G;
        case ETH_SPEED_NUM_2_5G:
                return ETH_LINK_SPEED_2_5G;
        case ETH_SPEED_NUM_5G:
                return ETH_LINK_SPEED_5G;
        case ETH_SPEED_NUM_10G:
                return ETH_LINK_SPEED_10G;
        case ETH_SPEED_NUM_20G:
                return ETH_LINK_SPEED_20G;
        case ETH_SPEED_NUM_25G:
                return ETH_LINK_SPEED_25G;
        case ETH_SPEED_NUM_40G:
                return ETH_LINK_SPEED_40G;
        case ETH_SPEED_NUM_50G:
                return ETH_LINK_SPEED_50G;
        case ETH_SPEED_NUM_56G:
                return ETH_LINK_SPEED_56G;
        case ETH_SPEED_NUM_100G:
                return ETH_LINK_SPEED_100G;
        default:
                return 0;
        }
}
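
/*
 * Usage sketch (illustrative): translating a numeric speed into the
 * bitmap format used by rte_eth_conf.link_speeds; ETH_LINK_FULL_DUPLEX
 * comes from rte_ethdev.h.
 *
 *   uint32_t speeds = rte_eth_speed_bitflag(ETH_SPEED_NUM_10G,
 *                                           ETH_LINK_FULL_DUPLEX);
 *   (speeds now equals ETH_LINK_SPEED_10G)
 */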

int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        int diag;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_PMD_DEBUG_TRACE(
                        "Number of RX queues requested (%u) is greater than max supported(%d)\n",
                        nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_PMD_DEBUG_TRACE(
                        "Number of TX queues requested (%u) is greater than max supported(%d)\n",
                        nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                RTE_PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        /* Copy the dev_conf parameter into the dev structure */
        memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        (*dev->dev_ops->dev_infos_get)(dev, &dev_info);

        if (nb_rx_q == 0 && nb_tx_q == 0) {
                RTE_PMD_DEBUG_TRACE("ethdev port_id=%d both rx and tx queue cannot be 0\n", port_id);
                return -EINVAL;
        }

        if (nb_rx_q > dev_info.max_rx_queues) {
                RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
                                port_id, nb_rx_q, dev_info.max_rx_queues);
                return -EINVAL;
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
                                port_id, nb_tx_q, dev_info.max_tx_queues);
                return -EINVAL;
        }

        /*
         * If link state interrupt is enabled, check that the
         * device supports it.
         */
        if ((dev_conf->intr_conf.lsc == 1) &&
                (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
                        RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
                                        dev->data->drv_name);
                        return -EINVAL;
        }

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (dev_conf->rxmode.jumbo_frame == 1) {
                if (dev_conf->rxmode.max_rx_pkt_len >
                    dev_info.max_rx_pktlen) {
                        RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " > max valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)dev_info.max_rx_pktlen);
                        return -EINVAL;
                } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
                        RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " < min valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)ETHER_MIN_LEN);
                        return -EINVAL;
                }
        } else {
                if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
                        dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
                        /* Use default value */
                        dev->data->dev_conf.rxmode.max_rx_pkt_len =
                                                        ETHER_MAX_LEN;
        }

        /*
         * Setup new number of RX/TX queues and reconfigure device.
         */
        diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
        if (diag != 0) {
                RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
                                port_id, diag);
                return diag;
        }

        diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
        if (diag != 0) {
                RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                return diag;
        }

        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                return diag;
        }

        return 0;
}
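
/*
 * Usage sketch (illustrative): a minimal one RX / one TX queue
 * configuration; the zeroed rte_eth_conf is an example assumption.
 *
 *   struct rte_eth_conf conf;
 *   memset(&conf, 0, sizeof(conf));
 *   conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN;
 *   if (rte_eth_dev_configure(port_id, 1, 1, &conf) < 0)
 *           rte_exit(EXIT_FAILURE, "cannot configure port\n");
 */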

static void
rte_eth_dev_config_restore(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct ether_addr addr;
        uint16_t i;
        uint32_t pool = 0;

        dev = &rte_eth_devices[port_id];

        rte_eth_dev_info_get(port_id, &dev_info);

        if (RTE_ETH_DEV_SRIOV(dev).active)
                pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

        /* replay MAC address configuration */
        for (i = 0; i < dev_info.max_mac_addrs; i++) {
                addr = dev->data->mac_addrs[i];

                /* skip zero address */
                if (is_zero_ether_addr(&addr))
                        continue;

                /* add address to the hardware */
                if (*dev->dev_ops->mac_addr_add &&
                        (dev->data->mac_pool_sel[i] & (1ULL << pool)))
                        (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
                else {
                        RTE_PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
                                        port_id);
                        /* exit the loop but do not return an error */
                        break;
                }
        }

        /* replay promiscuous configuration */
        if (rte_eth_promiscuous_get(port_id) == 1)
                rte_eth_promiscuous_enable(port_id);
        else if (rte_eth_promiscuous_get(port_id) == 0)
                rte_eth_promiscuous_disable(port_id);

        /* replay all multicast configuration */
        if (rte_eth_allmulticast_get(port_id) == 1)
                rte_eth_allmulticast_enable(port_id);
        else if (rte_eth_allmulticast_get(port_id) == 0)
                rte_eth_allmulticast_disable(port_id);
}

int
rte_eth_dev_start(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        int diag;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

        if (dev->data->dev_started != 0) {
                RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
                        " already started\n",
                        port_id);
                return 0;
        }

        diag = (*dev->dev_ops->dev_start)(dev);
        if (diag == 0)
                dev->data->dev_started = 1;
        else
                return diag;

        rte_eth_dev_config_restore(port_id);

        if (dev->data->dev_conf.intr_conf.lsc == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
                (*dev->dev_ops->link_update)(dev, 0);
        }
        return 0;
}
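
/*
 * Usage sketch (illustrative): the expected lifecycle around start and
 * stop; queues must be set up between configure and start.
 *
 *   if (rte_eth_dev_start(port_id) < 0)
 *           rte_exit(EXIT_FAILURE, "cannot start port\n");
 *   ... rx/tx bursts ...
 *   rte_eth_dev_stop(port_id);
 *   rte_eth_dev_close(port_id);
 */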

void
rte_eth_dev_stop(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

        if (dev->data->dev_started == 0) {
                RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
                        " already stopped\n",
                        port_id);
                return;
        }

        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_stop)(dev);
}

int
rte_eth_dev_set_link_up(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
        return (*dev->dev_ops->dev_set_link_up)(dev);
}

int
rte_eth_dev_set_link_down(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
        return (*dev->dev_ops->dev_set_link_down)(dev);
}

void
rte_eth_dev_close(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_close)(dev);

        rte_free(dev->data->rx_queues);
        dev->data->rx_queues = NULL;
        rte_free(dev->data->tx_queues);
        dev->data->tx_queues = NULL;
}

int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
                       uint16_t nb_rx_desc, unsigned int socket_id,
                       const struct rte_eth_rxconf *rx_conf,
                       struct rte_mempool *mp)
{
        int ret;
        uint32_t mbp_buf_size;
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        if (dev->data->dev_started) {
                RTE_PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

        /*
         * Check the size of the mbuf data buffer.
         * This value must be provided in the private data of the memory pool.
         * First check that the memory pool has valid private data.
         */
        rte_eth_dev_info_get(port_id, &dev_info);
        if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
                RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
                                mp->name, (int) mp->private_data_size,
                                (int) sizeof(struct rte_pktmbuf_pool_private));
                return -ENOSPC;
        }
        mbp_buf_size = rte_pktmbuf_data_room_size(mp);

        if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
                RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
                                "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
                                "=%d)\n",
                                mp->name,
                                (int)mbp_buf_size,
                                (int)(RTE_PKTMBUF_HEADROOM +
                                      dev_info.min_rx_bufsize),
                                (int)RTE_PKTMBUF_HEADROOM,
                                (int)dev_info.min_rx_bufsize);
                return -EINVAL;
        }

        if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
                        nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
                        nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {

                RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
                        "should be: <= %hu, >= %hu, and a product of %hu\n",
                        nb_rx_desc,
                        dev_info.rx_desc_lim.nb_max,
                        dev_info.rx_desc_lim.nb_min,
                        dev_info.rx_desc_lim.nb_align);
                return -EINVAL;
        }

        if (rx_conf == NULL)
                rx_conf = &dev_info.default_rxconf;

        ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
                                              socket_id, rx_conf, mp);
        if (!ret) {
                if (!dev->data->min_rx_buf_size ||
                    dev->data->min_rx_buf_size > mbp_buf_size)
                        dev->data->min_rx_buf_size = mbp_buf_size;
        }

        return ret;
}
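
/*
 * Usage sketch (illustrative): RX queue 0 fed by a fresh mbuf pool;
 * the pool sizing values are example assumptions. Passing NULL for
 * rx_conf selects dev_info.default_rxconf, as implemented above.
 *
 *   struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 8192,
 *           256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *   if (mp == NULL ||
 *       rte_eth_rx_queue_setup(port_id, 0, 512,
 *               rte_eth_dev_socket_id(port_id), NULL, mp) < 0)
 *           rte_exit(EXIT_FAILURE, "rx queue setup failed\n");
 */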

int
rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
                       uint16_t nb_tx_desc, unsigned int socket_id,
                       const struct rte_eth_txconf *tx_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        if (dev->data->dev_started) {
                RTE_PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);

        rte_eth_dev_info_get(port_id, &dev_info);

        if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
            nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
            nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
                RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
                                "should be: <= %hu, >= %hu, and a product of %hu\n",
                                nb_tx_desc,
                                dev_info.tx_desc_lim.nb_max,
                                dev_info.tx_desc_lim.nb_min,
                                dev_info.tx_desc_lim.nb_align);
                return -EINVAL;
        }

        if (tx_conf == NULL)
                tx_conf = &dev_info.default_txconf;

        return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
                                               socket_id, tx_conf);
}
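
/*
 * Usage sketch (illustrative): TX queue setup mirrors RX but needs no
 * mempool; NULL tx_conf selects the driver's default_txconf.
 *
 *   if (rte_eth_tx_queue_setup(port_id, 0, 512,
 *               rte_eth_dev_socket_id(port_id), NULL) < 0)
 *           rte_exit(EXIT_FAILURE, "tx queue setup failed\n");
 */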

void
rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
                void *userdata __rte_unused)
{
        unsigned i;

        for (i = 0; i < unsent; i++)
                rte_pktmbuf_free(pkts[i]);
}

void
rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
                void *userdata)
{
        uint64_t *count = userdata;
        unsigned i;

        for (i = 0; i < unsent; i++)
                rte_pktmbuf_free(pkts[i]);

        *count += unsent;
}

int
rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
                buffer_tx_error_fn cbfn, void *userdata)
{
        buffer->error_callback = cbfn;
        buffer->error_userdata = userdata;
        return 0;
}

int
rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
{
        int ret = 0;

        if (buffer == NULL)
                return -EINVAL;

        buffer->size = size;
        if (buffer->error_callback == NULL) {
                ret = rte_eth_tx_buffer_set_err_callback(
                        buffer, rte_eth_tx_buffer_drop_callback, NULL);
        }

        return ret;
}
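
/*
 * Usage sketch (illustrative): buffering packets for batched TX with a
 * drop counter; 'pkt' stands for an already-allocated mbuf and the
 * buffer size of 32 is an example assumption.
 *
 *   static uint64_t drops;
 *   struct rte_eth_dev_tx_buffer *buf = rte_zmalloc("tx_buf",
 *           RTE_ETH_TX_BUFFER_SIZE(32), 0);
 *   rte_eth_tx_buffer_init(buf, 32);
 *   rte_eth_tx_buffer_set_err_callback(buf,
 *           rte_eth_tx_buffer_count_callback, &drops);
 *   rte_eth_tx_buffer(port_id, 0, buf, pkt);
 *   rte_eth_tx_buffer_flush(port_id, 0, buf);
 */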

void
rte_eth_promiscuous_enable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
        (*dev->dev_ops->promiscuous_enable)(dev);
        dev->data->promiscuous = 1;
}

void
rte_eth_promiscuous_disable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
        dev->data->promiscuous = 0;
        (*dev->dev_ops->promiscuous_disable)(dev);
}

int
rte_eth_promiscuous_get(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        return dev->data->promiscuous;
}

void
rte_eth_allmulticast_enable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
        (*dev->dev_ops->allmulticast_enable)(dev);
        dev->data->all_multicast = 1;
}

void
rte_eth_allmulticast_disable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
        dev->data->all_multicast = 0;
        (*dev->dev_ops->allmulticast_disable)(dev);
}

int
rte_eth_allmulticast_get(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        return dev->data->all_multicast;
}

static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
{
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &(dev->data->dev_link);

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

void
rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_conf.intr_conf.lsc != 0)
                rte_eth_dev_atomic_read_link_status(dev, eth_link);
        else {
                RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
                (*dev->dev_ops->link_update)(dev, 1);
                *eth_link = dev->data->dev_link;
        }
}

void
rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_conf.intr_conf.lsc != 0)
                rte_eth_dev_atomic_read_link_status(dev, eth_link);
        else {
                RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
                (*dev->dev_ops->link_update)(dev, 0);
                *eth_link = dev->data->dev_link;
        }
}
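
/*
 * Usage sketch (illustrative): polling link status without waiting for
 * the PMD to complete renegotiation.
 *
 *   struct rte_eth_link link;
 *   rte_eth_link_get_nowait(port_id, &link);
 *   if (link.link_status)
 *           printf("port %u up at %u Mbps\n", port_id, link.link_speed);
 */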

int
rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        memset(stats, 0, sizeof(*stats));

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
        stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
        (*dev->dev_ops->stats_get)(dev, stats);
        return 0;
}

void
rte_eth_stats_reset(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
        (*dev->dev_ops->stats_reset)(dev);
        dev->data->rx_mbuf_alloc_failed = 0;
}

static int
get_xstats_count(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        int count;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
        dev = &rte_eth_devices[port_id];
        if (dev->dev_ops->xstats_get_names != NULL) {
                count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
                if (count < 0)
                        return count;
        } else
                count = 0;
        count += RTE_NB_STATS;
        count += RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS) *
                 RTE_NB_RXQ_STATS;
        count += RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS) *
                 RTE_NB_TXQ_STATS;
        return count;
}

int
rte_eth_xstats_get_names(uint8_t port_id,
        struct rte_eth_xstat_name *xstats_names,
        unsigned size)
{
        struct rte_eth_dev *dev;
        int cnt_used_entries;
        int cnt_expected_entries;
        int cnt_driver_entries;
        uint32_t idx, id_queue;
        uint16_t num_q;

        cnt_expected_entries = get_xstats_count(port_id);
        if (xstats_names == NULL || cnt_expected_entries < 0 ||
                        (int)size < cnt_expected_entries)
                return cnt_expected_entries;

        /* port_id checked in get_xstats_count() */
        dev = &rte_eth_devices[port_id];
        cnt_used_entries = 0;

        for (idx = 0; idx < RTE_NB_STATS; idx++) {
                snprintf(xstats_names[cnt_used_entries].name,
                        sizeof(xstats_names[0].name),
                        "%s", rte_stats_strings[idx].name);
                cnt_used_entries++;
        }
        num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
        for (id_queue = 0; id_queue < num_q; id_queue++) {
                for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
                        snprintf(xstats_names[cnt_used_entries].name,
                                sizeof(xstats_names[0].name),
                                "rx_q%u%s",
                                id_queue, rte_rxq_stats_strings[idx].name);
                        cnt_used_entries++;
                }

        }
        num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
        for (id_queue = 0; id_queue < num_q; id_queue++) {
                for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
                        snprintf(xstats_names[cnt_used_entries].name,
                                sizeof(xstats_names[0].name),
                                "tx_q%u%s",
                                id_queue, rte_txq_stats_strings[idx].name);
                        cnt_used_entries++;
                }
        }

        if (dev->dev_ops->xstats_get_names != NULL) {
                /* If there are any driver-specific xstats, append them
                 * to end of list.
                 */
                cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
                        dev,
                        xstats_names + cnt_used_entries,
                        size - cnt_used_entries);
                if (cnt_driver_entries < 0)
                        return cnt_driver_entries;
                cnt_used_entries += cnt_driver_entries;
        }

        return cnt_used_entries;
}

/* retrieve ethdev extended statistics */
int
rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
        unsigned n)
{
        struct rte_eth_stats eth_stats;
        struct rte_eth_dev *dev;
        unsigned count = 0, i, q;
        signed xcount = 0;
        uint64_t val, *stats_ptr;
        uint16_t nb_rxqs, nb_txqs;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
        nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

        /* Return generic statistics */
        count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
                (nb_txqs * RTE_NB_TXQ_STATS);

        /* implemented by the driver */
        if (dev->dev_ops->xstats_get != NULL) {
                /* Retrieve the xstats from the driver at the end of the
                 * xstats struct.
                 */
                xcount = (*dev->dev_ops->xstats_get)(dev,
                                     xstats ? xstats + count : NULL,
                                     (n > count) ? n - count : 0);

                if (xcount < 0)
                        return xcount;
        }

        if (n < count + xcount || xstats == NULL)
                return count + xcount;

        /* now fill the xstats structure */
        count = 0;
        rte_eth_stats_get(port_id, &eth_stats);

        /* global stats */
        for (i = 0; i < RTE_NB_STATS; i++) {
                stats_ptr = RTE_PTR_ADD(&eth_stats,
                                        rte_stats_strings[i].offset);
                val = *stats_ptr;
                xstats[count++].value = val;
        }

        /* per-rxq stats */
        for (q = 0; q < nb_rxqs; q++) {
                for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
                        stats_ptr = RTE_PTR_ADD(&eth_stats,
                                        rte_rxq_stats_strings[i].offset +
                                        q * sizeof(uint64_t));
                        val = *stats_ptr;
                        xstats[count++].value = val;
                }
        }

        /* per-txq stats */
        for (q = 0; q < nb_txqs; q++) {
                for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
                        stats_ptr = RTE_PTR_ADD(&eth_stats,
                                        rte_txq_stats_strings[i].offset +
                                        q * sizeof(uint64_t));
                        val = *stats_ptr;
                        xstats[count++].value = val;
                }
        }

        for (i = 0; i < count + xcount; i++)
                xstats[i].id = i;

        return count + xcount;
}
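
/*
 * Usage sketch (illustrative): the two-call pattern for extended
 * statistics; a first call with a NULL or short buffer returns the
 * required count (error checks elided).
 *
 *   int n = rte_eth_xstats_get(port_id, NULL, 0);
 *   struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *   struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *   if (rte_eth_xstats_get_names(port_id, names, n) == n &&
 *       rte_eth_xstats_get(port_id, xs, n) == n)
 *           printf("%s = %" PRIu64 "\n", names[0].name, xs[0].value);
 */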
1499
1500 /* reset ethdev extended statistics */
1501 void
1502 rte_eth_xstats_reset(uint8_t port_id)
1503 {
1504         struct rte_eth_dev *dev;
1505
1506         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1507         dev = &rte_eth_devices[port_id];
1508
1509         /* implemented by the driver */
1510         if (dev->dev_ops->xstats_reset != NULL) {
1511                 (*dev->dev_ops->xstats_reset)(dev);
1512                 return;
1513         }
1514
1515         /* fallback to default */
1516         rte_eth_stats_reset(port_id);
1517 }
1518
1519 static int
1520 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1521                 uint8_t is_rx)
1522 {
1523         struct rte_eth_dev *dev;
1524
1525         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1526
1527         dev = &rte_eth_devices[port_id];
1528
1529         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1530         return (*dev->dev_ops->queue_stats_mapping_set)
1531                         (dev, queue_id, stat_idx, is_rx);
1532 }
1533
1534
1535 int
1536 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1537                 uint8_t stat_idx)
1538 {
1539         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1540                         STAT_QMAP_TX);
1541 }
1542
1543
1544 int
1545 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1546                 uint8_t stat_idx)
1547 {
1548         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1549                         STAT_QMAP_RX);
1550 }
1551
1552 void
1553 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1554 {
1555         struct rte_eth_dev *dev;
1556         const struct rte_eth_desc_lim lim = {
1557                 .nb_max = UINT16_MAX,
1558                 .nb_min = 0,
1559                 .nb_align = 1,
1560         };
1561
1562         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1563         dev = &rte_eth_devices[port_id];
1564
1565         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
1566         dev_info->rx_desc_lim = lim;
1567         dev_info->tx_desc_lim = lim;
1568
1569         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1570         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1571         dev_info->pci_dev = dev->pci_dev;
1572         dev_info->driver_name = dev->data->drv_name;
1573         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
1574         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
1575 }
1576
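/*
 * Usage sketch (illustrative only): query the device limits before
 * configuring queues.  "nb_rxq" and "nb_txq" are hypothetical
 * application variables, not names from this file.
 *
 *	struct rte_eth_dev_info info;
 *
 *	rte_eth_dev_info_get(port_id, &info);
 *	if (nb_rxq > info.max_rx_queues || nb_txq > info.max_tx_queues)
 *		return -EINVAL;
 */
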
1577 int
1578 rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask,
1579                                  uint32_t *ptypes, int num)
1580 {
1581         int i, j;
1582         struct rte_eth_dev *dev;
1583         const uint32_t *all_ptypes;
1584
1585         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1586         dev = &rte_eth_devices[port_id];
1587         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
1588         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
1589
1590         if (!all_ptypes)
1591                 return 0;
1592
1593         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
1594                 if (all_ptypes[i] & ptype_mask) {
1595                         if (j < num)
1596                                 ptypes[j] = all_ptypes[i];
1597                         j++;
1598                 }
1599
1600         return j;
1601 }
1602
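/*
 * Usage sketch (illustrative only): like the xstats API, this follows
 * the two-call pattern -- passing num == 0 returns the number of
 * matching entries without writing to "ptypes".  RTE_PTYPE_L4_MASK is
 * assumed from rte_mbuf.h.
 *
 *	int n = rte_eth_dev_get_supported_ptypes(port_id,
 *			RTE_PTYPE_L4_MASK, NULL, 0);
 *	uint32_t *buf;
 *
 *	if (n <= 0)
 *		return n;
 *	buf = malloc(n * sizeof(*buf));
 *	if (buf != NULL)
 *		n = rte_eth_dev_get_supported_ptypes(port_id,
 *				RTE_PTYPE_L4_MASK, buf, n);
 */
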
1603 void
1604 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1605 {
1606         struct rte_eth_dev *dev;
1607
1608         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1609         dev = &rte_eth_devices[port_id];
1610         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1611 }
1612
1613
1614 int
1615 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1616 {
1617         struct rte_eth_dev *dev;
1618
1619         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1620
1621         dev = &rte_eth_devices[port_id];
1622         *mtu = dev->data->mtu;
1623         return 0;
1624 }
1625
1626 int
1627 rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
1628 {
1629         int ret;
1630         struct rte_eth_dev *dev;
1631
1632         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1633         dev = &rte_eth_devices[port_id];
1634         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
1635
1636         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
1637         if (!ret)
1638                 dev->data->mtu = mtu;
1639
1640         return ret;
1641 }
1642
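/*
 * Usage sketch (illustrative only): read the current MTU and raise it
 * for jumbo frames.  Whether 9000 is acceptable is driver-dependent;
 * mtu_set may return -ENOTSUP or reject the value.
 *
 *	uint16_t mtu;
 *	int ret;
 *
 *	rte_eth_dev_get_mtu(port_id, &mtu);
 *	if (mtu < 9000)
 *		ret = rte_eth_dev_set_mtu(port_id, 9000);
 */
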
1643 int
1644 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1645 {
1646         struct rte_eth_dev *dev;
1647
1648         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1649         dev = &rte_eth_devices[port_id];
1650         if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1651                 RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1652                 return -ENOSYS;
1653         }
1654
1655         if (vlan_id > 4095) {
1656                 RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1657                                 port_id, (unsigned) vlan_id);
1658                 return -EINVAL;
1659         }
1660         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1661
1662         return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1663 }
1664
1665 int
1666 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1667 {
1668         struct rte_eth_dev *dev;
1669
1670         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1671         dev = &rte_eth_devices[port_id];
1672         if (rx_queue_id >= dev->data->nb_rx_queues) {
1673                 RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%u\n", rx_queue_id);
1674                 return -EINVAL;
1675         }
1676
1677         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1678         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1679
1680         return 0;
1681 }
1682
1683 int
1684 rte_eth_dev_set_vlan_ether_type(uint8_t port_id,
1685                                 enum rte_vlan_type vlan_type,
1686                                 uint16_t tpid)
1687 {
1688         struct rte_eth_dev *dev;
1689
1690         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1691         dev = &rte_eth_devices[port_id];
1692         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1693
1694         return (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, tpid);
1695 }
1696
1697 int
1698 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
1699 {
1700         struct rte_eth_dev *dev;
1701         int ret = 0;
1702         int mask = 0;
1703         int cur, org = 0;
1704
1705         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1706         dev = &rte_eth_devices[port_id];
1707
1708         /* check which options were changed by the application */
1709         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
1710         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
1711         if (cur != org) {
1712                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
1713                 mask |= ETH_VLAN_STRIP_MASK;
1714         }
1715
1716         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
1717         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
1718         if (cur != org) {
1719                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
1720                 mask |= ETH_VLAN_FILTER_MASK;
1721         }
1722
1723         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
1724         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
1725         if (cur != org) {
1726                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
1727                 mask |= ETH_VLAN_EXTEND_MASK;
1728         }
1729
1730         /* no change */
1731         if (mask == 0)
1732                 return ret;
1733
1734         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
1735         (*dev->dev_ops->vlan_offload_set)(dev, mask);
1736
1737         return ret;
1738 }
1739
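/*
 * Usage sketch (illustrative only): the getter and setter share the
 * same ETH_VLAN_*_OFFLOAD flags, so a read-modify-write preserves the
 * offloads that are not being changed.
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	mask |= ETH_VLAN_STRIP_OFFLOAD;		/* enable stripping */
 *	mask &= ~ETH_VLAN_EXTEND_OFFLOAD;	/* disable extend */
 *	rte_eth_dev_set_vlan_offload(port_id, mask);
 */
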
1740 int
1741 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1742 {
1743         struct rte_eth_dev *dev;
1744         int ret = 0;
1745
1746         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1747         dev = &rte_eth_devices[port_id];
1748
1749         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1750                 ret |= ETH_VLAN_STRIP_OFFLOAD;
1751
1752         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1753                 ret |= ETH_VLAN_FILTER_OFFLOAD;
1754
1755         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1756                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
1757
1758         return ret;
1759 }
1760
1761 int
1762 rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
1763 {
1764         struct rte_eth_dev *dev;
1765
1766         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1767         dev = &rte_eth_devices[port_id];
1768         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
1769         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
1770
1771         return 0;
1772 }
1773
1774 int
1775 rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1776 {
1777         struct rte_eth_dev *dev;
1778
1779         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1780         dev = &rte_eth_devices[port_id];
1781         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
1782         memset(fc_conf, 0, sizeof(*fc_conf));
1783         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
1784 }
1785
1786 int
1787 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1788 {
1789         struct rte_eth_dev *dev;
1790
1791         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1792         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
1793                 RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
1794                 return -EINVAL;
1795         }
1796
1797         dev = &rte_eth_devices[port_id];
1798         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
1799         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
1800 }
1801
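/*
 * Usage sketch (illustrative only): the conventional read-modify-write
 * sequence.  RTE_FC_FULL is assumed from enum rte_eth_fc_mode in
 * rte_ethdev.h.
 *
 *	struct rte_eth_fc_conf fc;
 *
 *	if (rte_eth_dev_flow_ctrl_get(port_id, &fc) == 0) {
 *		fc.mode = RTE_FC_FULL;
 *		(void)rte_eth_dev_flow_ctrl_set(port_id, &fc);
 *	}
 */
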
1802 int
1803 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
1804 {
1805         struct rte_eth_dev *dev;
1806
1807         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1808         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
1809                 RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
1810                 return -EINVAL;
1811         }
1812
1813         dev = &rte_eth_devices[port_id];
1814         /* High and low water mark validation is device-specific */
1815         if (*dev->dev_ops->priority_flow_ctrl_set)
1816                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
1817         return -ENOTSUP;
1818 }
1819
1820 static int
1821 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
1822                         uint16_t reta_size)
1823 {
1824         uint16_t i, num;
1825
1826         if (!reta_conf)
1827                 return -EINVAL;
1828
1829         if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
1830                 RTE_PMD_DEBUG_TRACE("Invalid reta size, should be a multiple of %u\n",
1831                                                         RTE_RETA_GROUP_SIZE);
1832                 return -EINVAL;
1833         }
1834
1835         num = reta_size / RTE_RETA_GROUP_SIZE;
1836         for (i = 0; i < num; i++) {
1837                 if (reta_conf[i].mask)
1838                         return 0;
1839         }
1840
1841         return -EINVAL;
1842 }
1843
1844 static int
1845 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
1846                          uint16_t reta_size,
1847                          uint16_t max_rxq)
1848 {
1849         uint16_t i, idx, shift;
1850
1851         if (!reta_conf)
1852                 return -EINVAL;
1853
1854         if (max_rxq == 0) {
1855                 RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
1856                 return -EINVAL;
1857         }
1858
1859         for (i = 0; i < reta_size; i++) {
1860                 idx = i / RTE_RETA_GROUP_SIZE;
1861                 shift = i % RTE_RETA_GROUP_SIZE;
1862                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
1863                         (reta_conf[idx].reta[shift] >= max_rxq)) {
1864                         RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
1865                                 "the maximum rxq index: %u\n", idx, shift,
1866                                 reta_conf[idx].reta[shift], max_rxq);
1867                         return -EINVAL;
1868                 }
1869         }
1870
1871         return 0;
1872 }
1873
1874 int
1875 rte_eth_dev_rss_reta_update(uint8_t port_id,
1876                             struct rte_eth_rss_reta_entry64 *reta_conf,
1877                             uint16_t reta_size)
1878 {
1879         struct rte_eth_dev *dev;
1880         int ret;
1881
1882         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1883         /* Check mask bits */
1884         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1885         if (ret < 0)
1886                 return ret;
1887
1888         dev = &rte_eth_devices[port_id];
1889
1890         /* Check entry value */
1891         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
1892                                 dev->data->nb_rx_queues);
1893         if (ret < 0)
1894                 return ret;
1895
1896         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
1897         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
1898 }
1899
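/*
 * Usage sketch (illustrative only): populate a redirection table of
 * "reta_size" entries (which should match dev_info.reta_size).  Each
 * rte_eth_rss_reta_entry64 holds RTE_RETA_GROUP_SIZE entries plus a
 * per-entry valid-bit mask, hence the idx/shift arithmetic mirrored in
 * rte_eth_check_reta_entry() above.  "reta_size" and "nb_rx_queues"
 * are hypothetical application variables.
 *
 *	struct rte_eth_rss_reta_entry64 conf[reta_size / RTE_RETA_GROUP_SIZE];
 *	uint16_t i, idx, shift;
 *
 *	memset(conf, 0, sizeof(conf));
 *	for (i = 0; i < reta_size; i++) {
 *		idx = i / RTE_RETA_GROUP_SIZE;
 *		shift = i % RTE_RETA_GROUP_SIZE;
 *		conf[idx].mask |= 1ULL << shift;
 *		conf[idx].reta[shift] = i % nb_rx_queues;
 *	}
 *	(void)rte_eth_dev_rss_reta_update(port_id, conf, reta_size);
 */
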
1900 int
1901 rte_eth_dev_rss_reta_query(uint8_t port_id,
1902                            struct rte_eth_rss_reta_entry64 *reta_conf,
1903                            uint16_t reta_size)
1904 {
1905         struct rte_eth_dev *dev;
1906         int ret;
1907
1908         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1909
1910         /* Check mask bits */
1911         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1912         if (ret < 0)
1913                 return ret;
1914
1915         dev = &rte_eth_devices[port_id];
1916         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
1917         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
1918 }
1919
1920 int
1921 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
1922 {
1923         struct rte_eth_dev *dev;
1924         uint64_t rss_hash_protos;
1925
1926         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1927         rss_hash_protos = rss_conf->rss_hf;
1928         if ((rss_hash_protos != 0) &&
1929             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
1930                 RTE_PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%" PRIx64 "\n",
1931                                 rss_hash_protos);
1932                 return -EINVAL;
1933         }
1934         dev = &rte_eth_devices[port_id];
1935         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
1936         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
1937 }
1938
1939 int
1940 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
1941                               struct rte_eth_rss_conf *rss_conf)
1942 {
1943         struct rte_eth_dev *dev;
1944
1945         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1946         dev = &rte_eth_devices[port_id];
1947         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
1948         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
1949 }
1950
1951 int
1952 rte_eth_dev_udp_tunnel_port_add(uint8_t port_id,
1953                                 struct rte_eth_udp_tunnel *udp_tunnel)
1954 {
1955         struct rte_eth_dev *dev;
1956
1957         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1958         if (udp_tunnel == NULL) {
1959                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
1960                 return -EINVAL;
1961         }
1962
1963         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
1964                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
1965                 return -EINVAL;
1966         }
1967
1968         dev = &rte_eth_devices[port_id];
1969         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
1970         return (*dev->dev_ops->udp_tunnel_port_add)(dev, udp_tunnel);
1971 }
1972
1973 int
1974 rte_eth_dev_udp_tunnel_port_delete(uint8_t port_id,
1975                                    struct rte_eth_udp_tunnel *udp_tunnel)
1976 {
1977         struct rte_eth_dev *dev;
1978
1979         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1980         dev = &rte_eth_devices[port_id];
1981
1982         if (udp_tunnel == NULL) {
1983                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
1984                 return -EINVAL;
1985         }
1986
1987         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
1988                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
1989                 return -EINVAL;
1990         }
1991
1992         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
1993         return (*dev->dev_ops->udp_tunnel_port_del)(dev, udp_tunnel);
1994 }
1995
1996 int
1997 rte_eth_led_on(uint8_t port_id)
1998 {
1999         struct rte_eth_dev *dev;
2000
2001         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2002         dev = &rte_eth_devices[port_id];
2003         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2004         return (*dev->dev_ops->dev_led_on)(dev);
2005 }
2006
2007 int
2008 rte_eth_led_off(uint8_t port_id)
2009 {
2010         struct rte_eth_dev *dev;
2011
2012         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2013         dev = &rte_eth_devices[port_id];
2014         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2015         return (*dev->dev_ops->dev_led_off)(dev);
2016 }
2017
2018 /*
2019  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2020  * an empty spot.
2021  */
2022 static int
2023 get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2024 {
2025         struct rte_eth_dev_info dev_info;
2026         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2027         unsigned i;
2028
2029         rte_eth_dev_info_get(port_id, &dev_info);
2030
2031         for (i = 0; i < dev_info.max_mac_addrs; i++)
2032                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2033                         return i;
2034
2035         return -1;
2036 }
2037
2038 static const struct ether_addr null_mac_addr;
2039
2040 int
2041 rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
2042                         uint32_t pool)
2043 {
2044         struct rte_eth_dev *dev;
2045         int index;
2046         uint64_t pool_mask;
2047
2048         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2049         dev = &rte_eth_devices[port_id];
2050         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2051
2052         if (is_zero_ether_addr(addr)) {
2053                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2054                         port_id);
2055                 return -EINVAL;
2056         }
2057         if (pool >= ETH_64_POOLS) {
2058                 RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2059                 return -EINVAL;
2060         }
2061
2062         index = get_mac_addr_index(port_id, addr);
2063         if (index < 0) {
2064                 index = get_mac_addr_index(port_id, &null_mac_addr);
2065                 if (index < 0) {
2066                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2067                                 port_id);
2068                         return -ENOSPC;
2069                 }
2070         } else {
2071                 pool_mask = dev->data->mac_pool_sel[index];
2072
2073                 /* If both the MAC address and pool are already there, do nothing */
2074                 if (pool_mask & (1ULL << pool))
2075                         return 0;
2076         }
2077
2078         /* Update NIC */
2079         (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2080
2081         /* Update address in NIC data structure */
2082         ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2083
2084         /* Update pool bitmap in NIC data structure */
2085         dev->data->mac_pool_sel[index] |= (1ULL << pool);
2086
2087         return 0;
2088 }
2089
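/*
 * Usage sketch (illustrative only): add a locally administered unicast
 * address to pool 0 and remove it again.  The address literal is
 * arbitrary.
 *
 *	struct ether_addr mac = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
 *	};
 *
 *	if (rte_eth_dev_mac_addr_add(port_id, &mac, 0) == 0)
 *		(void)rte_eth_dev_mac_addr_remove(port_id, &mac);
 */
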
2090 int
2091 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
2092 {
2093         struct rte_eth_dev *dev;
2094         int index;
2095
2096         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2097         dev = &rte_eth_devices[port_id];
2098         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2099
2100         index = get_mac_addr_index(port_id, addr);
2101         if (index == 0) {
2102                 RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2103                 return -EADDRINUSE;
2104         } else if (index < 0)
2105                 return 0;  /* Do nothing if address wasn't found */
2106
2107         /* Update NIC */
2108         (*dev->dev_ops->mac_addr_remove)(dev, index);
2109
2110         /* Update address in NIC data structure */
2111         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2112
2113         /* reset pool bitmap */
2114         dev->data->mac_pool_sel[index] = 0;
2115
2116         return 0;
2117 }
2118
2119 int
2120 rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr)
2121 {
2122         struct rte_eth_dev *dev;
2123
2124         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2125
2126         if (!is_valid_assigned_ether_addr(addr))
2127                 return -EINVAL;
2128
2129         dev = &rte_eth_devices[port_id];
2130         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2131
2132         /* Update default address in NIC data structure */
2133         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
2134
2135         (*dev->dev_ops->mac_addr_set)(dev, addr);
2136
2137         return 0;
2138 }
2139
2140 int
2141 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
2142                                 uint16_t rx_mode, uint8_t on)
2143 {
2144         uint16_t num_vfs;
2145         struct rte_eth_dev *dev;
2146         struct rte_eth_dev_info dev_info;
2147
2148         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2149
2150         dev = &rte_eth_devices[port_id];
2151         rte_eth_dev_info_get(port_id, &dev_info);
2152
2153         num_vfs = dev_info.max_vfs;
2154         if (vf > num_vfs) {
2155                 RTE_PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
2156                 return -EINVAL;
2157         }
2158
2159         if (rx_mode == 0) {
2160                 RTE_PMD_DEBUG_TRACE("set VF RX mode:mode mask cannot be zero\n");
2161                 return -EINVAL;
2162         }
2163         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
2164         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
2165 }
2166
2167 /*
2168  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2169  * an empty spot.
2170  */
2171 static int
2172 get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2173 {
2174         struct rte_eth_dev_info dev_info;
2175         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2176         unsigned i;
2177
2178         rte_eth_dev_info_get(port_id, &dev_info);
2179         if (!dev->data->hash_mac_addrs)
2180                 return -1;
2181
2182         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2183                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2184                         ETHER_ADDR_LEN) == 0)
2185                         return i;
2186
2187         return -1;
2188 }
2189
2190 int
2191 rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
2192                                 uint8_t on)
2193 {
2194         int index;
2195         int ret;
2196         struct rte_eth_dev *dev;
2197
2198         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2199
2200         dev = &rte_eth_devices[port_id];
2201         if (is_zero_ether_addr(addr)) {
2202                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2203                         port_id);
2204                 return -EINVAL;
2205         }
2206
2207         index = get_hash_mac_addr_index(port_id, addr);
2208         /* Check if it's already there, and do nothing */
2209         if ((index >= 0) && (on))
2210                 return 0;
2211
2212         if (index < 0) {
2213                 if (!on) {
2214                         RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
2215                                 "set in UTA\n", port_id);
2216                         return -EINVAL;
2217                 }
2218
2219                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2220                 if (index < 0) {
2221                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2222                                         port_id);
2223                         return -ENOSPC;
2224                 }
2225         }
2226
2227         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2228         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2229         if (ret == 0) {
2230                 /* Update address in NIC data structure */
2231                 if (on)
2232                         ether_addr_copy(addr,
2233                                         &dev->data->hash_mac_addrs[index]);
2234                 else
2235                         ether_addr_copy(&null_mac_addr,
2236                                         &dev->data->hash_mac_addrs[index]);
2237         }
2238
2239         return ret;
2240 }
2241
2242 int
2243 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2244 {
2245         struct rte_eth_dev *dev;
2246
2247         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2248
2249         dev = &rte_eth_devices[port_id];
2250
2251         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2252         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2253 }
2254
2255 int
2256 rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on)
2257 {
2258         uint16_t num_vfs;
2259         struct rte_eth_dev *dev;
2260         struct rte_eth_dev_info dev_info;
2261
2262         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2263
2264         dev = &rte_eth_devices[port_id];
2265         rte_eth_dev_info_get(port_id, &dev_info);
2266
2267         num_vfs = dev_info.max_vfs;
2268         if (vf > num_vfs) {
2269                 RTE_PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2270                 return -EINVAL;
2271         }
2272
2273         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2274         return (*dev->dev_ops->set_vf_rx)(dev, vf, on);
2275 }
2276
2277 int
2278 rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on)
2279 {
2280         uint16_t num_vfs;
2281         struct rte_eth_dev *dev;
2282         struct rte_eth_dev_info dev_info;
2283
2284         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2285
2286         dev = &rte_eth_devices[port_id];
2287         rte_eth_dev_info_get(port_id, &dev_info);
2288
2289         num_vfs = dev_info.max_vfs;
2290         if (vf > num_vfs) {
2291                 RTE_PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
2292                 return -EINVAL;
2293         }
2294
2295         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2296         return (*dev->dev_ops->set_vf_tx)(dev, vf, on);
2297 }
2298
2299 int
2300 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2301                                uint64_t vf_mask, uint8_t vlan_on)
2302 {
2303         struct rte_eth_dev *dev;
2304
2305         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2306
2307         dev = &rte_eth_devices[port_id];
2308
2309         if (vlan_id > ETHER_MAX_VLAN_ID) {
2310                 RTE_PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
2311                         vlan_id);
2312                 return -EINVAL;
2313         }
2314
2315         if (vf_mask == 0) {
2316                 RTE_PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
2317                 return -EINVAL;
2318         }
2319
2320         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2321         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2322                                                    vf_mask, vlan_on);
2323 }
2324
2325 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
2326                                         uint16_t tx_rate)
2327 {
2328         struct rte_eth_dev *dev;
2329         struct rte_eth_dev_info dev_info;
2330         struct rte_eth_link link;
2331
2332         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2333
2334         dev = &rte_eth_devices[port_id];
2335         rte_eth_dev_info_get(port_id, &dev_info);
2336         link = dev->data->dev_link;
2337
2338         if (queue_idx >= dev_info.max_tx_queues) {
2339                 RTE_PMD_DEBUG_TRACE("set queue rate limit:port %d: "
2340                                 "invalid queue id=%d\n", port_id, queue_idx);
2341                 return -EINVAL;
2342         }
2343
2344         if (tx_rate > link.link_speed) {
2345                 RTE_PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
2346                                 "bigger than link speed= %d\n",
2347                         tx_rate, link.link_speed);
2348                 return -EINVAL;
2349         }
2350
2351         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2352         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2353 }
2354
2355 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
2356                                 uint64_t q_msk)
2357 {
2358         struct rte_eth_dev *dev;
2359         struct rte_eth_dev_info dev_info;
2360         struct rte_eth_link link;
2361
2362         if (q_msk == 0)
2363                 return 0;
2364
2365         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2366
2367         dev = &rte_eth_devices[port_id];
2368         rte_eth_dev_info_get(port_id, &dev_info);
2369         link = dev->data->dev_link;
2370
2371         if (vf > dev_info.max_vfs) {
2372                 RTE_PMD_DEBUG_TRACE("set VF rate limit:port %d: "
2373                                 "invalid vf id=%d\n", port_id, vf);
2374                 return -EINVAL;
2375         }
2376
2377         if (tx_rate > link.link_speed) {
2378                 RTE_PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
2379                                 "bigger than link speed= %d\n",
2380                                 tx_rate, link.link_speed);
2381                 return -EINVAL;
2382         }
2383
2384         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
2385         return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
2386 }
2387
2388 int
2389 rte_eth_mirror_rule_set(uint8_t port_id,
2390                         struct rte_eth_mirror_conf *mirror_conf,
2391                         uint8_t rule_id, uint8_t on)
2392 {
2393         struct rte_eth_dev *dev;
2394
2395         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2396         if (mirror_conf->rule_type == 0) {
2397                 RTE_PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2398                 return -EINVAL;
2399         }
2400
2401         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2402                 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
2403                                 ETH_64_POOLS - 1);
2404                 return -EINVAL;
2405         }
2406
2407         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
2408              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
2409             (mirror_conf->pool_mask == 0)) {
2410                 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
2411                 return -EINVAL;
2412         }
2413
2414         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
2415             mirror_conf->vlan.vlan_mask == 0) {
2416                 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
2417                 return -EINVAL;
2418         }
2419
2420         dev = &rte_eth_devices[port_id];
2421         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2422
2423         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2424 }
2425
2426 int
2427 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2428 {
2429         struct rte_eth_dev *dev;
2430
2431         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2432
2433         dev = &rte_eth_devices[port_id];
2434         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2435
2436         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2437 }
2438
2439 int
2440 rte_eth_dev_callback_register(uint8_t port_id,
2441                         enum rte_eth_event_type event,
2442                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2443 {
2444         struct rte_eth_dev *dev;
2445         struct rte_eth_dev_callback *user_cb;
2446
2447         if (!cb_fn)
2448                 return -EINVAL;
2449
2450         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2451
2452         dev = &rte_eth_devices[port_id];
2453         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2454
2455         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
2456                 if (user_cb->cb_fn == cb_fn &&
2457                         user_cb->cb_arg == cb_arg &&
2458                         user_cb->event == event) {
2459                         break;
2460                 }
2461         }
2462
2463         /* create a new callback. */
2464         if (user_cb == NULL) {
2465                 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
2466                                         sizeof(struct rte_eth_dev_callback), 0);
2467                 if (user_cb != NULL) {
2468                         user_cb->cb_fn = cb_fn;
2469                         user_cb->cb_arg = cb_arg;
2470                         user_cb->event = event;
2471                         TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
2472                 }
2473         }
2474
2475         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2476         return (user_cb == NULL) ? -ENOMEM : 0;
2477 }
2478
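/*
 * Usage sketch (illustrative only): register a handler for link state
 * change interrupts.  The handler signature follows rte_eth_dev_cb_fn
 * as invoked by _rte_eth_dev_callback_process() below; the handler
 * body and RTE_ETH_EVENT_INTR_LSC are assumptions from rte_ethdev.h.
 *
 *	static void
 *	on_lsc(uint8_t port, enum rte_eth_event_type event, void *arg)
 *	{
 *		RTE_SET_USED(arg);
 *		printf("port %u: event %d\n", port, event);
 *	}
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      on_lsc, NULL);
 */
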
2479 int
2480 rte_eth_dev_callback_unregister(uint8_t port_id,
2481                         enum rte_eth_event_type event,
2482                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2483 {
2484         int ret;
2485         struct rte_eth_dev *dev;
2486         struct rte_eth_dev_callback *cb, *next;
2487
2488         if (!cb_fn)
2489                 return -EINVAL;
2490
2491         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2492
2493         dev = &rte_eth_devices[port_id];
2494         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2495
2496         ret = 0;
2497         for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
2498
2499                 next = TAILQ_NEXT(cb, next);
2500
2501                 if (cb->cb_fn != cb_fn || cb->event != event ||
2502                                 (cb->cb_arg != (void *)-1 &&
2503                                 cb->cb_arg != cb_arg))
2504                         continue;
2505
2506                 /*
2507                  * if this callback is not executing right now,
2508                  * then remove it.
2509                  */
2510                 if (cb->active == 0) {
2511                         TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
2512                         rte_free(cb);
2513                 } else {
2514                         ret = -EAGAIN;
2515                 }
2516         }
2517
2518         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2519         return ret;
2520 }
2521
2522 void
2523 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
2524         enum rte_eth_event_type event, void *cb_arg)
2525 {
2526         struct rte_eth_dev_callback *cb_lst;
2527         struct rte_eth_dev_callback dev_cb;
2528
2529         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2530         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
2531                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
2532                         continue;
2533                 dev_cb = *cb_lst;
2534                 cb_lst->active = 1;
2535                 if (cb_arg != NULL)
2536                         dev_cb.cb_arg = (void *) cb_arg;
2537
2538                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2539                 dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
2540                                                 dev_cb.cb_arg);
2541                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
2542                 cb_lst->active = 0;
2543         }
2544         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2545 }
2546
2547 int
2548 rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
2549 {
2550         uint32_t vec;
2551         struct rte_eth_dev *dev;
2552         struct rte_intr_handle *intr_handle;
2553         uint16_t qid;
2554         int rc;
2555
2556         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2557
2558         dev = &rte_eth_devices[port_id];
2559         intr_handle = &dev->pci_dev->intr_handle;
2560         if (!intr_handle->intr_vec) {
2561                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2562                 return -EPERM;
2563         }
2564
2565         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
2566                 vec = intr_handle->intr_vec[qid];
2567                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2568                 if (rc && rc != -EEXIST) {
2569                         RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2570                                         " op %d epfd %d vec %u\n",
2571                                         port_id, qid, op, epfd, vec);
2572                 }
2573         }
2574
2575         return 0;
2576 }
2577
2578 const struct rte_memzone *
2579 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
2580                          uint16_t queue_id, size_t size, unsigned align,
2581                          int socket_id)
2582 {
2583         char z_name[RTE_MEMZONE_NAMESIZE];
2584         const struct rte_memzone *mz;
2585
2586         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
2587                  dev->driver->pci_drv.driver.name, ring_name,
2588                  dev->data->port_id, queue_id);
2589
2590         mz = rte_memzone_lookup(z_name);
2591         if (mz)
2592                 return mz;
2593
2594         if (rte_xen_dom0_supported())
2595                 return rte_memzone_reserve_bounded(z_name, size, socket_id,
2596                                                    0, align, RTE_PGSIZE_2M);
2597         else
2598                 return rte_memzone_reserve_aligned(z_name, size, socket_id,
2599                                                    0, align);
2600 }
2601
2602 int
2603 rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
2604                           int epfd, int op, void *data)
2605 {
2606         uint32_t vec;
2607         struct rte_eth_dev *dev;
2608         struct rte_intr_handle *intr_handle;
2609         int rc;
2610
2611         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2612
2613         dev = &rte_eth_devices[port_id];
2614         if (queue_id >= dev->data->nb_rx_queues) {
2615                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
2616                 return -EINVAL;
2617         }
2618
2619         intr_handle = &dev->pci_dev->intr_handle;
2620         if (!intr_handle->intr_vec) {
2621                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2622                 return -EPERM;
2623         }
2624
2625         vec = intr_handle->intr_vec[queue_id];
2626         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2627         if (rc && rc != -EEXIST) {
2628                 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2629                                 " op %d epfd %d vec %u\n",
2630                                 port_id, queue_id, op, epfd, vec);
2631                 return rc;
2632         }
2633
2634         return 0;
2635 }
2636
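/*
 * Usage sketch (illustrative only): arm one RX queue's interrupt and
 * wait for it with the EAL epoll wrapper.  RTE_EPOLL_PER_THREAD,
 * RTE_INTR_EVENT_ADD, struct rte_epoll_event and rte_epoll_wait() are
 * assumed from rte_interrupts.h.
 *
 *	struct rte_epoll_event ev;
 *
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1 /* block */);
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 */
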
2637 int
2638 rte_eth_dev_rx_intr_enable(uint8_t port_id,
2639                            uint16_t queue_id)
2640 {
2641         struct rte_eth_dev *dev;
2642
2643         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2644
2645         dev = &rte_eth_devices[port_id];
2646
2647         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
2648         return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
2649 }
2650
2651 int
2652 rte_eth_dev_rx_intr_disable(uint8_t port_id,
2653                             uint16_t queue_id)
2654 {
2655         struct rte_eth_dev *dev;
2656
2657         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2658
2659         dev = &rte_eth_devices[port_id];
2660
2661         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
2662         return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
2663 }
2664
2665 #ifdef RTE_NIC_BYPASS
2666 int rte_eth_dev_bypass_init(uint8_t port_id)
2667 {
2668         struct rte_eth_dev *dev;
2669
2670         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2671
2672         dev = &rte_eth_devices[port_id];
2673         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
2674         (*dev->dev_ops->bypass_init)(dev);
2675         return 0;
2676 }
2677
2678 int
2679 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
2680 {
2681         struct rte_eth_dev *dev;
2682
2683         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2684
2685         dev = &rte_eth_devices[port_id];
2686         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2687         (*dev->dev_ops->bypass_state_show)(dev, state);
2688         return 0;
2689 }
2690
2691 int
2692 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
2693 {
2694         struct rte_eth_dev *dev;
2695
2696         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2697
2698         dev = &rte_eth_devices[port_id];
2699         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
2700         (*dev->dev_ops->bypass_state_set)(dev, new_state);
2701         return 0;
2702 }
2703
2704 int
2705 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
2706 {
2707         struct rte_eth_dev *dev;
2708
2709         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2710
2711         dev = &rte_eth_devices[port_id];
2712         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_show, -ENOTSUP);
2713         (*dev->dev_ops->bypass_event_show)(dev, event, state);
2714         return 0;
2715 }
2716
2717 int
2718 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
2719 {
2720         struct rte_eth_dev *dev;
2721
2722         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2723
2724         dev = &rte_eth_devices[port_id];
2725
2726         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
2727         (*dev->dev_ops->bypass_event_set)(dev, event, state);
2728         return 0;
2729 }
2730
2731 int
2732 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
2733 {
2734         struct rte_eth_dev *dev;
2735
2736         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2737
2738         dev = &rte_eth_devices[port_id];
2739
2740         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
2741         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
2742         return 0;
2743 }
2744
2745 int
2746 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
2747 {
2748         struct rte_eth_dev *dev;
2749
2750         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2751
2752         dev = &rte_eth_devices[port_id];
2753
2754         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
2755         (*dev->dev_ops->bypass_ver_show)(dev, ver);
2756         return 0;
2757 }
2758
2759 int
2760 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
2761 {
2762         struct rte_eth_dev *dev;
2763
2764         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2765
2766         dev = &rte_eth_devices[port_id];
2767
2768         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
2769         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
2770         return 0;
2771 }
2772
2773 int
2774 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
2775 {
2776         struct rte_eth_dev *dev;
2777
2778         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2779
2780         dev = &rte_eth_devices[port_id];
2781
2782         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
2783         (*dev->dev_ops->bypass_wd_reset)(dev);
2784         return 0;
2785 }
2786 #endif
2787
2788 int
2789 rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
2790 {
2791         struct rte_eth_dev *dev;
2792
2793         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2794
2795         dev = &rte_eth_devices[port_id];
2796         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2797         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
2798                                 RTE_ETH_FILTER_NOP, NULL);
2799 }
2800
2801 int
2802 rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
2803                        enum rte_filter_op filter_op, void *arg)
2804 {
2805         struct rte_eth_dev *dev;
2806
2807         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2808
2809         dev = &rte_eth_devices[port_id];
2810         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2811         return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
2812 }
2813
2814 void *
2815 rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
2816                 rte_rx_callback_fn fn, void *user_param)
2817 {
2818 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2819         rte_errno = ENOTSUP;
2820         return NULL;
2821 #endif
2822         /* check input parameters */
2823         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2824                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2825                 rte_errno = EINVAL;
2826                 return NULL;
2827         }
2828         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2829
2830         if (cb == NULL) {
2831                 rte_errno = ENOMEM;
2832                 return NULL;
2833         }
2834
2835         cb->fn.rx = fn;
2836         cb->param = user_param;
2837
2838         rte_spinlock_lock(&rte_eth_rx_cb_lock);
2839         /* Add the callbacks in fifo order. */
2840         struct rte_eth_rxtx_callback *tail =
2841                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
2842
2843         if (!tail) {
2844                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
2845
2846         } else {
2847                 while (tail->next)
2848                         tail = tail->next;
2849                 tail->next = cb;
2850         }
2851         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
2852
2853         return cb;
2854 }
2855
2856 void *
2857 rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id,
2858                 rte_rx_callback_fn fn, void *user_param)
2859 {
2860 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2861         rte_errno = ENOTSUP;
2862         return NULL;
2863 #endif
2864         /* check input parameters */
2865         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2866                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2867                 rte_errno = EINVAL;
2868                 return NULL;
2869         }
2870
2871         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2872
2873         if (cb == NULL) {
2874                 rte_errno = ENOMEM;
2875                 return NULL;
2876         }
2877
2878         cb->fn.rx = fn;
2879         cb->param = user_param;
2880
2881         rte_spinlock_lock(&rte_eth_rx_cb_lock);
2882         /* Add the callback at the first position. */
2883         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
2884         rte_smp_wmb();
2885         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
2886         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
2887
2888         return cb;
2889 }
2890
2891 void *
2892 rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
2893                 rte_tx_callback_fn fn, void *user_param)
2894 {
2895 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2896         rte_errno = ENOTSUP;
2897         return NULL;
2898 #endif
2899         /* check input parameters */
2900         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2901                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
2902                 rte_errno = EINVAL;
2903                 return NULL;
2904         }
2905
2906         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2907
2908         if (cb == NULL) {
2909                 rte_errno = ENOMEM;
2910                 return NULL;
2911         }
2912
2913         cb->fn.tx = fn;
2914         cb->param = user_param;
2915
2916         rte_spinlock_lock(&rte_eth_tx_cb_lock);
2917         /* Add the callbacks in fifo order. */
2918         struct rte_eth_rxtx_callback *tail =
2919                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
2920
2921         if (!tail) {
2922                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
2923
2924         } else {
2925                 while (tail->next)
2926                         tail = tail->next;
2927                 tail->next = cb;
2928         }
2929         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
2930
2931         return cb;
2932 }
2933
2934 int
2935 rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
2936                 struct rte_eth_rxtx_callback *user_cb)
2937 {
2938 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2939         return -ENOTSUP;
2940 #endif
2941         /* Check input parameters. */
2942         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2943         if (user_cb == NULL ||
2944                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
2945                 return -EINVAL;
2946
2947         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2948         struct rte_eth_rxtx_callback *cb;
2949         struct rte_eth_rxtx_callback **prev_cb;
2950         int ret = -EINVAL;
2951
2952         rte_spinlock_lock(&rte_eth_rx_cb_lock);
2953         prev_cb = &dev->post_rx_burst_cbs[queue_id];
2954         for (; *prev_cb != NULL; prev_cb = &cb->next) {
2955                 cb = *prev_cb;
2956                 if (cb == user_cb) {
2957                         /* Remove the user cb from the callback list. */
2958                         *prev_cb = cb->next;
2959                         ret = 0;
2960                         break;
2961                 }
2962         }
2963         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
2964
2965         return ret;
2966 }
2967
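/*
 * Usage sketch (illustrative only): pair an add with a remove.  As the
 * loop above shows, removal only unlinks the callback; freeing it is
 * left to the caller, who must first make sure no rte_eth_rx_burst()
 * call can still be executing it.  "my_rx_cb" is a hypothetical
 * rte_rx_callback_fn.
 *
 *	void *cb = rte_eth_add_rx_callback(port_id, queue_id,
 *					   my_rx_cb, NULL);
 *
 *	if (cb != NULL &&
 *	    rte_eth_remove_rx_callback(port_id, queue_id, cb) == 0)
 *		rte_free(cb);	/* only once no burst is using it */
 */
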
2968 int
2969 rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
2970                 struct rte_eth_rxtx_callback *user_cb)
2971 {
2972 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2973         return -ENOTSUP;
2974 #endif
2975         /* Check input parameters. */
2976         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2977         if (user_cb == NULL ||
2978                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
2979                 return -EINVAL;
2980
2981         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2982         int ret = -EINVAL;
2983         struct rte_eth_rxtx_callback *cb;
2984         struct rte_eth_rxtx_callback **prev_cb;
2985
2986         rte_spinlock_lock(&rte_eth_tx_cb_lock);
2987         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
2988         for (; *prev_cb != NULL; prev_cb = &cb->next) {
2989                 cb = *prev_cb;
2990                 if (cb == user_cb) {
2991                         /* Remove the user cb from the callback list. */
2992                         *prev_cb = cb->next;
2993                         ret = 0;
2994                         break;
2995                 }
2996         }
2997         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
2998
2999         return ret;
3000 }
3001
3002 int
3003 rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3004         struct rte_eth_rxq_info *qinfo)
3005 {
3006         struct rte_eth_dev *dev;
3007
3008         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3009
3010         if (qinfo == NULL)
3011                 return -EINVAL;
3012
3013         dev = &rte_eth_devices[port_id];
3014         if (queue_id >= dev->data->nb_rx_queues) {
3015                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3016                 return -EINVAL;
3017         }
3018
3019         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3020
3021         memset(qinfo, 0, sizeof(*qinfo));
3022         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3023         return 0;
3024 }
3025
3026 int
3027 rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3028         struct rte_eth_txq_info *qinfo)
3029 {
3030         struct rte_eth_dev *dev;
3031
3032         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3033
3034         if (qinfo == NULL)
3035                 return -EINVAL;
3036
3037         dev = &rte_eth_devices[port_id];
3038         if (queue_id >= dev->data->nb_tx_queues) {
3039                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3040                 return -EINVAL;
3041         }
3042
3043         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3044
3045         memset(qinfo, 0, sizeof(*qinfo));
3046         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3047         return 0;
3048 }
3049
3050 int
3051 rte_eth_dev_set_mc_addr_list(uint8_t port_id,
3052                              struct ether_addr *mc_addr_set,
3053                              uint32_t nb_mc_addr)
3054 {
3055         struct rte_eth_dev *dev;
3056
3057         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3058
3059         dev = &rte_eth_devices[port_id];
3060         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3061         return dev->dev_ops->set_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
3062 }
3063
3064 int
3065 rte_eth_timesync_enable(uint8_t port_id)
3066 {
3067         struct rte_eth_dev *dev;
3068
3069         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3070         dev = &rte_eth_devices[port_id];
3071
3072         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3073         return (*dev->dev_ops->timesync_enable)(dev);
3074 }
3075
3076 int
3077 rte_eth_timesync_disable(uint8_t port_id)
3078 {
3079         struct rte_eth_dev *dev;
3080
3081         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3082         dev = &rte_eth_devices[port_id];
3083
3084         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3085         return (*dev->dev_ops->timesync_disable)(dev);
3086 }
3087
3088 int
3089 rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp,
3090                                    uint32_t flags)
3091 {
3092         struct rte_eth_dev *dev;
3093
3094         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3095         dev = &rte_eth_devices[port_id];
3096
3097         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3098         return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
3099 }
3100
3101 int
3102 rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp)
3103 {
3104         struct rte_eth_dev *dev;
3105
3106         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3107         dev = &rte_eth_devices[port_id];
3108
3109         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3110         return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
3111 }
3112
3113 int
3114 rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta)
3115 {
3116         struct rte_eth_dev *dev;
3117
3118         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3119         dev = &rte_eth_devices[port_id];
3120
3121         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
3122         return (*dev->dev_ops->timesync_adjust_time)(dev, delta);
3123 }
3124
3125 int
3126 rte_eth_timesync_read_time(uint8_t port_id, struct timespec *timestamp)
3127 {
3128         struct rte_eth_dev *dev;
3129
3130         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3131         dev = &rte_eth_devices[port_id];
3132
3133         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
3134         return (*dev->dev_ops->timesync_read_time)(dev, timestamp);
3135 }
3136
3137 int
3138 rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *timestamp)
3139 {
3140         struct rte_eth_dev *dev;
3141
3142         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3143         dev = &rte_eth_devices[port_id];
3144
3145         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
3146         return (*dev->dev_ops->timesync_write_time)(dev, timestamp);
3147 }
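/*
 * Usage sketch: the read/write pair above steps rather than slews the
 * device clock; seeding it from the system clock at start-up is a common
 * pattern (requires <time.h>; port 0 assumed valid):
 *
 *   struct timespec now;
 *
 *   clock_gettime(CLOCK_REALTIME, &now);
 *   rte_eth_timesync_write_time(0, &now);
 *
 *   rte_eth_timesync_read_time(0, &now);
 */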
3148
3149 int
3150 rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info)
3151 {
3152         struct rte_eth_dev *dev;
3153
3154         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3155
3156         dev = &rte_eth_devices[port_id];
3157         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
3158         return (*dev->dev_ops->get_reg)(dev, info);
3159 }
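/*
 * Usage sketch: a first call with info->data == NULL asks the PMD to fill
 * in the length and width fields; the caller then allocates a buffer and
 * repeats the call to fetch the register dump (sizing the buffer as
 * length * width bytes is an assumption here):
 *
 *   struct rte_dev_reg_info reg = { .data = NULL };
 *
 *   if (rte_eth_dev_get_reg_info(0, &reg) == 0) {
 *           reg.data = calloc(reg.length, reg.width);
 *           if (reg.data != NULL)
 *                   rte_eth_dev_get_reg_info(0, &reg);
 *   }
 */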
3160
3161 int
3162 rte_eth_dev_get_eeprom_length(uint8_t port_id)
3163 {
3164         struct rte_eth_dev *dev;
3165
3166         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3167
3168         dev = &rte_eth_devices[port_id];
3169         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
3170         return (*dev->dev_ops->get_eeprom_length)(dev);
3171 }
3172
3173 int
3174 rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
3175 {
3176         struct rte_eth_dev *dev;
3177
3178         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3179
3180         dev = &rte_eth_devices[port_id];
3181         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
3182         return (*dev->dev_ops->get_eeprom)(dev, info);
3183 }
3184
3185 int
3186 rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
3187 {
3188         struct rte_eth_dev *dev;
3189
3190         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3191
3192         dev = &rte_eth_devices[port_id];
3193         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
3194         return (*dev->dev_ops->set_eeprom)(dev, info);
3195 }
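/*
 * Usage sketch covering the three EEPROM calls above: the size is queried
 * first, the contents are read into a caller-supplied buffer, and (per the
 * ethtool convention this API mirrors) the magic value filled in by the
 * read is presented again on a later write:
 *
 *   int len = rte_eth_dev_get_eeprom_length(0);
 *
 *   if (len > 0) {
 *           struct rte_dev_eeprom_info ee = {
 *                   .data = malloc(len),
 *                   .offset = 0,
 *                   .length = (uint32_t)len,
 *           };
 *           if (ee.data != NULL && rte_eth_dev_get_eeprom(0, &ee) == 0) {
 *                   ... edit ee.data ...
 *                   rte_eth_dev_set_eeprom(0, &ee);
 *           }
 *   }
 */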
3196
3197 int
3198 rte_eth_dev_get_dcb_info(uint8_t port_id,
3199                              struct rte_eth_dcb_info *dcb_info)
3200 {
3201         struct rte_eth_dev *dev;
3202
3203         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3204
3205         dev = &rte_eth_devices[port_id];
3206         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
3207
3208         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
3209         return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
3210 }
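/*
 * Usage sketch: dump the user-priority to traffic-class mapping of a port
 * that has been configured for DCB (port 0 assumed):
 *
 *   struct rte_eth_dcb_info dcb;
 *   int i;
 *
 *   if (rte_eth_dev_get_dcb_info(0, &dcb) == 0)
 *           for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
 *                   printf("prio %d -> tc %u\n", i, dcb.prio_tc[i]);
 */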
3211
3212 void
3213 rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev, struct rte_pci_device *pci_dev)
3214 {
3215         if ((eth_dev == NULL) || (pci_dev == NULL)) {
3216                 RTE_PMD_DEBUG_TRACE("NULL pointer eth_dev=%p pci_dev=%p\n",
3217                                 eth_dev, pci_dev);
3218                 return;
3219         }
3220
3221         eth_dev->data->dev_flags = 0;
3222         if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
3223                 eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
3224         if (pci_dev->driver->drv_flags & RTE_PCI_DRV_DETACHABLE)
3225                 eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
3226
3227         eth_dev->data->kdrv = pci_dev->kdrv;
3228         eth_dev->data->numa_node = pci_dev->device.numa_node;
3229         eth_dev->data->drv_name = pci_dev->driver->driver.name;
3230 }
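/*
 * Usage sketch: PMDs call this helper early in device initialisation to
 * seed the generic ethdev data from the PCI probe results; the wrapper
 * below is hypothetical:
 *
 *   static void
 *   example_pmd_copy_dev_info(struct rte_eth_dev *eth_dev,
 *                             struct rte_pci_device *pci_dev)
 *   {
 *           rte_eth_copy_pci_info(eth_dev, pci_dev);
 *   }
 */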
3231
3232 int
3233 rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id,
3234                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
3235 {
3236         struct rte_eth_dev *dev;
3237
3238         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3239         if (l2_tunnel == NULL) {
3240                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
3241                 return -EINVAL;
3242         }
3243
3244         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
3245                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
3246                 return -EINVAL;
3247         }
3248
3249         dev = &rte_eth_devices[port_id];
3250         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
3251                                 -ENOTSUP);
3252         return (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev, l2_tunnel);
3253 }
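/*
 * Usage sketch: change the Ethertype the port recognises as an E-Tag
 * (802.1BR) header; only a few PMDs implement this hook:
 *
 *   struct rte_eth_l2_tunnel_conf conf = {
 *           .l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG,
 *           .ether_type = 0x893f,
 *   };
 *
 *   rte_eth_dev_l2_tunnel_eth_type_conf(0, &conf);
 */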
3254
3255 int
3256 rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
3257                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
3258                                   uint32_t mask,
3259                                   uint8_t en)
3260 {
3261         struct rte_eth_dev *dev;
3262
3263         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3264
3265         if (l2_tunnel == NULL) {
3266                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
3267                 return -EINVAL;
3268         }
3269
3270         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
3271                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
3272                 return -EINVAL;
3273         }
3274
3275         if (mask == 0) {
3276                 RTE_PMD_DEBUG_TRACE("Mask must be non-zero.\n");
3277                 return -EINVAL;
3278         }
3279
3280         dev = &rte_eth_devices[port_id];
3281         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
3282                                 -ENOTSUP);
3283         return (*dev->dev_ops->l2_tunnel_offload_set)(dev, l2_tunnel, mask, en);
3284 }
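/*
 * Usage sketch: mask selects which tunnel offloads to toggle and en turns
 * them on (1) or off (0); enabling E-Tag stripping and insertion together
 * might look like this:
 *
 *   struct rte_eth_l2_tunnel_conf conf = {
 *           .l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG,
 *   };
 *   uint32_t mask = ETH_L2_TUNNEL_STRIPPING_MASK |
 *                   ETH_L2_TUNNEL_INSERTION_MASK;
 *
 *   rte_eth_dev_l2_tunnel_offload_set(0, &conf, mask, 1);
 */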