ethdev: initialize more fields on allocation
[dpdk.git] / lib / librte_ether / rte_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_ether.h"
#include "rte_ethdev.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static struct rte_eth_dev_data *rte_eth_dev_data;
static uint8_t eth_dev_last_created_port;
static uint8_t nb_ports;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
        {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
                rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
                sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
                sizeof(rte_txq_stats_strings[0]))

/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * a pointer to the parameter to pass to that callback, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};
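
/*
 * Illustrative usage sketch: an application registers an entry of the
 * structure above through rte_eth_dev_callback_register(). The handler
 * name and body are hypothetical; the callback signature assumed here is
 * the one used by this version of the library.
 */
#if 0
static void
example_lsc_handler(uint8_t port_id, enum rte_eth_event_type event,
                void *cb_arg __rte_unused)
{
        if (event == RTE_ETH_EVENT_INTR_LSC)
                printf("port %u: link state changed\n", port_id);
}

/* at initialization time: */
rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
                example_lsc_handler, NULL);
#endif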

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

enum {
        DEV_DETACHED = 0,
        DEV_ATTACHED
};

static void
rte_eth_dev_data_alloc(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
                                rte_socket_id(), flags);
        } else
                mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
        if (mz == NULL)
                rte_panic("Cannot allocate memzone for ethernet port data\n");

        rte_eth_dev_data = mz->addr;
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                memset(rte_eth_dev_data, 0,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

static uint8_t
rte_eth_dev_find_free_port(void)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].attached == DEV_DETACHED)
                        return i;
        }
        return RTE_MAX_ETHPORTS;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
        uint8_t port_id;
        struct rte_eth_dev *eth_dev;

        port_id = rte_eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
                return NULL;
        }

        if (rte_eth_dev_data == NULL)
                rte_eth_dev_data_alloc();

        if (rte_eth_dev_allocated(name) != NULL) {
                RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
                                name);
                return NULL;
        }

        eth_dev = &rte_eth_devices[port_id];
        eth_dev->data = &rte_eth_dev_data[port_id];
        memset(eth_dev->data, 0, sizeof(*eth_dev->data));
        snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
        eth_dev->data->port_id = port_id;
        eth_dev->data->mtu = ETHER_MTU;
        TAILQ_INIT(&(eth_dev->link_intr_cbs));

        eth_dev->attached = DEV_ATTACHED;
        eth_dev_last_created_port = port_id;
        nb_ports++;
        return eth_dev;
}
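
/*
 * Illustrative usage sketch: how a PMD's init path typically obtains a
 * port with rte_eth_dev_allocate(). The driver name, private structure
 * and ops table below are hypothetical placeholders.
 */
#if 0
struct rte_eth_dev *eth_dev;

eth_dev = rte_eth_dev_allocate("example0");
if (eth_dev == NULL)
        return -ENOMEM;

/* the memset() in rte_eth_dev_allocate() has already zeroed eth_dev->data,
 * so only driver-specific fields need to be filled in here */
eth_dev->data->dev_private = rte_zmalloc("example private data",
                sizeof(struct example_private), RTE_CACHE_LINE_SIZE);
eth_dev->dev_ops = &example_eth_dev_ops;
#endif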

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        eth_dev->attached = DEV_DETACHED;
        nb_ports--;
        return 0;
}

int
rte_eth_dev_pci_probe(struct rte_pci_driver *pci_drv,
                      struct rte_pci_device *pci_dev)
{
        struct eth_driver *eth_drv;
        struct rte_eth_dev *eth_dev;
        char ethdev_name[RTE_ETH_NAME_MAX_LEN];
        int diag;

        eth_drv = (struct eth_driver *)pci_drv;

        rte_eal_pci_device_name(&pci_dev->addr, ethdev_name,
                        sizeof(ethdev_name));

        eth_dev = rte_eth_dev_allocate(ethdev_name);
        if (eth_dev == NULL)
                return -ENOMEM;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
                                  eth_drv->dev_private_size,
                                  RTE_CACHE_LINE_SIZE);
                if (eth_dev->data->dev_private == NULL)
                        rte_panic("Cannot allocate memzone for private port data\n");
        }
        eth_dev->pci_dev = pci_dev;
        eth_dev->driver = eth_drv;

        /* Invoke PMD device initialization function */
        diag = (*eth_drv->eth_dev_init)(eth_dev);
        if (diag == 0)
                return 0;

        RTE_PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%x device_id=0x%x) failed\n",
                        pci_drv->driver.name,
                        (unsigned) pci_dev->id.vendor_id,
                        (unsigned) pci_dev->id.device_id);
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);
        rte_eth_dev_release_port(eth_dev);
        return diag;
}

int
rte_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
        const struct eth_driver *eth_drv;
        struct rte_eth_dev *eth_dev;
        char ethdev_name[RTE_ETH_NAME_MAX_LEN];
        int ret;

        if (pci_dev == NULL)
                return -EINVAL;

        rte_eal_pci_device_name(&pci_dev->addr, ethdev_name,
                        sizeof(ethdev_name));

        eth_dev = rte_eth_dev_allocated(ethdev_name);
        if (eth_dev == NULL)
                return -ENODEV;

        eth_drv = (const struct eth_driver *)pci_dev->driver;

        /* Invoke PMD device uninit function */
        if (*eth_drv->eth_dev_uninit) {
                ret = (*eth_drv->eth_dev_uninit)(eth_dev);
                if (ret)
                        return ret;
        }

        /* free ether device */
        rte_eth_dev_release_port(eth_dev);

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);

        eth_dev->pci_dev = NULL;
        eth_dev->driver = NULL;
        eth_dev->data = NULL;

        return 0;
}

int
rte_eth_dev_is_valid_port(uint8_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            rte_eth_devices[port_id].attached != DEV_ATTACHED)
                return 0;
        else
                return 1;
}

int
rte_eth_dev_socket_id(uint8_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
        return rte_eth_devices[port_id].data->numa_node;
}

uint8_t
rte_eth_dev_count(void)
{
        return nb_ports;
}

int
rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
{
        char *tmp;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (name == NULL) {
                RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        /* do not check 'rte_eth_devices[i].data' here, because it
         * might have been overwritten by a VDEV PMD */
        tmp = rte_eth_dev_data[port_id].name;
        strcpy(name, tmp);
        return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
{
        int i;

        if (name == NULL) {
                RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        if (!nb_ports)
                return -ENODEV;

        *port_id = RTE_MAX_ETHPORTS;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (!strncmp(name,
                        rte_eth_dev_data[i].name, strlen(name))) {
                        *port_id = i;
                        return 0;
                }
        }
        return -ENODEV;
}
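
/*
 * Illustrative usage sketch: round-tripping between a port number and
 * its device name with the two lookups above.
 */
#if 0
char name[RTE_ETH_NAME_MAX_LEN];
uint8_t port_id;

if (rte_eth_dev_get_name_by_port(0, name) == 0 &&
    rte_eth_dev_get_port_by_name(name, &port_id) == 0)
        printf("port 0 is named %s (resolved back to port %u)\n",
               name, port_id);
#endif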

static int
rte_eth_dev_is_detachable(uint8_t port_id)
{
        uint32_t dev_flags;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        switch (rte_eth_devices[port_id].data->kdrv) {
        case RTE_KDRV_IGB_UIO:
        case RTE_KDRV_UIO_GENERIC:
        case RTE_KDRV_NIC_UIO:
        case RTE_KDRV_NONE:
                break;
        case RTE_KDRV_VFIO:
        default:
                return -ENOTSUP;
        }
        dev_flags = rte_eth_devices[port_id].data->dev_flags;
        if ((dev_flags & RTE_ETH_DEV_DETACHABLE) &&
                (!(dev_flags & RTE_ETH_DEV_BONDED_SLAVE)))
                return 0;
        else
                return 1;
}

/* attach a new device, then store the port_id of the attached device */
int
rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
{
        int ret = -1;
        int current = rte_eth_dev_count();
        char *name = NULL;
        char *args = NULL;

        if ((devargs == NULL) || (port_id == NULL)) {
                ret = -EINVAL;
                goto err;
        }

        /* parse devargs, then retrieve device name and args */
        if (rte_eal_parse_devargs_str(devargs, &name, &args))
                goto err;

        ret = rte_eal_dev_attach(name, args);
        if (ret < 0)
                goto err;

        /* no point looking at the port count if no port exists */
        if (!rte_eth_dev_count()) {
                RTE_LOG(ERR, EAL, "No port found for device (%s)\n", name);
                ret = -1;
                goto err;
        }

        /* if the port count did not change, there is a bug: the driver
         * reported a successful attach but did not create a port.
         */
        if (current == rte_eth_dev_count()) {
                ret = -1;
                goto err;
        }

        *port_id = eth_dev_last_created_port;
        ret = 0;

err:
        free(name);
        free(args);
        return ret;
}
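
/*
 * Illustrative usage sketch: hot-plugging a device at runtime. The
 * devargs string below is a hypothetical example for a virtual PMD;
 * valid strings depend on which drivers are built in.
 */
#if 0
uint8_t port_id;

if (rte_eth_dev_attach("net_ring0", &port_id) == 0)
        printf("attached as port %u\n", port_id);
#endif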

/* detach the device, then store the name of the device */
int
rte_eth_dev_detach(uint8_t port_id, char *name)
{
        int ret = -1;

        if (name == NULL) {
                ret = -EINVAL;
                goto err;
        }

        /* FIXME: move this to eal, once device flags are relocated there */
        if (rte_eth_dev_is_detachable(port_id))
                goto err;

        snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
                 "%s", rte_eth_devices[port_id].data->name);
        ret = rte_eal_dev_detach(name);
        if (ret < 0)
                goto err;

        return 0;

err:
        return ret;
}

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -ENOMEM;
                }
        } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;

        } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

int
rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
                        " already started\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
}

int
rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
                        " already stopped\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
}

int
rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
                        " already started\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
}

int
rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
                        " already stopped\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -ENOMEM;
                }
        } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;

        } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
        switch (speed) {
        case ETH_SPEED_NUM_10M:
                return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
        case ETH_SPEED_NUM_100M:
                return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
        case ETH_SPEED_NUM_1G:
                return ETH_LINK_SPEED_1G;
        case ETH_SPEED_NUM_2_5G:
                return ETH_LINK_SPEED_2_5G;
        case ETH_SPEED_NUM_5G:
                return ETH_LINK_SPEED_5G;
        case ETH_SPEED_NUM_10G:
                return ETH_LINK_SPEED_10G;
        case ETH_SPEED_NUM_20G:
                return ETH_LINK_SPEED_20G;
        case ETH_SPEED_NUM_25G:
                return ETH_LINK_SPEED_25G;
        case ETH_SPEED_NUM_40G:
                return ETH_LINK_SPEED_40G;
        case ETH_SPEED_NUM_50G:
                return ETH_LINK_SPEED_50G;
        case ETH_SPEED_NUM_56G:
                return ETH_LINK_SPEED_56G;
        case ETH_SPEED_NUM_100G:
                return ETH_LINK_SPEED_100G;
        default:
                return 0;
        }
}
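
/*
 * Illustrative usage sketch: building the link_speeds mask of
 * struct rte_eth_conf from a numeric speed with rte_eth_speed_bitflag();
 * the duplex argument only matters for 10M and 100M.
 */
#if 0
struct rte_eth_conf conf = { 0 };

conf.link_speeds = ETH_LINK_SPEED_FIXED |
        rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX);
#endif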

int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        int diag;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_PMD_DEBUG_TRACE(
                        "Number of RX queues requested (%u) is greater than max supported(%d)\n",
                        nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_PMD_DEBUG_TRACE(
                        "Number of TX queues requested (%u) is greater than max supported(%d)\n",
                        nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                RTE_PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        /* Copy the dev_conf parameter into the dev structure */
        memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        (*dev->dev_ops->dev_infos_get)(dev, &dev_info);

        if (nb_rx_q == 0 && nb_tx_q == 0) {
                RTE_PMD_DEBUG_TRACE("ethdev port_id=%d: RX and TX queue counts cannot both be 0\n", port_id);
                return -EINVAL;
        }

        if (nb_rx_q > dev_info.max_rx_queues) {
                RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
                                port_id, nb_rx_q, dev_info.max_rx_queues);
                return -EINVAL;
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
                                port_id, nb_tx_q, dev_info.max_tx_queues);
                return -EINVAL;
        }

        /*
         * If link state interrupt is enabled, check that the
         * device supports it.
         */
        if ((dev_conf->intr_conf.lsc == 1) &&
                (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
                RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
                                dev->data->drv_name);
                return -EINVAL;
        }

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (dev_conf->rxmode.jumbo_frame == 1) {
                if (dev_conf->rxmode.max_rx_pkt_len >
                    dev_info.max_rx_pktlen) {
                        RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " > max valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)dev_info.max_rx_pktlen);
                        return -EINVAL;
                } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
                        RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " < min valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)ETHER_MIN_LEN);
                        return -EINVAL;
                }
        } else {
                if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
                        dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
                        /* Use default value */
                        dev->data->dev_conf.rxmode.max_rx_pkt_len =
                                                        ETHER_MAX_LEN;
        }

        /*
         * Setup new number of RX/TX queues and reconfigure device.
         */
        diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
        if (diag != 0) {
                RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
                                port_id, diag);
                return diag;
        }

        diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
        if (diag != 0) {
                RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                return diag;
        }

        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                return diag;
        }

        return 0;
}
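
/*
 * Illustrative usage sketch: a typical application-side call to
 * rte_eth_dev_configure() with one RX and one TX queue and a mostly
 * default rte_eth_conf.
 */
#if 0
static const struct rte_eth_conf port_conf = {
        .rxmode = {
                .max_rx_pkt_len = ETHER_MAX_LEN,
        },
};

if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) < 0)
        rte_exit(EXIT_FAILURE, "cannot configure port %u\n", port_id);
#endif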

static void
rte_eth_dev_config_restore(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct ether_addr addr;
        uint16_t i;
        uint32_t pool = 0;

        dev = &rte_eth_devices[port_id];

        rte_eth_dev_info_get(port_id, &dev_info);

        if (RTE_ETH_DEV_SRIOV(dev).active)
                pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

        /* replay MAC address configuration */
        for (i = 0; i < dev_info.max_mac_addrs; i++) {
                addr = dev->data->mac_addrs[i];

                /* skip zero address */
                if (is_zero_ether_addr(&addr))
                        continue;

                /* add address to the hardware */
                if (*dev->dev_ops->mac_addr_add &&
                        (dev->data->mac_pool_sel[i] & (1ULL << pool)))
                        (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
                else {
                        RTE_PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
                                        port_id);
                        /* exit the loop but do not return an error */
                        break;
                }
        }

        /* replay promiscuous configuration */
        if (rte_eth_promiscuous_get(port_id) == 1)
                rte_eth_promiscuous_enable(port_id);
        else if (rte_eth_promiscuous_get(port_id) == 0)
                rte_eth_promiscuous_disable(port_id);

        /* replay all multicast configuration */
        if (rte_eth_allmulticast_get(port_id) == 1)
                rte_eth_allmulticast_enable(port_id);
        else if (rte_eth_allmulticast_get(port_id) == 0)
                rte_eth_allmulticast_disable(port_id);
}

int
rte_eth_dev_start(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        int diag;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

        if (dev->data->dev_started != 0) {
                RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
                        " already started\n",
                        port_id);
                return 0;
        }

        diag = (*dev->dev_ops->dev_start)(dev);
        if (diag == 0)
                dev->data->dev_started = 1;
        else
                return diag;

        rte_eth_dev_config_restore(port_id);

        if (dev->data->dev_conf.intr_conf.lsc == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
                (*dev->dev_ops->link_update)(dev, 0);
        }
        return 0;
}

void
rte_eth_dev_stop(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

        if (dev->data->dev_started == 0) {
                RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
                        " already stopped\n",
                        port_id);
                return;
        }

        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_stop)(dev);
}

int
rte_eth_dev_set_link_up(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
        return (*dev->dev_ops->dev_set_link_up)(dev);
}

int
rte_eth_dev_set_link_down(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
        return (*dev->dev_ops->dev_set_link_down)(dev);
}

void
rte_eth_dev_close(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_close)(dev);

        rte_free(dev->data->rx_queues);
        dev->data->rx_queues = NULL;
        rte_free(dev->data->tx_queues);
        dev->data->tx_queues = NULL;
}

int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
                       uint16_t nb_rx_desc, unsigned int socket_id,
                       const struct rte_eth_rxconf *rx_conf,
                       struct rte_mempool *mp)
{
        int ret;
        uint32_t mbp_buf_size;
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        if (dev->data->dev_started) {
                RTE_PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

        /*
         * Check the size of the mbuf data buffer: it must be provided in
         * the private data of the memory pool, so first check that the
         * memory pool has valid private data.
         */
        rte_eth_dev_info_get(port_id, &dev_info);
        if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
                RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
                                mp->name, (int) mp->private_data_size,
                                (int) sizeof(struct rte_pktmbuf_pool_private));
                return -ENOSPC;
        }
        mbp_buf_size = rte_pktmbuf_data_room_size(mp);

        if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
                RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
                                "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
                                "=%d)\n",
                                mp->name,
                                (int)mbp_buf_size,
                                (int)(RTE_PKTMBUF_HEADROOM +
                                      dev_info.min_rx_bufsize),
                                (int)RTE_PKTMBUF_HEADROOM,
                                (int)dev_info.min_rx_bufsize);
                return -EINVAL;
        }

        if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
                        nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
                        nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
                RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
                        "should be: <= %hu, >= %hu, and a multiple of %hu\n",
                        nb_rx_desc,
                        dev_info.rx_desc_lim.nb_max,
                        dev_info.rx_desc_lim.nb_min,
                        dev_info.rx_desc_lim.nb_align);
                return -EINVAL;
        }

        if (rx_conf == NULL)
                rx_conf = &dev_info.default_rxconf;

        ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
                                              socket_id, rx_conf, mp);
        if (!ret) {
                if (!dev->data->min_rx_buf_size ||
                    dev->data->min_rx_buf_size > mbp_buf_size)
                        dev->data->min_rx_buf_size = mbp_buf_size;
        }

        return ret;
}

int
rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
                       uint16_t nb_tx_desc, unsigned int socket_id,
                       const struct rte_eth_txconf *tx_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        if (dev->data->dev_started) {
                RTE_PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);

        rte_eth_dev_info_get(port_id, &dev_info);

        if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
            nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
            nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
                RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
                                "should be: <= %hu, >= %hu, and a multiple of %hu\n",
                                nb_tx_desc,
                                dev_info.tx_desc_lim.nb_max,
                                dev_info.tx_desc_lim.nb_min,
                                dev_info.tx_desc_lim.nb_align);
                return -EINVAL;
        }

        if (tx_conf == NULL)
                tx_conf = &dev_info.default_txconf;

        return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
                                               socket_id, tx_conf);
}
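
/*
 * Illustrative usage sketch: the usual bring-up order, configure, then
 * per-queue setup, then start. The mempool "mbuf_pool", port_conf and
 * the descriptor counts are hypothetical.
 */
#if 0
if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) < 0 ||
    rte_eth_rx_queue_setup(port_id, 0, 128, rte_eth_dev_socket_id(port_id),
                           NULL, mbuf_pool) < 0 ||
    rte_eth_tx_queue_setup(port_id, 0, 512, rte_eth_dev_socket_id(port_id),
                           NULL) < 0 ||
    rte_eth_dev_start(port_id) < 0)
        rte_exit(EXIT_FAILURE, "port %u init failed\n", port_id);
#endif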

void
rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
                void *userdata __rte_unused)
{
        unsigned i;

        for (i = 0; i < unsent; i++)
                rte_pktmbuf_free(pkts[i]);
}

void
rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
                void *userdata)
{
        uint64_t *count = userdata;
        unsigned i;

        for (i = 0; i < unsent; i++)
                rte_pktmbuf_free(pkts[i]);

        *count += unsent;
}

int
rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
                buffer_tx_error_fn cbfn, void *userdata)
{
        buffer->error_callback = cbfn;
        buffer->error_userdata = userdata;
        return 0;
}

int
rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
{
        int ret = 0;

        if (buffer == NULL)
                return -EINVAL;

        buffer->size = size;
        if (buffer->error_callback == NULL) {
                ret = rte_eth_tx_buffer_set_err_callback(
                        buffer, rte_eth_tx_buffer_drop_callback, NULL);
        }

        return ret;
}
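
/*
 * Illustrative usage sketch: initializing a TX buffer with a drop
 * counter. RTE_ETH_TX_BUFFER_SIZE(), rte_eth_tx_buffer() and
 * rte_eth_tx_buffer_flush() come from the public rte_ethdev.h API;
 * "mbuf" and the buffer size of 32 are hypothetical.
 */
#if 0
static uint64_t dropped;
struct rte_eth_dev_tx_buffer *buffer;

buffer = rte_zmalloc_socket("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(32),
                0, rte_eth_dev_socket_id(port_id));
rte_eth_tx_buffer_init(buffer, 32);
rte_eth_tx_buffer_set_err_callback(buffer,
                rte_eth_tx_buffer_count_callback, &dropped);

/* in the datapath: */
rte_eth_tx_buffer(port_id, 0, buffer, mbuf);
rte_eth_tx_buffer_flush(port_id, 0, buffer);
#endif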

void
rte_eth_promiscuous_enable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
        (*dev->dev_ops->promiscuous_enable)(dev);
        dev->data->promiscuous = 1;
}

void
rte_eth_promiscuous_disable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
        dev->data->promiscuous = 0;
        (*dev->dev_ops->promiscuous_disable)(dev);
}

int
rte_eth_promiscuous_get(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        return dev->data->promiscuous;
}

void
rte_eth_allmulticast_enable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
        (*dev->dev_ops->allmulticast_enable)(dev);
        dev->data->all_multicast = 1;
}

void
rte_eth_allmulticast_disable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
        dev->data->all_multicast = 0;
        (*dev->dev_ops->allmulticast_disable)(dev);
}

int
rte_eth_allmulticast_get(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        return dev->data->all_multicast;
}

static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
{
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &(dev->data->dev_link);

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

void
rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_conf.intr_conf.lsc != 0)
                rte_eth_dev_atomic_read_link_status(dev, eth_link);
        else {
                RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
                (*dev->dev_ops->link_update)(dev, 1);
                *eth_link = dev->data->dev_link;
        }
}

void
rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_conf.intr_conf.lsc != 0)
                rte_eth_dev_atomic_read_link_status(dev, eth_link);
        else {
                RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
                (*dev->dev_ops->link_update)(dev, 0);
                *eth_link = dev->data->dev_link;
        }
}
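
/*
 * Illustrative usage sketch: polling the link without blocking and
 * printing the result.
 */
#if 0
struct rte_eth_link link;

rte_eth_link_get_nowait(port_id, &link);
if (link.link_status)
        printf("port %u: link up, %u Mbps\n", port_id, link.link_speed);
else
        printf("port %u: link down\n", port_id);
#endif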

int
rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        memset(stats, 0, sizeof(*stats));

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
        stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
        (*dev->dev_ops->stats_get)(dev, stats);
        return 0;
}

void
rte_eth_stats_reset(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
        (*dev->dev_ops->stats_reset)(dev);
        dev->data->rx_mbuf_alloc_failed = 0;
}

static int
get_xstats_count(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        int count;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
        dev = &rte_eth_devices[port_id];
        if (dev->dev_ops->xstats_get_names != NULL) {
                count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
                if (count < 0)
                        return count;
        } else
                count = 0;
        count += RTE_NB_STATS;
        count += RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS) *
                 RTE_NB_RXQ_STATS;
        count += RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS) *
                 RTE_NB_TXQ_STATS;
        return count;
}

int
rte_eth_xstats_get_names(uint8_t port_id,
        struct rte_eth_xstat_name *xstats_names,
        unsigned size)
{
        struct rte_eth_dev *dev;
        int cnt_used_entries;
        int cnt_expected_entries;
        int cnt_driver_entries;
        uint32_t idx, id_queue;
        uint16_t num_q;

        cnt_expected_entries = get_xstats_count(port_id);
        if (xstats_names == NULL || cnt_expected_entries < 0 ||
                        (int)size < cnt_expected_entries)
                return cnt_expected_entries;

        /* port_id checked in get_xstats_count() */
        dev = &rte_eth_devices[port_id];
        cnt_used_entries = 0;

        for (idx = 0; idx < RTE_NB_STATS; idx++) {
                snprintf(xstats_names[cnt_used_entries].name,
                        sizeof(xstats_names[0].name),
                        "%s", rte_stats_strings[idx].name);
                cnt_used_entries++;
        }
        num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
        for (id_queue = 0; id_queue < num_q; id_queue++) {
                for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
                        snprintf(xstats_names[cnt_used_entries].name,
                                sizeof(xstats_names[0].name),
                                "rx_q%u%s",
                                id_queue, rte_rxq_stats_strings[idx].name);
                        cnt_used_entries++;
                }
        }
        num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
        for (id_queue = 0; id_queue < num_q; id_queue++) {
                for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
                        snprintf(xstats_names[cnt_used_entries].name,
                                sizeof(xstats_names[0].name),
                                "tx_q%u%s",
                                id_queue, rte_txq_stats_strings[idx].name);
                        cnt_used_entries++;
                }
        }

        if (dev->dev_ops->xstats_get_names != NULL) {
                /* If there are any driver-specific xstats, append them
                 * to the end of the list.
                 */
                cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
                        dev,
                        xstats_names + cnt_used_entries,
                        size - cnt_used_entries);
                if (cnt_driver_entries < 0)
                        return cnt_driver_entries;
                cnt_used_entries += cnt_driver_entries;
        }

        return cnt_used_entries;
}

/* retrieve ethdev extended statistics */
int
rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
        unsigned n)
{
        struct rte_eth_stats eth_stats;
        struct rte_eth_dev *dev;
        unsigned count = 0, i, q;
        signed xcount = 0;
        uint64_t val, *stats_ptr;
        uint16_t nb_rxqs, nb_txqs;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
        nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

        /* Return generic statistics */
        count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
                (nb_txqs * RTE_NB_TXQ_STATS);

        /* implemented by the driver */
        if (dev->dev_ops->xstats_get != NULL) {
                /* Retrieve the xstats from the driver at the end of the
                 * xstats struct.
                 */
                xcount = (*dev->dev_ops->xstats_get)(dev,
                                     xstats ? xstats + count : NULL,
                                     (n > count) ? n - count : 0);

                if (xcount < 0)
                        return xcount;
        }

        if (n < count + xcount || xstats == NULL)
                return count + xcount;

        /* now fill the xstats structure */
        count = 0;
        rte_eth_stats_get(port_id, &eth_stats);

        /* global stats */
        for (i = 0; i < RTE_NB_STATS; i++) {
                stats_ptr = RTE_PTR_ADD(&eth_stats,
                                        rte_stats_strings[i].offset);
                val = *stats_ptr;
                xstats[count++].value = val;
        }

        /* per-rxq stats */
        for (q = 0; q < nb_rxqs; q++) {
                for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
                        stats_ptr = RTE_PTR_ADD(&eth_stats,
                                        rte_rxq_stats_strings[i].offset +
                                        q * sizeof(uint64_t));
                        val = *stats_ptr;
                        xstats[count++].value = val;
                }
        }

        /* per-txq stats */
        for (q = 0; q < nb_txqs; q++) {
                for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
                        stats_ptr = RTE_PTR_ADD(&eth_stats,
                                        rte_txq_stats_strings[i].offset +
                                        q * sizeof(uint64_t));
                        val = *stats_ptr;
                        xstats[count++].value = val;
                }
        }

        for (i = 0; i < count + xcount; i++)
                xstats[i].id = i;

        return count + xcount;
}
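
/*
 * Illustrative usage sketch: the two-pass pattern for reading xstats,
 * first query the count with NULL buffers, then allocate and fetch
 * names and values.
 */
#if 0
int i, len = rte_eth_xstats_get_names(port_id, NULL, 0);
struct rte_eth_xstat *vals = malloc(len * sizeof(*vals));
struct rte_eth_xstat_name *names = malloc(len * sizeof(*names));

if (vals != NULL && names != NULL &&
    rte_eth_xstats_get_names(port_id, names, len) == len &&
    rte_eth_xstats_get(port_id, vals, len) == len) {
        for (i = 0; i < len; i++)
                printf("%s: %" PRIu64 "\n",
                       names[vals[i].id].name, vals[i].value);
}
free(vals);
free(names);
#endif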
1494
1495 /* reset ethdev extended statistics */
1496 void
1497 rte_eth_xstats_reset(uint8_t port_id)
1498 {
1499         struct rte_eth_dev *dev;
1500
1501         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1502         dev = &rte_eth_devices[port_id];
1503
1504         /* implemented by the driver */
1505         if (dev->dev_ops->xstats_reset != NULL) {
1506                 (*dev->dev_ops->xstats_reset)(dev);
1507                 return;
1508         }
1509
1510         /* fallback to default */
1511         rte_eth_stats_reset(port_id);
1512 }
1513
1514 static int
1515 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1516                 uint8_t is_rx)
1517 {
1518         struct rte_eth_dev *dev;
1519
1520         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1521
1522         dev = &rte_eth_devices[port_id];
1523
1524         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1525         return (*dev->dev_ops->queue_stats_mapping_set)
1526                         (dev, queue_id, stat_idx, is_rx);
1527 }
1528
1529
1530 int
1531 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1532                 uint8_t stat_idx)
1533 {
1534         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1535                         STAT_QMAP_TX);
1536 }
1537
1538
1539 int
1540 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1541                 uint8_t stat_idx)
1542 {
1543         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1544                         STAT_QMAP_RX);
1545 }
1546
1547 void
1548 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1549 {
1550         struct rte_eth_dev *dev;
1551         const struct rte_eth_desc_lim lim = {
1552                 .nb_max = UINT16_MAX,
1553                 .nb_min = 0,
1554                 .nb_align = 1,
1555         };
1556
1557         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1558         dev = &rte_eth_devices[port_id];
1559
1560         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
1561         dev_info->rx_desc_lim = lim;
1562         dev_info->tx_desc_lim = lim;
1563
1564         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1565         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1566         dev_info->pci_dev = dev->pci_dev;
1567         dev_info->driver_name = dev->data->drv_name;
1568         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
1569         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
1570 }
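
/*
 * Illustrative sketch, not part of this library: a typical consumer of
 * rte_eth_dev_info_get(). Because the function above zeroes the
 * structure and pre-fills the descriptor limits before calling the
 * PMD, every field is safe to read even for drivers that fill only a
 * subset. The "example_" name is hypothetical.
 */
static void
example_print_dev_limits(uint8_t port_id)
{
        struct rte_eth_dev_info dev_info;

        rte_eth_dev_info_get(port_id, &dev_info);
        printf("driver %s: up to %u RX and %u TX queues, "
               "RX descriptors %u-%u\n",
               dev_info.driver_name,
               dev_info.max_rx_queues, dev_info.max_tx_queues,
               dev_info.rx_desc_lim.nb_min, dev_info.rx_desc_lim.nb_max);
}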
1571
1572 int
1573 rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask,
1574                                  uint32_t *ptypes, int num)
1575 {
1576         int i, j;
1577         struct rte_eth_dev *dev;
1578         const uint32_t *all_ptypes;
1579
1580         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1581         dev = &rte_eth_devices[port_id];
1582         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
1583         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
1584
1585         if (!all_ptypes)
1586                 return 0;
1587
1588         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
1589                 if (all_ptypes[i] & ptype_mask) {
1590                         if (j < num)
1591                                 ptypes[j] = all_ptypes[i];
1592                         j++;
1593                 }
1594
1595         return j;
1596 }
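
/*
 * Illustrative sketch, not part of this library: the same
 * probe-then-fetch pattern as the xstats API, here listing the L3
 * packet types a port can recognize. Passing num == 0 is safe because
 * the loop above only writes ptypes[] while j < num. Hypothetical
 * example code.
 */
static void
example_list_l3_ptypes(uint8_t port_id)
{
        uint32_t *ptypes;
        int num, i;

        num = rte_eth_dev_get_supported_ptypes(port_id,
                        RTE_PTYPE_L3_MASK, NULL, 0);
        if (num <= 0)
                return;
        ptypes = malloc(sizeof(uint32_t) * num);
        if (ptypes == NULL)
                return;
        num = rte_eth_dev_get_supported_ptypes(port_id,
                        RTE_PTYPE_L3_MASK, ptypes, num);
        for (i = 0; i < num; i++)
                printf("supported ptype 0x%08" PRIx32 "\n", ptypes[i]);
        free(ptypes);
}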
1597
1598 void
1599 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1600 {
1601         struct rte_eth_dev *dev;
1602
1603         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1604         dev = &rte_eth_devices[port_id];
1605         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1606 }
1607
1608
1609 int
1610 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1611 {
1612         struct rte_eth_dev *dev;
1613
1614         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1615
1616         dev = &rte_eth_devices[port_id];
1617         *mtu = dev->data->mtu;
1618         return 0;
1619 }
1620
1621 int
1622 rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
1623 {
1624         int ret;
1625         struct rte_eth_dev *dev;
1626
1627         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1628         dev = &rte_eth_devices[port_id];
1629         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
1630
1631         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
1632         if (!ret)
1633                 dev->data->mtu = mtu;
1634
1635         return ret;
1636 }
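
/*
 * Illustrative sketch, not part of this library: raise the MTU to a
 * hypothetical jumbo-frame value. On success the setter above also
 * refreshes the cached dev->data->mtu, so a subsequent
 * rte_eth_dev_get_mtu() returns the new value without touching the
 * hardware.
 */
static int
example_enable_jumbo_mtu(uint8_t port_id)
{
        uint16_t mtu;
        int ret;

        ret = rte_eth_dev_get_mtu(port_id, &mtu);
        if (ret < 0)
                return ret;
        if (mtu >= 9000)
                return 0; /* already large enough */
        return rte_eth_dev_set_mtu(port_id, 9000); /* -ENOTSUP without mtu_set */
}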
1637
1638 int
1639 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1640 {
1641         struct rte_eth_dev *dev;
1642
1643         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1644         dev = &rte_eth_devices[port_id];
1645         if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1646                 RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1647                 return -ENOSYS;
1648         }
1649
1650         if (vlan_id > 4095) {
1651                 RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1652                                 port_id, (unsigned) vlan_id);
1653                 return -EINVAL;
1654         }
1655         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1656
1657         return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1658 }
1659
1660 int
1661 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1662 {
1663         struct rte_eth_dev *dev;
1664
1665         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1666         dev = &rte_eth_devices[port_id];
1667         if (rx_queue_id >= dev->data->nb_rx_queues) {
1668                 RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);

1669                 return -EINVAL;
1670         }
1671
1672         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1673         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1674
1675         return 0;
1676 }
1677
1678 int
1679 rte_eth_dev_set_vlan_ether_type(uint8_t port_id,
1680                                 enum rte_vlan_type vlan_type,
1681                                 uint16_t tpid)
1682 {
1683         struct rte_eth_dev *dev;
1684
1685         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1686         dev = &rte_eth_devices[port_id];
1687         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1688
1689         return (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, tpid);
1690 }
1691
1692 int
1693 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
1694 {
1695         struct rte_eth_dev *dev;
1696         int ret = 0;
1697         int mask = 0;
1698         int cur, org = 0;
1699
1700         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1701         dev = &rte_eth_devices[port_id];
1702
1703         /* check which options were changed by the application */
1704         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
1705         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
1706         if (cur != org) {
1707                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
1708                 mask |= ETH_VLAN_STRIP_MASK;
1709         }
1710
1711         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
1712         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
1713         if (cur != org) {
1714                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
1715                 mask |= ETH_VLAN_FILTER_MASK;
1716         }
1717
1718         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
1719         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
1720         if (cur != org) {
1721                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
1722                 mask |= ETH_VLAN_EXTEND_MASK;
1723         }
1724
1725         /* no change */
1726         if (mask == 0)
1727                 return ret;
1728
1729         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
1730         (*dev->dev_ops->vlan_offload_set)(dev, mask);
1731
1732         return ret;
1733 }
1734
1735 int
1736 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1737 {
1738         struct rte_eth_dev *dev;
1739         int ret = 0;
1740
1741         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1742         dev = &rte_eth_devices[port_id];
1743
1744         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1745                 ret |= ETH_VLAN_STRIP_OFFLOAD;
1746
1747         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1748                 ret |= ETH_VLAN_FILTER_OFFLOAD;
1749
1750         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1751                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
1752
1753         return ret;
1754 }
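
/*
 * Illustrative sketch, not part of this library: the intended
 * read-modify-write pattern for the two functions above. Because the
 * setter diffs the requested mask against the current configuration,
 * only the VLAN-strip change is pushed down to the PMD here.
 * Hypothetical example code.
 */
static int
example_enable_vlan_strip(uint8_t port_id)
{
        int mask;

        mask = rte_eth_dev_get_vlan_offload(port_id);
        if (mask < 0)
                return mask;
        return rte_eth_dev_set_vlan_offload(port_id,
                        mask | ETH_VLAN_STRIP_OFFLOAD);
}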
1755
1756 int
1757 rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
1758 {
1759         struct rte_eth_dev *dev;
1760
1761         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1762         dev = &rte_eth_devices[port_id];
1763         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
1764         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
1765
1766         return 0;
1767 }
1768
1769 int
1770 rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1771 {
1772         struct rte_eth_dev *dev;
1773
1774         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1775         dev = &rte_eth_devices[port_id];
1776         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
1777         memset(fc_conf, 0, sizeof(*fc_conf));
1778         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
1779 }
1780
1781 int
1782 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1783 {
1784         struct rte_eth_dev *dev;
1785
1786         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1787         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
1788                 RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
1789                 return -EINVAL;
1790         }
1791
1792         dev = &rte_eth_devices[port_id];
1793         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
1794         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
1795 }
1796
1797 int
1798 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
1799 {
1800         struct rte_eth_dev *dev;
1801
1802         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1803         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
1804                 RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
1805                 return -EINVAL;
1806         }
1807
1808         dev = &rte_eth_devices[port_id];
1809         /* High water, low water validation are device specific */
1810         if (*dev->dev_ops->priority_flow_ctrl_set)
1811                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
1812         return -ENOTSUP;
1813 }
1814
1815 static int
1816 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
1817                         uint16_t reta_size)
1818 {
1819         uint16_t i, num;
1820
1821         if (!reta_conf)
1822                 return -EINVAL;
1823
1824         if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
1825                 RTE_PMD_DEBUG_TRACE("Invalid reta size, should be %u aligned\n",
1826                                                         RTE_RETA_GROUP_SIZE);
1827                 return -EINVAL;
1828         }
1829
1830         num = reta_size / RTE_RETA_GROUP_SIZE;
1831         for (i = 0; i < num; i++) {
1832                 if (reta_conf[i].mask)
1833                         return 0;
1834         }
1835
1836         return -EINVAL;
1837 }
1838
1839 static int
1840 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
1841                          uint16_t reta_size,
1842                          uint16_t max_rxq)
1843 {
1844         uint16_t i, idx, shift;
1845
1846         if (!reta_conf)
1847                 return -EINVAL;
1848
1849         if (max_rxq == 0) {
1850                 RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
1851                 return -EINVAL;
1852         }
1853
1854         for (i = 0; i < reta_size; i++) {
1855                 idx = i / RTE_RETA_GROUP_SIZE;
1856                 shift = i % RTE_RETA_GROUP_SIZE;
1857                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
1858                         (reta_conf[idx].reta[shift] >= max_rxq)) {
1859                         RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
1860                                 "the maximum rxq index: %u\n", idx, shift,
1861                                 reta_conf[idx].reta[shift], max_rxq);
1862                         return -EINVAL;
1863                 }
1864         }
1865
1866         return 0;
1867 }
1868
1869 int
1870 rte_eth_dev_rss_reta_update(uint8_t port_id,
1871                             struct rte_eth_rss_reta_entry64 *reta_conf,
1872                             uint16_t reta_size)
1873 {
1874         struct rte_eth_dev *dev;
1875         int ret;
1876
1877         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1878         /* Check mask bits */
1879         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1880         if (ret < 0)
1881                 return ret;
1882
1883         dev = &rte_eth_devices[port_id];
1884
1885         /* Check entry value */
1886         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
1887                                 dev->data->nb_rx_queues);
1888         if (ret < 0)
1889                 return ret;
1890
1891         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
1892         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
1893 }
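
/*
 * Illustrative sketch, not part of this library: build a redirection
 * table that spreads RSS buckets round-robin over nb_queues RX queues,
 * using the idx/shift layout validated above. The fixed 8-group array
 * (512 entries) is an assumption covering common NICs; real code
 * should size it from dev_info.reta_size.
 */
static int
example_spread_reta(uint8_t port_id, uint16_t nb_queues)
{
        struct rte_eth_dev_info dev_info;
        struct rte_eth_rss_reta_entry64 reta_conf[8];
        uint16_t i;

        rte_eth_dev_info_get(port_id, &dev_info);
        if (nb_queues == 0 || dev_info.reta_size == 0 ||
            dev_info.reta_size > 8 * RTE_RETA_GROUP_SIZE)
                return -EINVAL;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < dev_info.reta_size; i++) {
                uint16_t idx = i / RTE_RETA_GROUP_SIZE;
                uint16_t shift = i % RTE_RETA_GROUP_SIZE;

                reta_conf[idx].mask |= 1ULL << shift;
                reta_conf[idx].reta[shift] = i % nb_queues;
        }
        return rte_eth_dev_rss_reta_update(port_id, reta_conf,
                        dev_info.reta_size);
}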
1894
1895 int
1896 rte_eth_dev_rss_reta_query(uint8_t port_id,
1897                            struct rte_eth_rss_reta_entry64 *reta_conf,
1898                            uint16_t reta_size)
1899 {
1900         struct rte_eth_dev *dev;
1901         int ret;
1902
1903         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1904
1905         /* Check mask bits */
1906         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1907         if (ret < 0)
1908                 return ret;
1909
1910         dev = &rte_eth_devices[port_id];
1911         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
1912         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
1913 }
1914
1915 int
1916 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
1917 {
1918         struct rte_eth_dev *dev;
1919         uint16_t rss_hash_protos;
1920
1921         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1922         rss_hash_protos = rss_conf->rss_hf;
1923         if ((rss_hash_protos != 0) &&
1924             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
1925                 RTE_PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
1926                                 rss_hash_protos);
1927                 return -EINVAL;
1928         }
1929         dev = &rte_eth_devices[port_id];
1930         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
1931         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
1932 }
1933
1934 int
1935 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
1936                               struct rte_eth_rss_conf *rss_conf)
1937 {
1938         struct rte_eth_dev *dev;
1939
1940         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1941         dev = &rte_eth_devices[port_id];
1942         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
1943         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
1944 }
1945
1946 int
1947 rte_eth_dev_udp_tunnel_port_add(uint8_t port_id,
1948                                 struct rte_eth_udp_tunnel *udp_tunnel)
1949 {
1950         struct rte_eth_dev *dev;
1951
1952         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1953         if (udp_tunnel == NULL) {
1954                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
1955                 return -EINVAL;
1956         }
1957
1958         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
1959                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
1960                 return -EINVAL;
1961         }
1962
1963         dev = &rte_eth_devices[port_id];
1964         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
1965         return (*dev->dev_ops->udp_tunnel_port_add)(dev, udp_tunnel);
1966 }
1967
1968 int
1969 rte_eth_dev_udp_tunnel_port_delete(uint8_t port_id,
1970                                    struct rte_eth_udp_tunnel *udp_tunnel)
1971 {
1972         struct rte_eth_dev *dev;
1973
1974         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1975         dev = &rte_eth_devices[port_id];
1976
1977         if (udp_tunnel == NULL) {
1978                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
1979                 return -EINVAL;
1980         }
1981
1982         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
1983                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
1984                 return -EINVAL;
1985         }
1986
1987         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
1988         return (*dev->dev_ops->udp_tunnel_port_del)(dev, udp_tunnel);
1989 }
1990
1991 int
1992 rte_eth_led_on(uint8_t port_id)
1993 {
1994         struct rte_eth_dev *dev;
1995
1996         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1997         dev = &rte_eth_devices[port_id];
1998         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
1999         return (*dev->dev_ops->dev_led_on)(dev);
2000 }
2001
2002 int
2003 rte_eth_led_off(uint8_t port_id)
2004 {
2005         struct rte_eth_dev *dev;
2006
2007         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2008         dev = &rte_eth_devices[port_id];
2009         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2010         return (*dev->dev_ops->dev_led_off)(dev);
2011 }
2012
2013 /*
2014  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2015  * an empty spot.
2016  */
2017 static int
2018 get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2019 {
2020         struct rte_eth_dev_info dev_info;
2021         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2022         unsigned i;
2023
2024         rte_eth_dev_info_get(port_id, &dev_info);
2025
2026         for (i = 0; i < dev_info.max_mac_addrs; i++)
2027                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2028                         return i;
2029
2030         return -1;
2031 }
2032
2033 static const struct ether_addr null_mac_addr;
2034
2035 int
2036 rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
2037                         uint32_t pool)
2038 {
2039         struct rte_eth_dev *dev;
2040         int index;
2041         uint64_t pool_mask;
2042
2043         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2044         dev = &rte_eth_devices[port_id];
2045         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2046
2047         if (is_zero_ether_addr(addr)) {
2048                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2049                         port_id);
2050                 return -EINVAL;
2051         }
2052         if (pool >= ETH_64_POOLS) {
2053                 RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2054                 return -EINVAL;
2055         }
2056
2057         index = get_mac_addr_index(port_id, addr);
2058         if (index < 0) {
2059                 index = get_mac_addr_index(port_id, &null_mac_addr);
2060                 if (index < 0) {
2061                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2062                                 port_id);
2063                         return -ENOSPC;
2064                 }
2065         } else {
2066                 pool_mask = dev->data->mac_pool_sel[index];
2067
2068                 /* If both the MAC address and pool are already there, do nothing */
2069                 if (pool_mask & (1ULL << pool))
2070                         return 0;
2071         }
2072
2073         /* Update NIC */
2074         (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2075
2076         /* Update address in NIC data structure */
2077         ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2078
2079         /* Update pool bitmap in NIC data structure */
2080         dev->data->mac_pool_sel[index] |= (1ULL << pool);
2081
2082         return 0;
2083 }
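
/*
 * Illustrative sketch, not part of this library: add a locally
 * administered secondary MAC address to pool 0. The address value is
 * hypothetical. Re-adding the same address/pool pair is a no-op, and a
 * fresh address consumes the first all-zero slot found above.
 */
static int
example_add_secondary_mac(uint8_t port_id)
{
        struct ether_addr addr = {
                .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
        };

        return rte_eth_dev_mac_addr_add(port_id, &addr, 0);
}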
2084
2085 int
2086 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
2087 {
2088         struct rte_eth_dev *dev;
2089         int index;
2090
2091         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2092         dev = &rte_eth_devices[port_id];
2093         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2094
2095         index = get_mac_addr_index(port_id, addr);
2096         if (index == 0) {
2097                 RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2098                 return -EADDRINUSE;
2099         } else if (index < 0)
2100                 return 0;  /* Do nothing if address wasn't found */
2101
2102         /* Update NIC */
2103         (*dev->dev_ops->mac_addr_remove)(dev, index);
2104
2105         /* Update address in NIC data structure */
2106         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2107
2108         /* reset pool bitmap */
2109         dev->data->mac_pool_sel[index] = 0;
2110
2111         return 0;
2112 }
2113
2114 int
2115 rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr)
2116 {
2117         struct rte_eth_dev *dev;
2118
2119         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2120
2121         if (!is_valid_assigned_ether_addr(addr))
2122                 return -EINVAL;
2123
2124         dev = &rte_eth_devices[port_id];
2125         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2126
2127         /* Update default address in NIC data structure */
2128         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
2129
2130         (*dev->dev_ops->mac_addr_set)(dev, addr);
2131
2132         return 0;
2133 }
2134
2135 int
2136 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
2137                                 uint16_t rx_mode, uint8_t on)
2138 {
2139         uint16_t num_vfs;
2140         struct rte_eth_dev *dev;
2141         struct rte_eth_dev_info dev_info;
2142
2143         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2144
2145         dev = &rte_eth_devices[port_id];
2146         rte_eth_dev_info_get(port_id, &dev_info);
2147
2148         num_vfs = dev_info.max_vfs;
2149         if (vf >= num_vfs) {
2150                 RTE_PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
2151                 return -EINVAL;
2152         }
2153
2154         if (rx_mode == 0) {
2155                 RTE_PMD_DEBUG_TRACE("set VF RX mode:mode mask cannot be zero\n");
2156                 return -EINVAL;
2157         }
2158         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
2159         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
2160 }
2161
2162 /*
2163  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2164  * an empty spot.
2165  */
2166 static int
2167 get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2168 {
2169         struct rte_eth_dev_info dev_info;
2170         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2171         unsigned i;
2172
2173         rte_eth_dev_info_get(port_id, &dev_info);
2174         if (!dev->data->hash_mac_addrs)
2175                 return -1;
2176
2177         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2178                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2179                         ETHER_ADDR_LEN) == 0)
2180                         return i;
2181
2182         return -1;
2183 }
2184
2185 int
2186 rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
2187                                 uint8_t on)
2188 {
2189         int index;
2190         int ret;
2191         struct rte_eth_dev *dev;
2192
2193         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2194
2195         dev = &rte_eth_devices[port_id];
2196         if (is_zero_ether_addr(addr)) {
2197                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2198                         port_id);
2199                 return -EINVAL;
2200         }
2201
2202         index = get_hash_mac_addr_index(port_id, addr);
2203         /* Check if it's already there, and do nothing */
2204         if ((index >= 0) && (on))
2205                 return 0;
2206
2207         if (index < 0) {
2208                 if (!on) {
2209                         RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
2210                                 "set in UTA\n", port_id);
2211                         return -EINVAL;
2212                 }
2213
2214                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2215                 if (index < 0) {
2216                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2217                                         port_id);
2218                         return -ENOSPC;
2219                 }
2220         }
2221
2222         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2223         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2224         if (ret == 0) {
2225                 /* Update address in NIC data structure */
2226                 if (on)
2227                         ether_addr_copy(addr,
2228                                         &dev->data->hash_mac_addrs[index]);
2229                 else
2230                         ether_addr_copy(&null_mac_addr,
2231                                         &dev->data->hash_mac_addrs[index]);
2232         }
2233
2234         return ret;
2235 }
2236
2237 int
2238 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2239 {
2240         struct rte_eth_dev *dev;
2241
2242         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2243
2244         dev = &rte_eth_devices[port_id];
2245
2246         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2247         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2248 }
2249
2250 int
2251 rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on)
2252 {
2253         uint16_t num_vfs;
2254         struct rte_eth_dev *dev;
2255         struct rte_eth_dev_info dev_info;
2256
2257         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2258
2259         dev = &rte_eth_devices[port_id];
2260         rte_eth_dev_info_get(port_id, &dev_info);
2261
2262         num_vfs = dev_info.max_vfs;
2263         if (vf >= num_vfs) {
2264                 RTE_PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2265                 return -EINVAL;
2266         }
2267
2268         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2269         return (*dev->dev_ops->set_vf_rx)(dev, vf, on);
2270 }
2271
2272 int
2273 rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on)
2274 {
2275         uint16_t num_vfs;
2276         struct rte_eth_dev *dev;
2277         struct rte_eth_dev_info dev_info;
2278
2279         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2280
2281         dev = &rte_eth_devices[port_id];
2282         rte_eth_dev_info_get(port_id, &dev_info);
2283
2284         num_vfs = dev_info.max_vfs;
2285         if (vf >= num_vfs) {
2286                 RTE_PMD_DEBUG_TRACE("set VF TX:invalid VF id=%d\n", vf);
2287                 return -EINVAL;
2288         }
2289
2290         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2291         return (*dev->dev_ops->set_vf_tx)(dev, vf, on);
2292 }
2293
2294 int
2295 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2296                                uint64_t vf_mask, uint8_t vlan_on)
2297 {
2298         struct rte_eth_dev *dev;
2299
2300         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2301
2302         dev = &rte_eth_devices[port_id];
2303
2304         if (vlan_id > ETHER_MAX_VLAN_ID) {
2305                 RTE_PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
2306                         vlan_id);
2307                 return -EINVAL;
2308         }
2309
2310         if (vf_mask == 0) {
2311                 RTE_PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
2312                 return -EINVAL;
2313         }
2314
2315         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2316         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2317                                                    vf_mask, vlan_on);
2318 }
2319
2320 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
2321                                         uint16_t tx_rate)
2322 {
2323         struct rte_eth_dev *dev;
2324         struct rte_eth_dev_info dev_info;
2325         struct rte_eth_link link;
2326
2327         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2328
2329         dev = &rte_eth_devices[port_id];
2330         rte_eth_dev_info_get(port_id, &dev_info);
2331         link = dev->data->dev_link;
2332
2333         if (queue_idx >= dev_info.max_tx_queues) {
2334                 RTE_PMD_DEBUG_TRACE("set queue rate limit:port %d: "
2335                                 "invalid queue id=%d\n", port_id, queue_idx);
2336                 return -EINVAL;
2337         }
2338
2339         if (tx_rate > link.link_speed) {
2340                 RTE_PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
2341                                 "bigger than link speed=%d\n",
2342                         tx_rate, link.link_speed);
2343                 return -EINVAL;
2344         }
2345
2346         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2347         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2348 }
2349
2350 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
2351                                 uint64_t q_msk)
2352 {
2353         struct rte_eth_dev *dev;
2354         struct rte_eth_dev_info dev_info;
2355         struct rte_eth_link link;
2356
2357         if (q_msk == 0)
2358                 return 0;
2359
2360         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2361
2362         dev = &rte_eth_devices[port_id];
2363         rte_eth_dev_info_get(port_id, &dev_info);
2364         link = dev->data->dev_link;
2365
2366         if (vf >= dev_info.max_vfs) {
2367                 RTE_PMD_DEBUG_TRACE("set VF rate limit:port %d: "
2368                                 "invalid vf id=%d\n", port_id, vf);
2369                 return -EINVAL;
2370         }
2371
2372         if (tx_rate > link.link_speed) {
2373                 RTE_PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
2374                                 "bigger than link speed=%d\n",
2375                                 tx_rate, link.link_speed);
2376                 return -EINVAL;
2377         }
2378
2379         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
2380         return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
2381 }
2382
2383 int
2384 rte_eth_mirror_rule_set(uint8_t port_id,
2385                         struct rte_eth_mirror_conf *mirror_conf,
2386                         uint8_t rule_id, uint8_t on)
2387 {
2388         struct rte_eth_dev *dev;
2389
2390         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2391         if (mirror_conf->rule_type == 0) {
2392                 RTE_PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2393                 return -EINVAL;
2394         }
2395
2396         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2397                 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
2398                                 ETH_64_POOLS - 1);
2399                 return -EINVAL;
2400         }
2401
2402         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
2403              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
2404             (mirror_conf->pool_mask == 0)) {
2405                 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
2406                 return -EINVAL;
2407         }
2408
2409         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
2410             mirror_conf->vlan.vlan_mask == 0) {
2411                 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
2412                 return -EINVAL;
2413         }
2414
2415         dev = &rte_eth_devices[port_id];
2416         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2417
2418         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2419 }
2420
2421 int
2422 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2423 {
2424         struct rte_eth_dev *dev;
2425
2426         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2427
2428         dev = &rte_eth_devices[port_id];
2429         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2430
2431         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2432 }
2433
2434 int
2435 rte_eth_dev_callback_register(uint8_t port_id,
2436                         enum rte_eth_event_type event,
2437                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2438 {
2439         struct rte_eth_dev *dev;
2440         struct rte_eth_dev_callback *user_cb;
2441
2442         if (!cb_fn)
2443                 return -EINVAL;
2444
2445         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2446
2447         dev = &rte_eth_devices[port_id];
2448         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2449
2450         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
2451                 if (user_cb->cb_fn == cb_fn &&
2452                         user_cb->cb_arg == cb_arg &&
2453                         user_cb->event == event) {
2454                         break;
2455                 }
2456         }
2457
2458         /* create a new callback. */
2459         if (user_cb == NULL) {
2460                 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
2461                                         sizeof(struct rte_eth_dev_callback), 0);
2462                 if (user_cb != NULL) {
2463                         user_cb->cb_fn = cb_fn;
2464                         user_cb->cb_arg = cb_arg;
2465                         user_cb->event = event;
2466                         TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
2467                 }
2468         }
2469
2470         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2471         return (user_cb == NULL) ? -ENOMEM : 0;
2472 }
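
/*
 * Illustrative sketch, not part of this library: a minimal link-status
 * callback and its registration. The "example_" names are
 * hypothetical. Registering the same fn/arg/event tuple twice is
 * coalesced by the lookup above rather than duplicated.
 */
static void
example_lsc_cb(uint8_t port_id, enum rte_eth_event_type event, void *cb_arg)
{
        RTE_SET_USED(cb_arg);
        if (event == RTE_ETH_EVENT_INTR_LSC)
                printf("port %d: link status changed\n", port_id);
}

static int
example_register_lsc(uint8_t port_id)
{
        return rte_eth_dev_callback_register(port_id,
                        RTE_ETH_EVENT_INTR_LSC, example_lsc_cb, NULL);
}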
2473
2474 int
2475 rte_eth_dev_callback_unregister(uint8_t port_id,
2476                         enum rte_eth_event_type event,
2477                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2478 {
2479         int ret;
2480         struct rte_eth_dev *dev;
2481         struct rte_eth_dev_callback *cb, *next;
2482
2483         if (!cb_fn)
2484                 return -EINVAL;
2485
2486         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2487
2488         dev = &rte_eth_devices[port_id];
2489         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2490
2491         ret = 0;
2492         for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
2493
2494                 next = TAILQ_NEXT(cb, next);
2495
2496                 if (cb->cb_fn != cb_fn || cb->event != event ||
2497                                 (cb->cb_arg != (void *)-1 &&
2498                                 cb->cb_arg != cb_arg))
2499                         continue;
2500
2501                 /*
2502                  * if this callback is not executing right now,
2503                  * then remove it.
2504                  */
2505                 if (cb->active == 0) {
2506                         TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
2507                         rte_free(cb);
2508                 } else {
2509                         ret = -EAGAIN;
2510                 }
2511         }
2512
2513         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2514         return ret;
2515 }
2516
2517 void
2518 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
2519         enum rte_eth_event_type event, void *cb_arg)
2520 {
2521         struct rte_eth_dev_callback *cb_lst;
2522         struct rte_eth_dev_callback dev_cb;
2523
2524         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2525         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
2526                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
2527                         continue;
2528                 dev_cb = *cb_lst;
2529                 cb_lst->active = 1;
2530                 if (cb_arg != NULL)
2531                         dev_cb.cb_arg = (void *) cb_arg;
2532
2533                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2534                 dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
2535                                                 dev_cb.cb_arg);
2536                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
2537                 cb_lst->active = 0;
2538         }
2539         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2540 }
2541
2542 int
2543 rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
2544 {
2545         uint32_t vec;
2546         struct rte_eth_dev *dev;
2547         struct rte_intr_handle *intr_handle;
2548         uint16_t qid;
2549         int rc;
2550
2551         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2552
2553         dev = &rte_eth_devices[port_id];
2554         intr_handle = &dev->pci_dev->intr_handle;
2555         if (!intr_handle->intr_vec) {
2556                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2557                 return -EPERM;
2558         }
2559
2560         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
2561                 vec = intr_handle->intr_vec[qid];
2562                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2563                 if (rc && rc != -EEXIST) {
2564                         RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2565                                         " op %d epfd %d vec %u\n",
2566                                         port_id, qid, op, epfd, vec);
2567                 }
2568         }
2569
2570         return 0;
2571 }
2572
2573 const struct rte_memzone *
2574 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
2575                          uint16_t queue_id, size_t size, unsigned align,
2576                          int socket_id)
2577 {
2578         char z_name[RTE_MEMZONE_NAMESIZE];
2579         const struct rte_memzone *mz;
2580
2581         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
2582                  dev->driver->pci_drv.driver.name, ring_name,
2583                  dev->data->port_id, queue_id);
2584
2585         mz = rte_memzone_lookup(z_name);
2586         if (mz)
2587                 return mz;
2588
2589         if (rte_xen_dom0_supported())
2590                 return rte_memzone_reserve_bounded(z_name, size, socket_id,
2591                                                    0, align, RTE_PGSIZE_2M);
2592         else
2593                 return rte_memzone_reserve_aligned(z_name, size, socket_id,
2594                                                    0, align);
2595 }
2596
2597 int
2598 rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
2599                           int epfd, int op, void *data)
2600 {
2601         uint32_t vec;
2602         struct rte_eth_dev *dev;
2603         struct rte_intr_handle *intr_handle;
2604         int rc;
2605
2606         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2607
2608         dev = &rte_eth_devices[port_id];
2609         if (queue_id >= dev->data->nb_rx_queues) {
2610                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
2611                 return -EINVAL;
2612         }
2613
2614         intr_handle = &dev->pci_dev->intr_handle;
2615         if (!intr_handle->intr_vec) {
2616                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2617                 return -EPERM;
2618         }
2619
2620         vec = intr_handle->intr_vec[queue_id];
2621         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2622         if (rc && rc != -EEXIST) {
2623                 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2624                                 " op %d epfd %d vec %u\n",
2625                                 port_id, queue_id, op, epfd, vec);
2626                 return rc;
2627         }
2628
2629         return 0;
2630 }
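
/*
 * Illustrative sketch, not part of this library and Linux-only (it
 * relies on the EAL epoll wrappers): bind one RX queue's interrupt to
 * the calling thread's epoll instance, arm it, and block until a
 * packet arrives. Hypothetical example; a real loop would re-arm and
 * fall back to polling under load.
 */
static int
example_wait_rx_intr(uint8_t port_id, uint16_t queue_id)
{
        struct rte_epoll_event event;
        int ret;

        ret = rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
                        RTE_EPOLL_PER_THREAD, RTE_INTR_EVENT_ADD, NULL);
        if (ret < 0)
                return ret;

        ret = rte_eth_dev_rx_intr_enable(port_id, queue_id);
        if (ret < 0)
                return ret;

        /* wait forever for one RX interrupt */
        return rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, -1);
}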
2631
2632 int
2633 rte_eth_dev_rx_intr_enable(uint8_t port_id,
2634                            uint16_t queue_id)
2635 {
2636         struct rte_eth_dev *dev;
2637
2638         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2639
2640         dev = &rte_eth_devices[port_id];
2641
2642         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
2643         return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
2644 }
2645
2646 int
2647 rte_eth_dev_rx_intr_disable(uint8_t port_id,
2648                             uint16_t queue_id)
2649 {
2650         struct rte_eth_dev *dev;
2651
2652         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2653
2654         dev = &rte_eth_devices[port_id];
2655
2656         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
2657         return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
2658 }
2659
2660 #ifdef RTE_NIC_BYPASS
2661 int rte_eth_dev_bypass_init(uint8_t port_id)
2662 {
2663         struct rte_eth_dev *dev;
2664
2665         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2666
2667         dev = &rte_eth_devices[port_id];
2668         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
2669         (*dev->dev_ops->bypass_init)(dev);
2670         return 0;
2671 }
2672
2673 int
2674 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
2675 {
2676         struct rte_eth_dev *dev;
2677
2678         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2679
2680         dev = &rte_eth_devices[port_id];
2681         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2682         (*dev->dev_ops->bypass_state_show)(dev, state);
2683         return 0;
2684 }
2685
2686 int
2687 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
2688 {
2689         struct rte_eth_dev *dev;
2690
2691         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2692
2693         dev = &rte_eth_devices[port_id];
2694         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
2695         (*dev->dev_ops->bypass_state_set)(dev, new_state);
2696         return 0;
2697 }
2698
2699 int
2700 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
2701 {
2702         struct rte_eth_dev *dev;
2703
2704         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2705
2706         dev = &rte_eth_devices[port_id];
2707         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_show, -ENOTSUP);
2708         (*dev->dev_ops->bypass_event_show)(dev, event, state);
2709         return 0;
2710 }
2711
2712 int
2713 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
2714 {
2715         struct rte_eth_dev *dev;
2716
2717         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2718
2719         dev = &rte_eth_devices[port_id];
2720
2721         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
2722         (*dev->dev_ops->bypass_event_set)(dev, event, state);
2723         return 0;
2724 }
2725
2726 int
2727 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
2728 {
2729         struct rte_eth_dev *dev;
2730
2731         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2732
2733         dev = &rte_eth_devices[port_id];
2734
2735         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
2736         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
2737         return 0;
2738 }
2739
2740 int
2741 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
2742 {
2743         struct rte_eth_dev *dev;
2744
2745         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2746
2747         dev = &rte_eth_devices[port_id];
2748
2749         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
2750         (*dev->dev_ops->bypass_ver_show)(dev, ver);
2751         return 0;
2752 }
2753
2754 int
2755 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
2756 {
2757         struct rte_eth_dev *dev;
2758
2759         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2760
2761         dev = &rte_eth_devices[port_id];
2762
2763         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
2764         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
2765         return 0;
2766 }
2767
2768 int
2769 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
2770 {
2771         struct rte_eth_dev *dev;
2772
2773         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2774
2775         dev = &rte_eth_devices[port_id];
2776
2777         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
2778         (*dev->dev_ops->bypass_wd_reset)(dev);
2779         return 0;
2780 }
2781 #endif
2782
2783 int
2784 rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
2785 {
2786         struct rte_eth_dev *dev;
2787
2788         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2789
2790         dev = &rte_eth_devices[port_id];
2791         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2792         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
2793                                 RTE_ETH_FILTER_NOP, NULL);
2794 }
2795
2796 int
2797 rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
2798                        enum rte_filter_op filter_op, void *arg)
2799 {
2800         struct rte_eth_dev *dev;
2801
2802         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2803
2804         dev = &rte_eth_devices[port_id];
2805         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2806         return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
2807 }
2808
2809 void *
2810 rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
2811                 rte_rx_callback_fn fn, void *user_param)
2812 {
2813 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2814         rte_errno = ENOTSUP;
2815         return NULL;
2816 #endif
2817         /* check input parameters */
2818         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2819                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2820                 rte_errno = EINVAL;
2821                 return NULL;
2822         }
2823         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2824
2825         if (cb == NULL) {
2826                 rte_errno = ENOMEM;
2827                 return NULL;
2828         }
2829
2830         cb->fn.rx = fn;
2831         cb->param = user_param;
2832
2833         rte_spinlock_lock(&rte_eth_rx_cb_lock);
2834         /* Add the callbacks in fifo order. */
2835         struct rte_eth_rxtx_callback *tail =
2836                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
2837
2838         if (!tail) {
2839                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
2840
2841         } else {
2842                 while (tail->next)
2843                         tail = tail->next;
2844                 tail->next = cb;
2845         }
2846         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
2847
2848         return cb;
2849 }
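
/*
 * Illustrative sketch, not part of this library: a post-RX callback
 * that counts received packets, matching the rte_rx_callback_fn
 * signature. The "example_" name is hypothetical. The handle returned
 * by rte_eth_add_rx_callback() is what must later be passed to
 * rte_eth_remove_rx_callback(); removal only unlinks the entry, so the
 * caller must ensure no rx_burst is still executing it before freeing.
 */
static uint16_t
example_count_rx_cb(uint8_t port_id, uint16_t queue_id,
                struct rte_mbuf *pkts[], uint16_t nb_pkts,
                uint16_t max_pkts, void *user_param)
{
        uint64_t *counter = user_param;

        RTE_SET_USED(port_id);
        RTE_SET_USED(queue_id);
        RTE_SET_USED(pkts);
        RTE_SET_USED(max_pkts);
        *counter += nb_pkts;
        return nb_pkts;
}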
2850
2851 void *
2852 rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id,
2853                 rte_rx_callback_fn fn, void *user_param)
2854 {
2855 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2856         rte_errno = ENOTSUP;
2857         return NULL;
2858 #endif
2859         /* check input parameters */
2860         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2861                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2862                 rte_errno = EINVAL;
2863                 return NULL;
2864         }
2865
2866         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2867
2868         if (cb == NULL) {
2869                 rte_errno = ENOMEM;
2870                 return NULL;
2871         }
2872
2873         cb->fn.rx = fn;
2874         cb->param = user_param;
2875
2876         rte_spinlock_lock(&rte_eth_rx_cb_lock);
2877         /* Add the callback at the first position */
2878         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
2879         rte_smp_wmb();
2880         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
2881         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
2882
2883         return cb;
2884 }
2885
2886 void *
2887 rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
2888                 rte_tx_callback_fn fn, void *user_param)
2889 {
2890 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2891         rte_errno = ENOTSUP;
2892         return NULL;
2893 #endif
2894         /* check input parameters */
2895         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2896                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
2897                 rte_errno = EINVAL;
2898                 return NULL;
2899         }
2900
2901         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2902
2903         if (cb == NULL) {
2904                 rte_errno = ENOMEM;
2905                 return NULL;
2906         }
2907
2908         cb->fn.tx = fn;
2909         cb->param = user_param;
2910
2911         rte_spinlock_lock(&rte_eth_tx_cb_lock);
2912         /* Add the callbacks in fifo order. */
2913         struct rte_eth_rxtx_callback *tail =
2914                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
2915
2916         if (!tail) {
2917                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
2918
2919         } else {
2920                 while (tail->next)
2921                         tail = tail->next;
2922                 tail->next = cb;
2923         }
2924         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
2925
2926         return cb;
2927 }
2928
2929 int
2930 rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
2931                 struct rte_eth_rxtx_callback *user_cb)
2932 {
2933 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2934         return -ENOTSUP;
2935 #endif
2936         /* Check input parameters. */
2937         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2938         if (user_cb == NULL ||
2939                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
2940                 return -EINVAL;
2941
2942         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2943         struct rte_eth_rxtx_callback *cb;
2944         struct rte_eth_rxtx_callback **prev_cb;
2945         int ret = -EINVAL;
2946
2947         rte_spinlock_lock(&rte_eth_rx_cb_lock);
2948         prev_cb = &dev->post_rx_burst_cbs[queue_id];
2949         for (; *prev_cb != NULL; prev_cb = &cb->next) {
2950                 cb = *prev_cb;
2951                 if (cb == user_cb) {
2952                         /* Remove the user cb from the callback list. */
2953                         *prev_cb = cb->next;
2954                         ret = 0;
2955                         break;
2956                 }
2957         }
2958         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
2959
2960         return ret;
2961 }
2962
2963 int
2964 rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
2965                 struct rte_eth_rxtx_callback *user_cb)
2966 {
2967 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2968         return -ENOTSUP;
2969 #endif
2970         /* Check input parameters. */
2971         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2972         if (user_cb == NULL ||
2973                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
2974                 return -EINVAL;
2975
2976         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2977         int ret = -EINVAL;
2978         struct rte_eth_rxtx_callback *cb;
2979         struct rte_eth_rxtx_callback **prev_cb;
2980
2981         rte_spinlock_lock(&rte_eth_tx_cb_lock);
2982         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
2983         for (; *prev_cb != NULL; prev_cb = &cb->next) {
2984                 cb = *prev_cb;
2985                 if (cb == user_cb) {
2986                         /* Remove the user cb from the callback list. */
2987                         *prev_cb = cb->next;
2988                         ret = 0;
2989                         break;
2990                 }
2991         }
2992         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
2993
2994         return ret;
2995 }
2996
2997 int
2998 rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
2999         struct rte_eth_rxq_info *qinfo)
3000 {
3001         struct rte_eth_dev *dev;
3002
3003         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3004
3005         if (qinfo == NULL)
3006                 return -EINVAL;
3007
3008         dev = &rte_eth_devices[port_id];
3009         if (queue_id >= dev->data->nb_rx_queues) {
3010                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3011                 return -EINVAL;
3012         }
3013
3014         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3015
3016         memset(qinfo, 0, sizeof(*qinfo));
3017         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3018         return 0;
3019 }
3020
3021 int
3022 rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3023         struct rte_eth_txq_info *qinfo)
3024 {
3025         struct rte_eth_dev *dev;
3026
3027         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3028
3029         if (qinfo == NULL)
3030                 return -EINVAL;
3031
3032         dev = &rte_eth_devices[port_id];
3033         if (queue_id >= dev->data->nb_tx_queues) {
3034                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3035                 return -EINVAL;
3036         }
3037
3038         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3039
3040         memset(qinfo, 0, sizeof(*qinfo));
3041         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3042         return 0;
3043 }
3044
3045 int
3046 rte_eth_dev_set_mc_addr_list(uint8_t port_id,
3047                              struct ether_addr *mc_addr_set,
3048                              uint32_t nb_mc_addr)
3049 {
3050         struct rte_eth_dev *dev;
3051
3052         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3053
3054         dev = &rte_eth_devices[port_id];
3055         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3056         return dev->dev_ops->set_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
3057 }
3058
3059 int
3060 rte_eth_timesync_enable(uint8_t port_id)
3061 {
3062         struct rte_eth_dev *dev;
3063
3064         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3065         dev = &rte_eth_devices[port_id];
3066
3067         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3068         return (*dev->dev_ops->timesync_enable)(dev);
3069 }
3070
3071 int
3072 rte_eth_timesync_disable(uint8_t port_id)
3073 {
3074         struct rte_eth_dev *dev;
3075
3076         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3077         dev = &rte_eth_devices[port_id];
3078
3079         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3080         return (*dev->dev_ops->timesync_disable)(dev);
3081 }
3082
3083 int
3084 rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp,
3085                                    uint32_t flags)
3086 {
3087         struct rte_eth_dev *dev;
3088
3089         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3090         dev = &rte_eth_devices[port_id];
3091
3092         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3093         return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
3094 }
3095
3096 int
3097 rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp)
3098 {
3099         struct rte_eth_dev *dev;
3100
3101         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3102         dev = &rte_eth_devices[port_id];
3103
3104         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3105         return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
3106 }
3107
3108 int
3109 rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta)
3110 {
3111         struct rte_eth_dev *dev;
3112
3113         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3114         dev = &rte_eth_devices[port_id];
3115
3116         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
3117         return (*dev->dev_ops->timesync_adjust_time)(dev, delta);
3118 }
3119
3120 int
3121 rte_eth_timesync_read_time(uint8_t port_id, struct timespec *timestamp)
3122 {
3123         struct rte_eth_dev *dev;
3124
3125         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3126         dev = &rte_eth_devices[port_id];
3127
3128         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
3129         return (*dev->dev_ops->timesync_read_time)(dev, timestamp);
3130 }
3131
3132 int
3133 rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *timestamp)
3134 {
3135         struct rte_eth_dev *dev;
3136
3137         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3138         dev = &rte_eth_devices[port_id];
3139
3140         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
3141         return (*dev->dev_ops->timesync_write_time)(dev, timestamp);
3142 }
3143
int
rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
	return (*dev->dev_ops->get_reg)(dev, info);
}

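/* Return the size of the device EEPROM in bytes, or a negative errno. */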
int
rte_eth_dev_get_eeprom_length(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
	return (*dev->dev_ops->get_eeprom_length)(dev);
}

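/* Read the region of the device EEPROM described by info. */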
int
rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
	return (*dev->dev_ops->get_eeprom)(dev, info);
}

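/* Write the region of the device EEPROM described by info. */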
int
rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
	return (*dev->dev_ops->set_eeprom)(dev, info);
}

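/* Retrieve the DCB (Data Center Bridging) configuration of the port. */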
int
rte_eth_dev_get_dcb_info(uint8_t port_id,
			 struct rte_eth_dcb_info *dcb_info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
	return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
}

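/*
 * Fill in the ethdev data fields derived from the underlying PCI device:
 * driver-controlled device flags, kernel driver, NUMA node and driver name.
 */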
void
rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev,
		      struct rte_pci_device *pci_dev)
{
	if ((eth_dev == NULL) || (pci_dev == NULL)) {
		RTE_PMD_DEBUG_TRACE("NULL pointer eth_dev=%p pci_dev=%p\n",
				eth_dev, pci_dev);
		return;
	}

	eth_dev->data->dev_flags = 0;
	if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
	if (pci_dev->driver->drv_flags & RTE_PCI_DRV_DETACHABLE)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

	eth_dev->data->kdrv = pci_dev->kdrv;
	eth_dev->data->numa_node = pci_dev->device.numa_node;
	eth_dev->data->drv_name = pci_dev->driver->driver.name;
}

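/* Configure the Ethertype used to recognize an L2 tunnel on the port. */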
int
rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id,
				    struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (l2_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
		return -EINVAL;
	}

	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
				-ENOTSUP);
	return (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev, l2_tunnel);
}

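/*
 * Enable or disable the L2 tunnel offloads selected by mask (e.g. tag
 * insertion, stripping or forwarding); en is non-zero to enable.
 */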
int
rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
				  struct rte_eth_l2_tunnel_conf *l2_tunnel,
				  uint32_t mask,
				  uint8_t en)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (l2_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
		return -EINVAL;
	}

	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
		return -EINVAL;
	}

	if (mask == 0) {
		RTE_PMD_DEBUG_TRACE("Mask should have a value\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
				-ENOTSUP);
	return (*dev->dev_ops->l2_tunnel_offload_set)(dev, l2_tunnel, mask, en);
}