ethdev: convert to EAL hotplug
[dpdk.git] / lib/librte_ether/rte_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_ether.h"
#include "rte_ethdev.h"

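/*
 * Note: port bookkeeping lives in two places. rte_eth_devices[] is
 * per-process, while the rte_eth_dev_data[] array it points into is kept
 * in the MZ_RTE_ETH_DEV_DATA memzone so that secondary processes can look
 * it up and share the same port data (see rte_eth_dev_data_alloc()).
 */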
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static struct rte_eth_dev_data *rte_eth_dev_data;
static uint8_t eth_dev_last_created_port;
static uint8_t nb_ports;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
        {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
                rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
                sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
                sizeof(rte_txq_stats_strings[0]))
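/*
 * The per-queue entries above point at the q_ipackets/q_ibytes/q_errors
 * and q_opackets/q_obytes arrays of struct rte_eth_stats; when xstats are
 * filled in, the queue index is added to the offset (q * sizeof(uint64_t))
 * and the name is prefixed with the queue id (see rte_eth_xstats_get_names()
 * and rte_eth_xstats_get() below).
 */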


/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * the pointer to the parameter passed to the callback, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

enum {
        DEV_DETACHED = 0,
        DEV_ATTACHED
};

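/*
 * Reserve (primary process) or look up (secondary process) the shared
 * memzone that holds the rte_eth_dev_data[] array for all ports.
 */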
static void
rte_eth_dev_data_alloc(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
                                rte_socket_id(), flags);
        } else
                mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
        if (mz == NULL)
                rte_panic("Cannot allocate memzone for ethernet port data\n");

        rte_eth_dev_data = mz->addr;
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                memset(rte_eth_dev_data, 0,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

static uint8_t
rte_eth_dev_find_free_port(void)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].attached == DEV_DETACHED)
                        return i;
        }
        return RTE_MAX_ETHPORTS;
}

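/*
 * Allocate the first DEV_DETACHED slot in rte_eth_devices[] for a new
 * port with a unique name; the matching rte_eth_dev_release_port() below
 * simply marks the slot DEV_DETACHED again so it can be reused.
 */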
struct rte_eth_dev *
rte_eth_dev_allocate(const char *name, enum rte_eth_dev_type type)
{
        uint8_t port_id;
        struct rte_eth_dev *eth_dev;

        port_id = rte_eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
                return NULL;
        }

        if (rte_eth_dev_data == NULL)
                rte_eth_dev_data_alloc();

        if (rte_eth_dev_allocated(name) != NULL) {
                RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
                                name);
                return NULL;
        }

        eth_dev = &rte_eth_devices[port_id];
        eth_dev->data = &rte_eth_dev_data[port_id];
        snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
        eth_dev->data->port_id = port_id;
        eth_dev->attached = DEV_ATTACHED;
        eth_dev->dev_type = type;
        eth_dev_last_created_port = port_id;
        nb_ports++;
        return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        eth_dev->attached = DEV_DETACHED;
        nb_ports--;
        return 0;
}

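/*
 * Generic probe callback used by ethdev PMDs now that device
 * attach/detach goes through EAL hotplug: allocate a port named after
 * the PCI address, allocate the private data in the primary process and
 * hand over to the driver's eth_dev_init().
 */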
int
rte_eth_dev_pci_probe(struct rte_pci_driver *pci_drv,
                      struct rte_pci_device *pci_dev)
{
        struct eth_driver    *eth_drv;
        struct rte_eth_dev *eth_dev;
        char ethdev_name[RTE_ETH_NAME_MAX_LEN];

        int diag;

        eth_drv = (struct eth_driver *)pci_drv;

        rte_eal_pci_device_name(&pci_dev->addr, ethdev_name,
                        sizeof(ethdev_name));

        eth_dev = rte_eth_dev_allocate(ethdev_name, RTE_ETH_DEV_PCI);
        if (eth_dev == NULL)
                return -ENOMEM;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
                                  eth_drv->dev_private_size,
                                  RTE_CACHE_LINE_SIZE);
                if (eth_dev->data->dev_private == NULL)
                        rte_panic("Cannot allocate memzone for private port data\n");
        }
        eth_dev->pci_dev = pci_dev;
        eth_dev->driver = eth_drv;
        eth_dev->data->rx_mbuf_alloc_failed = 0;

        /* init user callbacks */
        TAILQ_INIT(&(eth_dev->link_intr_cbs));

        /*
         * Set the default MTU.
         */
        eth_dev->data->mtu = ETHER_MTU;

        /* Invoke PMD device initialization function */
        diag = (*eth_drv->eth_dev_init)(eth_dev);
        if (diag == 0)
                return 0;

        RTE_PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%x device_id=0x%x) failed\n",
                        pci_drv->name,
                        (unsigned) pci_dev->id.vendor_id,
                        (unsigned) pci_dev->id.device_id);
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);
        rte_eth_dev_release_port(eth_dev);
        return diag;
}

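/*
 * Generic remove callback, the EAL hotplug counterpart of
 * rte_eth_dev_pci_probe(): let the driver uninit the port, release the
 * port slot and free the private data in the primary process.
 */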
int
rte_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
        const struct eth_driver *eth_drv;
        struct rte_eth_dev *eth_dev;
        char ethdev_name[RTE_ETH_NAME_MAX_LEN];
        int ret;

        if (pci_dev == NULL)
                return -EINVAL;

        rte_eal_pci_device_name(&pci_dev->addr, ethdev_name,
                        sizeof(ethdev_name));

        eth_dev = rte_eth_dev_allocated(ethdev_name);
        if (eth_dev == NULL)
                return -ENODEV;

        eth_drv = (const struct eth_driver *)pci_dev->driver;

        /* Invoke PMD device uninit function */
        if (*eth_drv->eth_dev_uninit) {
                ret = (*eth_drv->eth_dev_uninit)(eth_dev);
                if (ret)
                        return ret;
        }

        /* free ether device */
        rte_eth_dev_release_port(eth_dev);

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);

        eth_dev->pci_dev = NULL;
        eth_dev->driver = NULL;
        eth_dev->data = NULL;

        return 0;
}

int
rte_eth_dev_is_valid_port(uint8_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            rte_eth_devices[port_id].attached != DEV_ATTACHED)
                return 0;
        else
                return 1;
}

int
rte_eth_dev_socket_id(uint8_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
        return rte_eth_devices[port_id].data->numa_node;
}

uint8_t
rte_eth_dev_count(void)
{
        return nb_ports;
}

int
rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
{
        char *tmp;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (name == NULL) {
                RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        /* shouldn't check 'rte_eth_devices[i].data',
         * because it might be overwritten by a VDEV PMD */
        tmp = rte_eth_dev_data[port_id].name;
        strcpy(name, tmp);
        return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
{
        int i;

        if (name == NULL) {
                RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        *port_id = RTE_MAX_ETHPORTS;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {

                if (!strncmp(name,
                        rte_eth_dev_data[i].name, strlen(name))) {

                        *port_id = i;

                        return 0;
                }
        }
        return -ENODEV;
}

static int
rte_eth_dev_is_detachable(uint8_t port_id)
{
        uint32_t dev_flags;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        switch (rte_eth_devices[port_id].data->kdrv) {
        case RTE_KDRV_IGB_UIO:
        case RTE_KDRV_UIO_GENERIC:
        case RTE_KDRV_NIC_UIO:
        case RTE_KDRV_NONE:
                break;
        case RTE_KDRV_VFIO:
        default:
                return -ENOTSUP;
        }
        dev_flags = rte_eth_devices[port_id].data->dev_flags;
        if ((dev_flags & RTE_ETH_DEV_DETACHABLE) &&
                (!(dev_flags & RTE_ETH_DEV_BONDED_SLAVE)))
                return 0;
        else
                return 1;
}

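/*
 * A minimal usage sketch of the hotplug pair below (the devargs string
 * here is only an example; valid values are PMD- and bus-specific):
 *
 *   uint8_t port_id;
 *   char name[RTE_ETH_NAME_MAX_LEN];
 *
 *   if (rte_eth_dev_attach("eth_null0", &port_id) == 0) {
 *           ... configure, start and use the port ...
 *           rte_eth_dev_detach(port_id, name);
 *   }
 */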
/* attach the new device, then store the port_id of the device */
int
rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
{
        int ret = -1;
        int current = eth_dev_last_created_port;
        char *name = NULL;
        char *args = NULL;

        if ((devargs == NULL) || (port_id == NULL)) {
                ret = -EINVAL;
                goto err;
        }

        /* parse devargs, then retrieve device name and args */
        if (rte_eal_parse_devargs_str(devargs, &name, &args))
                goto err;

        ret = rte_eal_dev_attach(name, args);
        if (ret < 0)
                goto err;

        /* no point looking at eth_dev_last_created_port if no port exists */
        if (!nb_ports) {
                RTE_LOG(ERR, EAL, "No ports found for device (%s)\n", name);
                ret = -1;
                goto err;
        }

        /* if nothing happened, there is a bug here, since some driver told us
         * it did attach a device, but did not create a port.
         */
        if (current == eth_dev_last_created_port) {
                ret = -1;
                goto err;
        }

        *port_id = eth_dev_last_created_port;
        ret = 0;

err:
        free(name);
        free(args);
        return ret;
}

/* detach the device, then store the name of the detached device */
int
rte_eth_dev_detach(uint8_t port_id, char *name)
{
        int ret = -1;

        if (name == NULL) {
                ret = -EINVAL;
                goto err;
        }

        /* FIXME: move this to eal, once device flags are relocated there */
        if (rte_eth_dev_is_detachable(port_id))
                goto err;

        snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
                 "%s", rte_eth_devices[port_id].data->name);
        ret = rte_eal_dev_detach(name);
        if (ret < 0)
                goto err;

        return 0;

err:
        return ret;
}

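/*
 * Grow or shrink the rx_queues array to nb_queues entries: allocate it
 * on first configuration, release the queues being dropped and realloc
 * on reconfiguration, and release everything when nb_queues is 0.
 * rte_eth_dev_tx_queue_config() below mirrors this for TX.
 */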
static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -(ENOMEM);
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;

        } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

int
rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
                        " already started\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return dev->dev_ops->rx_queue_start(dev, rx_queue_id);

}

int
rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
                        " already stopped\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);

}

int
rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
                        " already started\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return dev->dev_ops->tx_queue_start(dev, tx_queue_id);

}

int
rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
                        " already stopped\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);

}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;

        } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

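/*
 * Map a numeric link speed plus duplex to the ETH_LINK_SPEED_* bitflag,
 * e.g. rte_eth_speed_bitflag(ETH_SPEED_NUM_10M, ETH_LINK_HALF_DUPLEX)
 * yields ETH_LINK_SPEED_10M_HD; duplex only matters for 10M and 100M.
 */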
uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
        switch (speed) {
        case ETH_SPEED_NUM_10M:
                return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
        case ETH_SPEED_NUM_100M:
                return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
        case ETH_SPEED_NUM_1G:
                return ETH_LINK_SPEED_1G;
        case ETH_SPEED_NUM_2_5G:
                return ETH_LINK_SPEED_2_5G;
        case ETH_SPEED_NUM_5G:
                return ETH_LINK_SPEED_5G;
        case ETH_SPEED_NUM_10G:
                return ETH_LINK_SPEED_10G;
        case ETH_SPEED_NUM_20G:
                return ETH_LINK_SPEED_20G;
        case ETH_SPEED_NUM_25G:
                return ETH_LINK_SPEED_25G;
        case ETH_SPEED_NUM_40G:
                return ETH_LINK_SPEED_40G;
        case ETH_SPEED_NUM_50G:
                return ETH_LINK_SPEED_50G;
        case ETH_SPEED_NUM_56G:
                return ETH_LINK_SPEED_56G;
        case ETH_SPEED_NUM_100G:
                return ETH_LINK_SPEED_100G;
        default:
                return 0;
        }
}

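/*
 * A minimal port bring-up sketch using the calls below (mempool creation
 * and error handling omitted; queue counts and ring sizes are arbitrary):
 *
 *   struct rte_eth_conf conf = {
 *           .rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN },
 *   };
 *
 *   rte_eth_dev_configure(port_id, 1, 1, &conf);
 *   rte_eth_rx_queue_setup(port_id, 0, 128, rte_eth_dev_socket_id(port_id),
 *                          NULL, mbuf_pool);
 *   rte_eth_tx_queue_setup(port_id, 0, 512, rte_eth_dev_socket_id(port_id),
 *                          NULL);
 *   rte_eth_dev_start(port_id);
 */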
int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        int diag;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_PMD_DEBUG_TRACE(
                        "Number of RX queues requested (%u) is greater than max supported(%d)\n",
                        nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_PMD_DEBUG_TRACE(
                        "Number of TX queues requested (%u) is greater than max supported(%d)\n",
                        nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                RTE_PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        /* Copy the dev_conf parameter into the dev structure */
        memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        (*dev->dev_ops->dev_infos_get)(dev, &dev_info);

        if (nb_rx_q == 0 && nb_tx_q == 0) {
                RTE_PMD_DEBUG_TRACE("ethdev port_id=%d both rx and tx queue cannot be 0\n", port_id);
                return -EINVAL;
        }

        if (nb_rx_q > dev_info.max_rx_queues) {
                RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
                                port_id, nb_rx_q, dev_info.max_rx_queues);
                return -EINVAL;
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
                                port_id, nb_tx_q, dev_info.max_tx_queues);
                return -EINVAL;
        }

        /*
         * If link state interrupt is enabled, check that the
         * device supports it.
         */
        if ((dev_conf->intr_conf.lsc == 1) &&
                (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
                        RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
                                        dev->data->drv_name);
                        return -EINVAL;
        }

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (dev_conf->rxmode.jumbo_frame == 1) {
                if (dev_conf->rxmode.max_rx_pkt_len >
                    dev_info.max_rx_pktlen) {
                        RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " > max valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)dev_info.max_rx_pktlen);
                        return -EINVAL;
                } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
                        RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " < min valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)ETHER_MIN_LEN);
                        return -EINVAL;
                }
        } else {
                if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
                        dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
                        /* Use default value */
                        dev->data->dev_conf.rxmode.max_rx_pkt_len =
                                                        ETHER_MAX_LEN;
        }

        /*
         * Setup new number of RX/TX queues and reconfigure device.
         */
        diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
        if (diag != 0) {
                RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
                                port_id, diag);
                return diag;
        }

        diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
        if (diag != 0) {
                RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                return diag;
        }

        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                return diag;
        }

        return 0;
}

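/*
 * Called from rte_eth_dev_start() to replay configuration that may have
 * been lost while the port was down: unicast MAC addresses plus the
 * promiscuous and all-multicast modes.
 */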
static void
rte_eth_dev_config_restore(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct ether_addr addr;
        uint16_t i;
        uint32_t pool = 0;

        dev = &rte_eth_devices[port_id];

        rte_eth_dev_info_get(port_id, &dev_info);

        if (RTE_ETH_DEV_SRIOV(dev).active)
                pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

        /* replay MAC address configuration */
        for (i = 0; i < dev_info.max_mac_addrs; i++) {
                addr = dev->data->mac_addrs[i];

                /* skip zero address */
                if (is_zero_ether_addr(&addr))
                        continue;

                /* add address to the hardware */
                if  (*dev->dev_ops->mac_addr_add &&
                        (dev->data->mac_pool_sel[i] & (1ULL << pool)))
                        (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
                else {
                        RTE_PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
                                        port_id);
                        /* exit the loop but not return an error */
                        break;
                }
        }

        /* replay promiscuous configuration */
        if (rte_eth_promiscuous_get(port_id) == 1)
                rte_eth_promiscuous_enable(port_id);
        else if (rte_eth_promiscuous_get(port_id) == 0)
                rte_eth_promiscuous_disable(port_id);

        /* replay all multicast configuration */
        if (rte_eth_allmulticast_get(port_id) == 1)
                rte_eth_allmulticast_enable(port_id);
        else if (rte_eth_allmulticast_get(port_id) == 0)
                rte_eth_allmulticast_disable(port_id);
}

int
rte_eth_dev_start(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        int diag;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

        if (dev->data->dev_started != 0) {
                RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
                        " already started\n",
                        port_id);
                return 0;
        }

        diag = (*dev->dev_ops->dev_start)(dev);
        if (diag == 0)
                dev->data->dev_started = 1;
        else
                return diag;

        rte_eth_dev_config_restore(port_id);

        if (dev->data->dev_conf.intr_conf.lsc == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
                (*dev->dev_ops->link_update)(dev, 0);
        }
        return 0;
}

void
rte_eth_dev_stop(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

        if (dev->data->dev_started == 0) {
                RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
                        " already stopped\n",
                        port_id);
                return;
        }

        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_stop)(dev);
}

int
rte_eth_dev_set_link_up(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
        return (*dev->dev_ops->dev_set_link_up)(dev);
}

int
rte_eth_dev_set_link_down(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
        return (*dev->dev_ops->dev_set_link_down)(dev);
}

void
rte_eth_dev_close(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_close)(dev);

        rte_free(dev->data->rx_queues);
        dev->data->rx_queues = NULL;
        rte_free(dev->data->tx_queues);
        dev->data->tx_queues = NULL;
}

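/*
 * RX queue setup validates the mempool before calling into the PMD: the
 * pool must carry rte_pktmbuf_pool_private data and its buffers must fit
 * RTE_PKTMBUF_HEADROOM plus the device's min_rx_bufsize. Passing a NULL
 * rx_conf selects the driver's default_rxconf.
 */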
int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
                       uint16_t nb_rx_desc, unsigned int socket_id,
                       const struct rte_eth_rxconf *rx_conf,
                       struct rte_mempool *mp)
{
        int ret;
        uint32_t mbp_buf_size;
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        if (dev->data->dev_started) {
                RTE_PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

        /*
         * Check the size of the mbuf data buffer.
         * This value must be provided in the private data of the memory pool.
         * First check that the memory pool has a valid private data.
         */
        rte_eth_dev_info_get(port_id, &dev_info);
        if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
                RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
                                mp->name, (int) mp->private_data_size,
                                (int) sizeof(struct rte_pktmbuf_pool_private));
                return -ENOSPC;
        }
        mbp_buf_size = rte_pktmbuf_data_room_size(mp);

        if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
                RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
                                "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
                                "=%d)\n",
                                mp->name,
                                (int)mbp_buf_size,
                                (int)(RTE_PKTMBUF_HEADROOM +
                                      dev_info.min_rx_bufsize),
                                (int)RTE_PKTMBUF_HEADROOM,
                                (int)dev_info.min_rx_bufsize);
                return -EINVAL;
        }

        if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
                        nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
                        nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {

                RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
                        "should be: <= %hu, >= %hu, and a multiple of %hu\n",
                        nb_rx_desc,
                        dev_info.rx_desc_lim.nb_max,
                        dev_info.rx_desc_lim.nb_min,
                        dev_info.rx_desc_lim.nb_align);
                return -EINVAL;
        }

        if (rx_conf == NULL)
                rx_conf = &dev_info.default_rxconf;

        ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
                                              socket_id, rx_conf, mp);
        if (!ret) {
                if (!dev->data->min_rx_buf_size ||
                    dev->data->min_rx_buf_size > mbp_buf_size)
                        dev->data->min_rx_buf_size = mbp_buf_size;
        }

        return ret;
}

int
rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
                       uint16_t nb_tx_desc, unsigned int socket_id,
                       const struct rte_eth_txconf *tx_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        if (dev->data->dev_started) {
                RTE_PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);

        rte_eth_dev_info_get(port_id, &dev_info);

        if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
            nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
            nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
                RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
                                "should be: <= %hu, >= %hu, and a multiple of %hu\n",
                                nb_tx_desc,
                                dev_info.tx_desc_lim.nb_max,
                                dev_info.tx_desc_lim.nb_min,
                                dev_info.tx_desc_lim.nb_align);
                return -EINVAL;
        }

        if (tx_conf == NULL)
                tx_conf = &dev_info.default_txconf;

        return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
                                               socket_id, tx_conf);
}

void
rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
                void *userdata __rte_unused)
{
        unsigned i;

        for (i = 0; i < unsent; i++)
                rte_pktmbuf_free(pkts[i]);
}

void
rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
                void *userdata)
{
        uint64_t *count = userdata;
        unsigned i;

        for (i = 0; i < unsent; i++)
                rte_pktmbuf_free(pkts[i]);

        *count += unsent;
}

int
rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
                buffer_tx_error_fn cbfn, void *userdata)
{
        buffer->error_callback = cbfn;
        buffer->error_userdata = userdata;
        return 0;
}

int
rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
{
        int ret = 0;

        if (buffer == NULL)
                return -EINVAL;

        buffer->size = size;
        if (buffer->error_callback == NULL) {
                ret = rte_eth_tx_buffer_set_err_callback(
                        buffer, rte_eth_tx_buffer_drop_callback, NULL);
        }

        return ret;
}

void
rte_eth_promiscuous_enable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
        (*dev->dev_ops->promiscuous_enable)(dev);
        dev->data->promiscuous = 1;
}

void
rte_eth_promiscuous_disable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
        dev->data->promiscuous = 0;
        (*dev->dev_ops->promiscuous_disable)(dev);
}

int
rte_eth_promiscuous_get(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        return dev->data->promiscuous;
}

void
rte_eth_allmulticast_enable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
        (*dev->dev_ops->allmulticast_enable)(dev);
        dev->data->all_multicast = 1;
}

void
rte_eth_allmulticast_disable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
        dev->data->all_multicast = 0;
        (*dev->dev_ops->allmulticast_disable)(dev);
}

int
rte_eth_allmulticast_get(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        return dev->data->all_multicast;
}

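/*
 * Copy the 8-byte dev->data->dev_link into *link as a single atomic
 * 64-bit operation, so a concurrent link-state update cannot be seen
 * half-written; the compare-and-set never modifies the source.
 */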
static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
{
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &(dev->data->dev_link);

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

void
rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_conf.intr_conf.lsc != 0)
                rte_eth_dev_atomic_read_link_status(dev, eth_link);
        else {
                RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
                (*dev->dev_ops->link_update)(dev, 1);
                *eth_link = dev->data->dev_link;
        }
}

void
rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_conf.intr_conf.lsc != 0)
                rte_eth_dev_atomic_read_link_status(dev, eth_link);
        else {
                RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
                (*dev->dev_ops->link_update)(dev, 0);
                *eth_link = dev->data->dev_link;
        }
}

int
rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        memset(stats, 0, sizeof(*stats));

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
        stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
        (*dev->dev_ops->stats_get)(dev, stats);
        return 0;
}

void
rte_eth_stats_reset(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
        (*dev->dev_ops->stats_reset)(dev);
        dev->data->rx_mbuf_alloc_failed = 0;
}

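/*
 * Total number of xstats for a port: the generic entries (RTE_NB_STATS
 * plus RTE_NB_RXQ_STATS/RTE_NB_TXQ_STATS per queue), plus whatever the
 * driver reports through xstats_get_names(NULL, 0).
 */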
1332 static int
1333 get_xstats_count(uint8_t port_id)
1334 {
1335         struct rte_eth_dev *dev;
1336         int count;
1337
1338         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1339         dev = &rte_eth_devices[port_id];
1340         if (dev->dev_ops->xstats_get_names != NULL) {
1341                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
1342                 if (count < 0)
1343                         return count;
1344         } else
1345                 count = 0;
1346         count += RTE_NB_STATS;
1347         count += dev->data->nb_rx_queues * RTE_NB_RXQ_STATS;
1348         count += dev->data->nb_tx_queues * RTE_NB_TXQ_STATS;
1349         return count;
1350 }
1351
1352 int
1353 rte_eth_xstats_get_names(uint8_t port_id,
1354         struct rte_eth_xstat_name *xstats_names,
1355         unsigned size)
1356 {
1357         struct rte_eth_dev *dev;
1358         int cnt_used_entries;
1359         int cnt_expected_entries;
1360         int cnt_driver_entries;
1361         uint32_t idx, id_queue;
1362
1363         cnt_expected_entries = get_xstats_count(port_id);
1364         if (xstats_names == NULL || cnt_expected_entries < 0 ||
1365                         (int)size < cnt_expected_entries)
1366                 return cnt_expected_entries;
1367
1368         /* port_id checked in get_xstats_count() */
1369         dev = &rte_eth_devices[port_id];
1370         cnt_used_entries = 0;
1371
1372         for (idx = 0; idx < RTE_NB_STATS; idx++) {
1373                 snprintf(xstats_names[cnt_used_entries].name,
1374                         sizeof(xstats_names[0].name),
1375                         "%s", rte_stats_strings[idx].name);
1376                 cnt_used_entries++;
1377         }
1378         for (id_queue = 0; id_queue < dev->data->nb_rx_queues; id_queue++) {
1379                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
1380                         snprintf(xstats_names[cnt_used_entries].name,
1381                                 sizeof(xstats_names[0].name),
1382                                 "rx_q%u%s",
1383                                 id_queue, rte_rxq_stats_strings[idx].name);
1384                         cnt_used_entries++;
1385                 }
1386
1387         }
1388         for (id_queue = 0; id_queue < dev->data->nb_tx_queues; id_queue++) {
1389                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
1390                         snprintf(xstats_names[cnt_used_entries].name,
1391                                 sizeof(xstats_names[0].name),
1392                                 "tx_q%u%s",
1393                                 id_queue, rte_txq_stats_strings[idx].name);
1394                         cnt_used_entries++;
1395                 }
1396         }
1397
1398         if (dev->dev_ops->xstats_get_names != NULL) {
1399                 /* If there are any driver-specific xstats, append them
1400                  * to end of list.
1401                  */
1402                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
1403                         dev,
1404                         xstats_names + cnt_used_entries,
1405                         size - cnt_used_entries);
1406                 if (cnt_driver_entries < 0)
1407                         return cnt_driver_entries;
1408                 cnt_used_entries += cnt_driver_entries;
1409         }
1410
1411         return cnt_used_entries;
1412 }
1413
1414 /* retrieve ethdev extended statistics */
1415 int
1416 rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
1417         unsigned n)
1418 {
1419         struct rte_eth_stats eth_stats;
1420         struct rte_eth_dev *dev;
1421         unsigned count = 0, i, q;
1422         signed xcount = 0;
1423         uint64_t val, *stats_ptr;
1424
1425         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1426
1427         dev = &rte_eth_devices[port_id];
1428
1429         /* Return generic statistics */
1430         count = RTE_NB_STATS + (dev->data->nb_rx_queues * RTE_NB_RXQ_STATS) +
1431                 (dev->data->nb_tx_queues * RTE_NB_TXQ_STATS);
1432
1433         /* implemented by the driver */
1434         if (dev->dev_ops->xstats_get != NULL) {
1435                 /* Retrieve the xstats from the driver at the end of the
1436                  * xstats struct.
1437                  */
1438                 xcount = (*dev->dev_ops->xstats_get)(dev,
1439                                      xstats ? xstats + count : NULL,
1440                                      (n > count) ? n - count : 0);
1441
1442                 if (xcount < 0)
1443                         return xcount;
1444         }
1445
1446         if (n < count + xcount || xstats == NULL)
1447                 return count + xcount;
1448
1449         /* now fill the xstats structure */
1450         count = 0;
1451         rte_eth_stats_get(port_id, &eth_stats);
1452
1453         /* global stats */
1454         for (i = 0; i < RTE_NB_STATS; i++) {
1455                 stats_ptr = RTE_PTR_ADD(&eth_stats,
1456                                         rte_stats_strings[i].offset);
1457                 val = *stats_ptr;
1458                 xstats[count++].value = val;
1459         }
1460
1461         /* per-rxq stats */
1462         for (q = 0; q < dev->data->nb_rx_queues; q++) {
1463                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
1464                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1465                                         rte_rxq_stats_strings[i].offset +
1466                                         q * sizeof(uint64_t));
1467                         val = *stats_ptr;
1468                         xstats[count++].value = val;
1469                 }
1470         }
1471
1472         /* per-txq stats */
1473         for (q = 0; q < dev->data->nb_tx_queues; q++) {
1474                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
1475                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1476                                         rte_txq_stats_strings[i].offset +
1477                                         q * sizeof(uint64_t));
1478                         val = *stats_ptr;
1479                         xstats[count++].value = val;
1480                 }
1481         }
1482
1483         for (i = 0; i < count + xcount; i++)
1484                 xstats[i].id = i;
1485
1486         return count + xcount;
1487 }
1488
1489 /* reset ethdev extended statistics */
1490 void
1491 rte_eth_xstats_reset(uint8_t port_id)
1492 {
1493         struct rte_eth_dev *dev;
1494
1495         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1496         dev = &rte_eth_devices[port_id];
1497
1498         /* implemented by the driver */
1499         if (dev->dev_ops->xstats_reset != NULL) {
1500                 (*dev->dev_ops->xstats_reset)(dev);
1501                 return;
1502         }
1503
1504         /* fallback to default */
1505         rte_eth_stats_reset(port_id);
1506 }
1507
1508 static int
1509 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1510                 uint8_t is_rx)
1511 {
1512         struct rte_eth_dev *dev;
1513
1514         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1515
1516         dev = &rte_eth_devices[port_id];
1517
1518         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1519         return (*dev->dev_ops->queue_stats_mapping_set)
1520                         (dev, queue_id, stat_idx, is_rx);
1521 }
1522
1523
1524 int
1525 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1526                 uint8_t stat_idx)
1527 {
1528         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1529                         STAT_QMAP_TX);
1530 }
1531
1532
1533 int
1534 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1535                 uint8_t stat_idx)
1536 {
1537         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1538                         STAT_QMAP_RX);
1539 }
1540
1541 void
1542 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1543 {
1544         struct rte_eth_dev *dev;
1545         const struct rte_eth_desc_lim lim = {
1546                 .nb_max = UINT16_MAX,
1547                 .nb_min = 0,
1548                 .nb_align = 1,
1549         };
1550
1551         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1552         dev = &rte_eth_devices[port_id];
1553
1554         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
1555         dev_info->rx_desc_lim = lim;
1556         dev_info->tx_desc_lim = lim;
1557
1558         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1559         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1560         dev_info->pci_dev = dev->pci_dev;
1561         dev_info->driver_name = dev->data->drv_name;
1562         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
1563         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
1564 }
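
/*
 * Usage sketch (illustrative only, not part of the library): query the
 * port limits before picking descriptor counts. "port_id" and "nb_rxd"
 * are assumed caller-owned variables.
 *
 *	struct rte_eth_dev_info dev_info;
 *	uint16_t nb_rxd = 1024;
 *
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	if (nb_rxd > dev_info.rx_desc_lim.nb_max)
 *		nb_rxd = dev_info.rx_desc_lim.nb_max;
 */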
1565
1566 int
1567 rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask,
1568                                  uint32_t *ptypes, int num)
1569 {
1570         int i, j;
1571         struct rte_eth_dev *dev;
1572         const uint32_t *all_ptypes;
1573
1574         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1575         dev = &rte_eth_devices[port_id];
1576         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
1577         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
1578
1579         if (!all_ptypes)
1580                 return 0;
1581
1582         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
1583                 if (all_ptypes[i] & ptype_mask) {
1584                         if (j < num)
1585                                 ptypes[j] = all_ptypes[i];
1586                         j++;
1587                 }
1588
1589         return j;
1590 }
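
/*
 * Usage sketch (illustrative only): the usual two-call pattern. Call once
 * with num == 0 to learn how many ptypes match the mask, then again with
 * a buffer. The buffer size of 16 is an arbitrary assumption.
 *
 *	uint32_t ptypes[16];
 *	int n;
 *
 *	n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L3_MASK,
 *					     NULL, 0);
 *	if (n > 0 && n <= 16)
 *		n = rte_eth_dev_get_supported_ptypes(port_id,
 *				RTE_PTYPE_L3_MASK, ptypes, n);
 */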
1591
1592 void
1593 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1594 {
1595         struct rte_eth_dev *dev;
1596
1597         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1598         dev = &rte_eth_devices[port_id];
1599         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1600 }
1601
1602
1603 int
1604 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1605 {
1606         struct rte_eth_dev *dev;
1607
1608         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1609
1610         dev = &rte_eth_devices[port_id];
1611         *mtu = dev->data->mtu;
1612         return 0;
1613 }
1614
1615 int
1616 rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
1617 {
1618         int ret;
1619         struct rte_eth_dev *dev;
1620
1621         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1622         dev = &rte_eth_devices[port_id];
1623         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
1624
1625         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
1626         if (!ret)
1627                 dev->data->mtu = mtu;
1628
1629         return ret;
1630 }
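
/*
 * Usage sketch (illustrative only): raise the MTU for jumbo frames, then
 * read back the value the port now reports. 9000 is an arbitrary example;
 * the driver rejects values outside its supported range.
 *
 *	uint16_t mtu;
 *
 *	if (rte_eth_dev_set_mtu(port_id, 9000) == 0)
 *		rte_eth_dev_get_mtu(port_id, &mtu);
 */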
1631
1632 int
1633 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1634 {
1635         struct rte_eth_dev *dev;
1636
1637         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1638         dev = &rte_eth_devices[port_id];
1639         if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1640                 RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1641                 return -ENOSYS;
1642         }
1643
1644         if (vlan_id > 4095) {
1645                 RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1646                                 port_id, (unsigned) vlan_id);
1647                 return -EINVAL;
1648         }
1649         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1650
1651         return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1652 }
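
/*
 * Usage sketch (illustrative only): accept VLAN 100 on a port that was
 * configured with rxmode.hw_vlan_filter = 1; without that, the call
 * above returns -ENOSYS.
 *
 *	int ret = rte_eth_dev_vlan_filter(port_id, 100, 1);
 */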
1653
1654 int
1655 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1656 {
1657         struct rte_eth_dev *dev;
1658
1659         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1660         dev = &rte_eth_devices[port_id];
1661         if (rx_queue_id >= dev->data->nb_rx_queues) {
1662                 RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
1663                 return -EINVAL;
1664         }
1665
1666         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1667         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1668
1669         return 0;
1670 }
1671
1672 int
1673 rte_eth_dev_set_vlan_ether_type(uint8_t port_id,
1674                                 enum rte_vlan_type vlan_type,
1675                                 uint16_t tpid)
1676 {
1677         struct rte_eth_dev *dev;
1678
1679         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1680         dev = &rte_eth_devices[port_id];
1681         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1682
1683         return (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, tpid);
1684 }
1685
1686 int
1687 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
1688 {
1689         struct rte_eth_dev *dev;
1690         int ret = 0;
1691         int mask = 0;
1692         int cur, org = 0;
1693
1694         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1695         dev = &rte_eth_devices[port_id];
1696
1697         /* check which options were changed by the application */
1698         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
1699         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
1700         if (cur != org) {
1701                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
1702                 mask |= ETH_VLAN_STRIP_MASK;
1703         }
1704
1705         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
1706         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
1707         if (cur != org) {
1708                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
1709                 mask |= ETH_VLAN_FILTER_MASK;
1710         }
1711
1712         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
1713         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
1714         if (cur != org) {
1715                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
1716                 mask |= ETH_VLAN_EXTEND_MASK;
1717         }
1718
1719         /* no change */
1720         if (mask == 0)
1721                 return ret;
1722
1723         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
1724         (*dev->dev_ops->vlan_offload_set)(dev, mask);
1725
1726         return ret;
1727 }
1728
1729 int
1730 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1731 {
1732         struct rte_eth_dev *dev;
1733         int ret = 0;
1734
1735         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1736         dev = &rte_eth_devices[port_id];
1737
1738         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1739                 ret |= ETH_VLAN_STRIP_OFFLOAD;
1740
1741         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1742                 ret |= ETH_VLAN_FILTER_OFFLOAD;
1743
1744         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1745                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
1746
1747         return ret;
1748 }
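
/*
 * Usage sketch (illustrative only): read-modify-write of the VLAN offload
 * flags using the getter above and the setter before it.
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	mask |= ETH_VLAN_STRIP_OFFLOAD;
 *	mask &= ~ETH_VLAN_FILTER_OFFLOAD;
 *	rte_eth_dev_set_vlan_offload(port_id, mask);
 */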
1749
1750 int
1751 rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
1752 {
1753         struct rte_eth_dev *dev;
1754
1755         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1756         dev = &rte_eth_devices[port_id];
1757         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
1758         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
1759
1760         return 0;
1761 }
1762
1763 int
1764 rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1765 {
1766         struct rte_eth_dev *dev;
1767
1768         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1769         dev = &rte_eth_devices[port_id];
1770         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
1771         memset(fc_conf, 0, sizeof(*fc_conf));
1772         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
1773 }
1774
1775 int
1776 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1777 {
1778         struct rte_eth_dev *dev;
1779
1780         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1781         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
1782                 RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
1783                 return -EINVAL;
1784         }
1785
1786         dev = &rte_eth_devices[port_id];
1787         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
1788         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
1789 }
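
/*
 * Usage sketch (illustrative only): fetch the current flow control
 * settings, switch to full RX/TX pause, and write them back.
 *
 *	struct rte_eth_fc_conf fc_conf;
 *
 *	if (rte_eth_dev_flow_ctrl_get(port_id, &fc_conf) == 0) {
 *		fc_conf.mode = RTE_FC_FULL;
 *		rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 *	}
 */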
1790
1791 int
1792 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
1793 {
1794         struct rte_eth_dev *dev;
1795
1796         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1797         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
1798                 RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
1799                 return -EINVAL;
1800         }
1801
1802         dev = &rte_eth_devices[port_id];
1803         /* High water, low water validation is device-specific */
1804         if (*dev->dev_ops->priority_flow_ctrl_set)
1805                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
1806         return -ENOTSUP;
1807 }
1808
1809 static int
1810 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
1811                         uint16_t reta_size)
1812 {
1813         uint16_t i, num;
1814
1815         if (!reta_conf)
1816                 return -EINVAL;
1817
1818         if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
1819                 RTE_PMD_DEBUG_TRACE("Invalid reta size, should be %u aligned\n",
1820                                                         RTE_RETA_GROUP_SIZE);
1821                 return -EINVAL;
1822         }
1823
1824         num = reta_size / RTE_RETA_GROUP_SIZE;
1825         for (i = 0; i < num; i++) {
1826                 if (reta_conf[i].mask)
1827                         return 0;
1828         }
1829
1830         return -EINVAL;
1831 }
1832
1833 static int
1834 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
1835                          uint16_t reta_size,
1836                          uint16_t max_rxq)
1837 {
1838         uint16_t i, idx, shift;
1839
1840         if (!reta_conf)
1841                 return -EINVAL;
1842
1843         if (max_rxq == 0) {
1844                 RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
1845                 return -EINVAL;
1846         }
1847
1848         for (i = 0; i < reta_size; i++) {
1849                 idx = i / RTE_RETA_GROUP_SIZE;
1850                 shift = i % RTE_RETA_GROUP_SIZE;
1851                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
1852                         (reta_conf[idx].reta[shift] >= max_rxq)) {
1853                         RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
1854                                 "the maximum rxq index: %u\n", idx, shift,
1855                                 reta_conf[idx].reta[shift], max_rxq);
1856                         return -EINVAL;
1857                 }
1858         }
1859
1860         return 0;
1861 }
1862
1863 int
1864 rte_eth_dev_rss_reta_update(uint8_t port_id,
1865                             struct rte_eth_rss_reta_entry64 *reta_conf,
1866                             uint16_t reta_size)
1867 {
1868         struct rte_eth_dev *dev;
1869         int ret;
1870
1871         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1872         /* Check mask bits */
1873         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1874         if (ret < 0)
1875                 return ret;
1876
1877         dev = &rte_eth_devices[port_id];
1878
1879         /* Check entry value */
1880         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
1881                                 dev->data->nb_rx_queues);
1882         if (ret < 0)
1883                 return ret;
1884
1885         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
1886         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
1887 }
1888
1889 int
1890 rte_eth_dev_rss_reta_query(uint8_t port_id,
1891                            struct rte_eth_rss_reta_entry64 *reta_conf,
1892                            uint16_t reta_size)
1893 {
1894         struct rte_eth_dev *dev;
1895         int ret;
1896
1897         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1898
1899         /* Check mask bits */
1900         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1901         if (ret < 0)
1902                 return ret;
1903
1904         dev = &rte_eth_devices[port_id];
1905         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
1906         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
1907 }
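
/*
 * Usage sketch (illustrative only): spread a redirection table round-robin
 * over "nb_queues" RX queues. Each rte_eth_rss_reta_entry64 carries
 * RTE_RETA_GROUP_SIZE entries plus a mask selecting which of them apply;
 * "reta_size" is assumed to come from dev_info.reta_size (here at most
 * ETH_RSS_RETA_SIZE_512).
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
 *						   RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	for (i = 0; i < reta_size; i++) {
 *		reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *				1ULL << (i % RTE_RETA_GROUP_SIZE);
 *		reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i %
 *				RTE_RETA_GROUP_SIZE] = i % nb_queues;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
 */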
1908
1909 int
1910 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
1911 {
1912         struct rte_eth_dev *dev;
1913         uint64_t rss_hash_protos;
1914
1915         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1916         rss_hash_protos = rss_conf->rss_hf;
1917         if ((rss_hash_protos != 0) &&
1918             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
1919                 RTE_PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%" PRIx64 "\n",
1920                                 rss_hash_protos);
1921                 return -EINVAL;
1922         }
1923         dev = &rte_eth_devices[port_id];
1924         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
1925         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
1926 }
1927
1928 int
1929 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
1930                               struct rte_eth_rss_conf *rss_conf)
1931 {
1932         struct rte_eth_dev *dev;
1933
1934         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1935         dev = &rte_eth_devices[port_id];
1936         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
1937         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
1938 }
1939
1940 int
1941 rte_eth_dev_udp_tunnel_port_add(uint8_t port_id,
1942                                 struct rte_eth_udp_tunnel *udp_tunnel)
1943 {
1944         struct rte_eth_dev *dev;
1945
1946         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1947         if (udp_tunnel == NULL) {
1948                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
1949                 return -EINVAL;
1950         }
1951
1952         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
1953                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
1954                 return -EINVAL;
1955         }
1956
1957         dev = &rte_eth_devices[port_id];
1958         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
1959         return (*dev->dev_ops->udp_tunnel_port_add)(dev, udp_tunnel);
1960 }
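
/*
 * Usage sketch (illustrative only): register the IANA VXLAN port (4789)
 * so the NIC can parse inner headers of encapsulated traffic.
 *
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *
 *	rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 */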
1961
1962 int
1963 rte_eth_dev_udp_tunnel_port_delete(uint8_t port_id,
1964                                    struct rte_eth_udp_tunnel *udp_tunnel)
1965 {
1966         struct rte_eth_dev *dev;
1967
1968         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1969         dev = &rte_eth_devices[port_id];
1970
1971         if (udp_tunnel == NULL) {
1972                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
1973                 return -EINVAL;
1974         }
1975
1976         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
1977                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
1978                 return -EINVAL;
1979         }
1980
1981         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
1982         return (*dev->dev_ops->udp_tunnel_port_del)(dev, udp_tunnel);
1983 }
1984
1985 int
1986 rte_eth_led_on(uint8_t port_id)
1987 {
1988         struct rte_eth_dev *dev;
1989
1990         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1991         dev = &rte_eth_devices[port_id];
1992         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
1993         return (*dev->dev_ops->dev_led_on)(dev);
1994 }
1995
1996 int
1997 rte_eth_led_off(uint8_t port_id)
1998 {
1999         struct rte_eth_dev *dev;
2000
2001         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2002         dev = &rte_eth_devices[port_id];
2003         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2004         return (*dev->dev_ops->dev_led_off)(dev);
2005 }
2006
2007 /*
2008  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2009  * an empty spot.
2010  */
2011 static int
2012 get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2013 {
2014         struct rte_eth_dev_info dev_info;
2015         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2016         unsigned i;
2017
2018         rte_eth_dev_info_get(port_id, &dev_info);
2019
2020         for (i = 0; i < dev_info.max_mac_addrs; i++)
2021                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2022                         return i;
2023
2024         return -1;
2025 }
2026
2027 static const struct ether_addr null_mac_addr;
2028
2029 int
2030 rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
2031                         uint32_t pool)
2032 {
2033         struct rte_eth_dev *dev;
2034         int index;
2035         uint64_t pool_mask;
2036
2037         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2038         dev = &rte_eth_devices[port_id];
2039         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2040
2041         if (is_zero_ether_addr(addr)) {
2042                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2043                         port_id);
2044                 return -EINVAL;
2045         }
2046         if (pool >= ETH_64_POOLS) {
2047                 RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2048                 return -EINVAL;
2049         }
2050
2051         index = get_mac_addr_index(port_id, addr);
2052         if (index < 0) {
2053                 index = get_mac_addr_index(port_id, &null_mac_addr);
2054                 if (index < 0) {
2055                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2056                                 port_id);
2057                         return -ENOSPC;
2058                 }
2059         } else {
2060                 pool_mask = dev->data->mac_pool_sel[index];
2061
2062                 /* If both the MAC address and pool are already there, do nothing */
2063                 if (pool_mask & (1ULL << pool))
2064                         return 0;
2065         }
2066
2067         /* Update NIC */
2068         (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2069
2070         /* Update address in NIC data structure */
2071         ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2072
2073         /* Update pool bitmap in NIC data structure */
2074         dev->data->mac_pool_sel[index] |= (1ULL << pool);
2075
2076         return 0;
2077 }
2078
2079 int
2080 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
2081 {
2082         struct rte_eth_dev *dev;
2083         int index;
2084
2085         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2086         dev = &rte_eth_devices[port_id];
2087         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2088
2089         index = get_mac_addr_index(port_id, addr);
2090         if (index == 0) {
2091                 RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2092                 return -EADDRINUSE;
2093         } else if (index < 0)
2094                 return 0;  /* Do nothing if address wasn't found */
2095
2096         /* Update NIC */
2097         (*dev->dev_ops->mac_addr_remove)(dev, index);
2098
2099         /* Update address in NIC data structure */
2100         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2101
2102         /* reset pool bitmap */
2103         dev->data->mac_pool_sel[index] = 0;
2104
2105         return 0;
2106 }
2107
2108 int
2109 rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr)
2110 {
2111         struct rte_eth_dev *dev;
2112
2113         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2114
2115         if (!is_valid_assigned_ether_addr(addr))
2116                 return -EINVAL;
2117
2118         dev = &rte_eth_devices[port_id];
2119         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2120
2121         /* Update default address in NIC data structure */
2122         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
2123
2124         (*dev->dev_ops->mac_addr_set)(dev, addr);
2125
2126         return 0;
2127 }
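
/*
 * Usage sketch (illustrative only): add a locally administered secondary
 * address to pool 0, then remove it. The bytes are arbitrary example
 * values; the default address at index 0 cannot be removed.
 *
 *	struct ether_addr mac = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
 *	};
 *
 *	if (rte_eth_dev_mac_addr_add(port_id, &mac, 0) == 0)
 *		rte_eth_dev_mac_addr_remove(port_id, &mac);
 */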
2128
2129 int
2130 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
2131                                 uint16_t rx_mode, uint8_t on)
2132 {
2133         uint16_t num_vfs;
2134         struct rte_eth_dev *dev;
2135         struct rte_eth_dev_info dev_info;
2136
2137         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2138
2139         dev = &rte_eth_devices[port_id];
2140         rte_eth_dev_info_get(port_id, &dev_info);
2141
2142         num_vfs = dev_info.max_vfs;
2143         if (vf > num_vfs) {
2144                 RTE_PMD_DEBUG_TRACE("set VF RX mode: invalid VF id %d\n", vf);
2145                 return -EINVAL;
2146         }
2147
2148         if (rx_mode == 0) {
2149                 RTE_PMD_DEBUG_TRACE("set VF RX mode: mode mask cannot be zero\n");
2150                 return -EINVAL;
2151         }
2152         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
2153         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
2154 }
2155
2156 /*
2157  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2158  * an empty spot.
2159  */
2160 static int
2161 get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2162 {
2163         struct rte_eth_dev_info dev_info;
2164         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2165         unsigned i;
2166
2167         rte_eth_dev_info_get(port_id, &dev_info);
2168         if (!dev->data->hash_mac_addrs)
2169                 return -1;
2170
2171         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2172                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2173                         ETHER_ADDR_LEN) == 0)
2174                         return i;
2175
2176         return -1;
2177 }
2178
2179 int
2180 rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
2181                                 uint8_t on)
2182 {
2183         int index;
2184         int ret;
2185         struct rte_eth_dev *dev;
2186
2187         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2188
2189         dev = &rte_eth_devices[port_id];
2190         if (is_zero_ether_addr(addr)) {
2191                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2192                         port_id);
2193                 return -EINVAL;
2194         }
2195
2196         index = get_hash_mac_addr_index(port_id, addr);
2197         /* Check if it's already there, and do nothing */
2198         if ((index >= 0) && (on))
2199                 return 0;
2200
2201         if (index < 0) {
2202                 if (!on) {
2203                         RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
2204                                 "set in UTA\n", port_id);
2205                         return -EINVAL;
2206                 }
2207
2208                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2209                 if (index < 0) {
2210                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2211                                         port_id);
2212                         return -ENOSPC;
2213                 }
2214         }
2215
2216         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2217         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2218         if (ret == 0) {
2219                 /* Update address in NIC data structure */
2220                 if (on)
2221                         ether_addr_copy(addr,
2222                                         &dev->data->hash_mac_addrs[index]);
2223                 else
2224                         ether_addr_copy(&null_mac_addr,
2225                                         &dev->data->hash_mac_addrs[index]);
2226         }
2227
2228         return ret;
2229 }
2230
2231 int
2232 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2233 {
2234         struct rte_eth_dev *dev;
2235
2236         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2237
2238         dev = &rte_eth_devices[port_id];
2239
2240         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2241         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2242 }
2243
2244 int
2245 rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on)
2246 {
2247         uint16_t num_vfs;
2248         struct rte_eth_dev *dev;
2249         struct rte_eth_dev_info dev_info;
2250
2251         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2252
2253         dev = &rte_eth_devices[port_id];
2254         rte_eth_dev_info_get(port_id, &dev_info);
2255
2256         num_vfs = dev_info.max_vfs;
2257         if (vf > num_vfs) {
2258                 RTE_PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2259                 return -EINVAL;
2260         }
2261
2262         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2263         return (*dev->dev_ops->set_vf_rx)(dev, vf, on);
2264 }
2265
2266 int
2267 rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on)
2268 {
2269         uint16_t num_vfs;
2270         struct rte_eth_dev *dev;
2271         struct rte_eth_dev_info dev_info;
2272
2273         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2274
2275         dev = &rte_eth_devices[port_id];
2276         rte_eth_dev_info_get(port_id, &dev_info);
2277
2278         num_vfs = dev_info.max_vfs;
2279         if (vf > num_vfs) {
2280                 RTE_PMD_DEBUG_TRACE("set VF TX: invalid VF id=%d\n", vf);
2281                 return -EINVAL;
2282         }
2283
2284         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2285         return (*dev->dev_ops->set_vf_tx)(dev, vf, on);
2286 }
2287
2288 int
2289 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2290                                uint64_t vf_mask, uint8_t vlan_on)
2291 {
2292         struct rte_eth_dev *dev;
2293
2294         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2295
2296         dev = &rte_eth_devices[port_id];
2297
2298         if (vlan_id > ETHER_MAX_VLAN_ID) {
2299                 RTE_PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
2300                         vlan_id);
2301                 return -EINVAL;
2302         }
2303
2304         if (vf_mask == 0) {
2305                 RTE_PMD_DEBUG_TRACE("VF VLAN filter: pool_mask cannot be 0\n");
2306                 return -EINVAL;
2307         }
2308
2309         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2310         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2311                                                    vf_mask, vlan_on);
2312 }
2313
2314 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
2315                                         uint16_t tx_rate)
2316 {
2317         struct rte_eth_dev *dev;
2318         struct rte_eth_dev_info dev_info;
2319         struct rte_eth_link link;
2320
2321         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2322
2323         dev = &rte_eth_devices[port_id];
2324         rte_eth_dev_info_get(port_id, &dev_info);
2325         link = dev->data->dev_link;
2326
2327         if (queue_idx > dev_info.max_tx_queues) {
2328                 RTE_PMD_DEBUG_TRACE("set queue rate limit:port %d: "
2329                                 "invalid queue id=%d\n", port_id, queue_idx);
2330                 return -EINVAL;
2331         }
2332
2333         if (tx_rate > link.link_speed) {
2334                 RTE_PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
2335                                 "bigger than link speed %d\n",
2336                         tx_rate, link.link_speed);
2337                 return -EINVAL;
2338         }
2339
2340         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2341         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2342 }
2343
2344 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
2345                                 uint64_t q_msk)
2346 {
2347         struct rte_eth_dev *dev;
2348         struct rte_eth_dev_info dev_info;
2349         struct rte_eth_link link;
2350
2351         if (q_msk == 0)
2352                 return 0;
2353
2354         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2355
2356         dev = &rte_eth_devices[port_id];
2357         rte_eth_dev_info_get(port_id, &dev_info);
2358         link = dev->data->dev_link;
2359
2360         if (vf > dev_info.max_vfs) {
2361                 RTE_PMD_DEBUG_TRACE("set VF rate limit:port %d: "
2362                                 "invalid vf id=%d\n", port_id, vf);
2363                 return -EINVAL;
2364         }
2365
2366         if (tx_rate > link.link_speed) {
2367                 RTE_PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
2368                                 "bigger than link speed %d\n",
2369                                 tx_rate, link.link_speed);
2370                 return -EINVAL;
2371         }
2372
2373         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
2374         return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
2375 }
2376
2377 int
2378 rte_eth_mirror_rule_set(uint8_t port_id,
2379                         struct rte_eth_mirror_conf *mirror_conf,
2380                         uint8_t rule_id, uint8_t on)
2381 {
2382         struct rte_eth_dev *dev;
2383
2384         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2385         if (mirror_conf->rule_type == 0) {
2386                 RTE_PMD_DEBUG_TRACE("mirror rule type cannot be 0.\n");
2387                 return -EINVAL;
2388         }
2389
2390         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2391                 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
2392                                 ETH_64_POOLS - 1);
2393                 return -EINVAL;
2394         }
2395
2396         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
2397              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
2398             (mirror_conf->pool_mask == 0)) {
2399                 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask cannot be 0.\n");
2400                 return -EINVAL;
2401         }
2402
2403         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
2404             mirror_conf->vlan.vlan_mask == 0) {
2405                 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask cannot be 0.\n");
2406                 return -EINVAL;
2407         }
2408
2409         dev = &rte_eth_devices[port_id];
2410         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2411
2412         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2413 }
2414
2415 int
2416 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2417 {
2418         struct rte_eth_dev *dev;
2419
2420         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2421
2422         dev = &rte_eth_devices[port_id];
2423         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2424
2425         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2426 }
2427
2428 int
2429 rte_eth_dev_callback_register(uint8_t port_id,
2430                         enum rte_eth_event_type event,
2431                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2432 {
2433         struct rte_eth_dev *dev;
2434         struct rte_eth_dev_callback *user_cb;
2435
2436         if (!cb_fn)
2437                 return -EINVAL;
2438
2439         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2440
2441         dev = &rte_eth_devices[port_id];
2442         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2443
2444         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
2445                 if (user_cb->cb_fn == cb_fn &&
2446                         user_cb->cb_arg == cb_arg &&
2447                         user_cb->event == event) {
2448                         break;
2449                 }
2450         }
2451
2452         /* create a new callback. */
2453         if (user_cb == NULL) {
2454                 user_cb = rte_zmalloc("INTR_USER_CALLBACK", sizeof(*user_cb), 0);
2455                 if (user_cb != NULL) {
2456                         user_cb->cb_fn = cb_fn;
2457                         user_cb->cb_arg = cb_arg;
2458                         user_cb->event = event;
2459                         TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
2460                 }
2461         }
2462
2463         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2464         return (user_cb == NULL) ? -ENOMEM : 0;
2465 }
2466
2467 int
2468 rte_eth_dev_callback_unregister(uint8_t port_id,
2469                         enum rte_eth_event_type event,
2470                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2471 {
2472         int ret;
2473         struct rte_eth_dev *dev;
2474         struct rte_eth_dev_callback *cb, *next;
2475
2476         if (!cb_fn)
2477                 return -EINVAL;
2478
2479         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2480
2481         dev = &rte_eth_devices[port_id];
2482         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2483
2484         ret = 0;
2485         for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
2486
2487                 next = TAILQ_NEXT(cb, next);
2488
2489                 if (cb->cb_fn != cb_fn || cb->event != event ||
2490                                 (cb->cb_arg != (void *)-1 &&
2491                                 cb->cb_arg != cb_arg))
2492                         continue;
2493
2494                 /*
2495                  * if this callback is not executing right now,
2496                  * then remove it.
2497                  */
2498                 if (cb->active == 0) {
2499                         TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
2500                         rte_free(cb);
2501                 } else {
2502                         ret = -EAGAIN;
2503                 }
2504         }
2505
2506         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2507         return ret;
2508 }
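
/*
 * Usage sketch (illustrative only): track link state changes with a
 * hypothetical handler. The callback runs in the interrupt thread, so it
 * should stay short and avoid blocking control-path calls.
 *
 *	static void
 *	lsc_handler(uint8_t port_id, enum rte_eth_event_type event,
 *		    void *cb_arg)
 *	{
 *		... rte_eth_link_get_nowait(port_id, ...) ...
 *	}
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      lsc_handler, NULL);
 */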
2509
2510 void
2511 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
2512         enum rte_eth_event_type event)
2513 {
2514         struct rte_eth_dev_callback *cb_lst;
2515         struct rte_eth_dev_callback dev_cb;
2516
2517         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2518         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
2519                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
2520                         continue;
2521                 dev_cb = *cb_lst;
2522                 cb_lst->active = 1;
2523                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2524                 dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
2525                                                 dev_cb.cb_arg);
2526                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
2527                 cb_lst->active = 0;
2528         }
2529         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2530 }
2531
2532 int
2533 rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
2534 {
2535         uint32_t vec;
2536         struct rte_eth_dev *dev;
2537         struct rte_intr_handle *intr_handle;
2538         uint16_t qid;
2539         int rc;
2540
2541         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2542
2543         dev = &rte_eth_devices[port_id];
2544         intr_handle = &dev->pci_dev->intr_handle;
2545         if (!intr_handle->intr_vec) {
2546                 RTE_PMD_DEBUG_TRACE("RX interrupt vector not set\n");
2547                 return -EPERM;
2548         }
2549
2550         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
2551                 vec = intr_handle->intr_vec[qid];
2552                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2553                 if (rc && rc != -EEXIST) {
2554                         RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2555                                         " op %d epfd %d vec %u\n",
2556                                         port_id, qid, op, epfd, vec);
2557                 }
2558         }
2559
2560         return 0;
2561 }
2562
2563 const struct rte_memzone *
2564 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
2565                          uint16_t queue_id, size_t size, unsigned align,
2566                          int socket_id)
2567 {
2568         char z_name[RTE_MEMZONE_NAMESIZE];
2569         const struct rte_memzone *mz;
2570
2571         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
2572                  dev->driver->pci_drv.name, ring_name,
2573                  dev->data->port_id, queue_id);
2574
2575         mz = rte_memzone_lookup(z_name);
2576         if (mz)
2577                 return mz;
2578
2579         if (rte_xen_dom0_supported())
2580                 return rte_memzone_reserve_bounded(z_name, size, socket_id,
2581                                                    0, align, RTE_PGSIZE_2M);
2582         else
2583                 return rte_memzone_reserve_aligned(z_name, size, socket_id,
2584                                                    0, align);
2585 }
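
/*
 * PMD-side usage sketch (illustrative only): reserve, or find again after
 * a secondary-process lookup, the memzone backing a TX descriptor ring.
 * "ring_size", "queue_id" and "socket_id" are assumed driver-local values.
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id, ring_size,
 *				      RTE_CACHE_LINE_SIZE, socket_id);
 *	if (mz == NULL)
 *		return -ENOMEM;
 */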
2586
2587 int
2588 rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
2589                           int epfd, int op, void *data)
2590 {
2591         uint32_t vec;
2592         struct rte_eth_dev *dev;
2593         struct rte_intr_handle *intr_handle;
2594         int rc;
2595
2596         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2597
2598         dev = &rte_eth_devices[port_id];
2599         if (queue_id >= dev->data->nb_rx_queues) {
2600                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
2601                 return -EINVAL;
2602         }
2603
2604         intr_handle = &dev->pci_dev->intr_handle;
2605         if (!intr_handle->intr_vec) {
2606                 RTE_PMD_DEBUG_TRACE("RX interrupt vector not set\n");
2607                 return -EPERM;
2608         }
2609
2610         vec = intr_handle->intr_vec[queue_id];
2611         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2612         if (rc && rc != -EEXIST) {
2613                 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2614                                 " op %d epfd %d vec %u\n",
2615                                 port_id, queue_id, op, epfd, vec);
2616                 return rc;
2617         }
2618
2619         return 0;
2620 }
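
/*
 * Usage sketch (illustrative only): hook one RX queue's interrupt into a
 * caller-owned epoll fd. Assumes the port was configured with
 * intr_conf.rxq = 1 so that intr_vec is populated.
 *
 *	int epfd = epoll_create(1);
 *
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, epfd,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	... epoll_wait(), then rte_eth_dev_rx_intr_disable() and poll ...
 */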
2621
2622 int
2623 rte_eth_dev_rx_intr_enable(uint8_t port_id,
2624                            uint16_t queue_id)
2625 {
2626         struct rte_eth_dev *dev;
2627
2628         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2629
2630         dev = &rte_eth_devices[port_id];
2631
2632         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
2633         return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
2634 }
2635
2636 int
2637 rte_eth_dev_rx_intr_disable(uint8_t port_id,
2638                             uint16_t queue_id)
2639 {
2640         struct rte_eth_dev *dev;
2641
2642         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2643
2644         dev = &rte_eth_devices[port_id];
2645
2646         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
2647         return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
2648 }
2649
2650 #ifdef RTE_NIC_BYPASS
2651 int rte_eth_dev_bypass_init(uint8_t port_id)
2652 {
2653         struct rte_eth_dev *dev;
2654
2655         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2656
2657         dev = &rte_eth_devices[port_id];
2658         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
2659         (*dev->dev_ops->bypass_init)(dev);
2660         return 0;
2661 }
2662
2663 int
2664 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
2665 {
2666         struct rte_eth_dev *dev;
2667
2668         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2669
2670         dev = &rte_eth_devices[port_id];
2671         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2672         (*dev->dev_ops->bypass_state_show)(dev, state);
2673         return 0;
2674 }
2675
2676 int
2677 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
2678 {
2679         struct rte_eth_dev *dev;
2680
2681         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2682
2683         dev = &rte_eth_devices[port_id];
2684         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
2685         (*dev->dev_ops->bypass_state_set)(dev, new_state);
2686         return 0;
2687 }
2688
2689 int
2690 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
2691 {
2692         struct rte_eth_dev *dev;
2693
2694         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2695
2696         dev = &rte_eth_devices[port_id];
2697         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_show, -ENOTSUP);
2698         (*dev->dev_ops->bypass_event_show)(dev, event, state);
2699         return 0;
2700 }
2701
2702 int
2703 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
2704 {
2705         struct rte_eth_dev *dev;
2706
2707         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2708
2709         dev = &rte_eth_devices[port_id];
2710
2711         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
2712         (*dev->dev_ops->bypass_event_set)(dev, event, state);
2713         return 0;
2714 }
2715
2716 int
2717 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
2718 {
2719         struct rte_eth_dev *dev;
2720
2721         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2722
2723         dev = &rte_eth_devices[port_id];
2724
2725         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
2726         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
2727         return 0;
2728 }
2729
2730 int
2731 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
2732 {
2733         struct rte_eth_dev *dev;
2734
2735         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2736
2737         dev = &rte_eth_devices[port_id];
2738
2739         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
2740         (*dev->dev_ops->bypass_ver_show)(dev, ver);
2741         return 0;
2742 }
2743
2744 int
2745 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
2746 {
2747         struct rte_eth_dev *dev;
2748
2749         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2750
2751         dev = &rte_eth_devices[port_id];
2752
2753         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
2754         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
2755         return 0;
2756 }
2757
2758 int
2759 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
2760 {
2761         struct rte_eth_dev *dev;
2762
2763         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2764
2765         dev = &rte_eth_devices[port_id];
2766
2767         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
2768         (*dev->dev_ops->bypass_wd_reset)(dev);
2769         return 0;
2770 }
2771 #endif
2772
2773 int
2774 rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
2775 {
2776         struct rte_eth_dev *dev;
2777
2778         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2779
2780         dev = &rte_eth_devices[port_id];
2781         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2782         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
2783                                 RTE_ETH_FILTER_NOP, NULL);
2784 }
2785
2786 int
2787 rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
2788                        enum rte_filter_op filter_op, void *arg)
2789 {
2790         struct rte_eth_dev *dev;
2791
2792         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2793
2794         dev = &rte_eth_devices[port_id];
2795         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2796         return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
2797 }
2798
2799 void *
2800 rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
2801                 rte_rx_callback_fn fn, void *user_param)
2802 {
2803 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2804         rte_errno = ENOTSUP;
2805         return NULL;
2806 #endif
2807         /* check input parameters */
2808         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2809                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2810                 rte_errno = EINVAL;
2811                 return NULL;
2812         }
2813         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2814
2815         if (cb == NULL) {
2816                 rte_errno = ENOMEM;
2817                 return NULL;
2818         }
2819
2820         cb->fn.rx = fn;
2821         cb->param = user_param;
2822
2823         rte_spinlock_lock(&rte_eth_rx_cb_lock);
2824         /* Add the callbacks in FIFO order. */
2825         struct rte_eth_rxtx_callback *tail =
2826                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
2827
2828         if (!tail) {
2829                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
2830
2831         } else {
2832                 while (tail->next)
2833                         tail = tail->next;
2834                 tail->next = cb;
2835         }
2836         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
2837
2838         return cb;
2839 }
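
/*
 * Usage sketch (illustrative only): count packets on RX queue 0 with a
 * hypothetical post-burst callback (requires RTE_ETHDEV_RXTX_CALLBACKS).
 *
 *	static uint16_t
 *	count_cb(uint8_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *		 uint16_t nb_pkts, uint16_t max_pkts, void *arg)
 *	{
 *		*(uint64_t *)arg += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	static uint64_t counter;
 *	void *cb = rte_eth_add_rx_callback(port_id, 0, count_cb, &counter);
 */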
2840
2841 void *
2842 rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id,
2843                 rte_rx_callback_fn fn, void *user_param)
2844 {
2845 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2846         rte_errno = ENOTSUP;
2847         return NULL;
2848 #endif
2849         /* check input parameters */
2850         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2851                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2852                 rte_errno = EINVAL;
2853                 return NULL;
2854         }
2855
2856         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2857
2858         if (cb == NULL) {
2859                 rte_errno = ENOMEM;
2860                 return NULL;
2861         }
2862
2863         cb->fn.rx = fn;
2864         cb->param = user_param;
2865
2866         rte_spinlock_lock(&rte_eth_rx_cb_lock);
2867         /* Add the callback at the first position */
2868         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
2869         rte_smp_wmb();
2870         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
2871         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
2872
2873         return cb;
2874 }
2875
2876 void *
2877 rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
2878                 rte_tx_callback_fn fn, void *user_param)
2879 {
2880 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2881         rte_errno = ENOTSUP;
2882         return NULL;
2883 #endif
2884         /* check input parameters */
2885         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2886                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
2887                 rte_errno = EINVAL;
2888                 return NULL;
2889         }
2890
2891         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2892
2893         if (cb == NULL) {
2894                 rte_errno = ENOMEM;
2895                 return NULL;
2896         }
2897
2898         cb->fn.tx = fn;
2899         cb->param = user_param;
2900
2901         rte_spinlock_lock(&rte_eth_tx_cb_lock);
2902         /* Add the callbacks in FIFO order. */
2903         struct rte_eth_rxtx_callback *tail =
2904                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
2905
2906         if (!tail) {
2907                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
2908
2909         } else {
2910                 while (tail->next)
2911                         tail = tail->next;
2912                 tail->next = cb;
2913         }
2914         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
2915
2916         return cb;
2917 }
2918
2919 int
2920 rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
2921                 struct rte_eth_rxtx_callback *user_cb)
2922 {
2923 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2924         return -ENOTSUP;
2925 #endif
2926         /* Check input parameters. */
2927         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2928         if (user_cb == NULL ||
2929                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
2930                 return -EINVAL;
2931
2932         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2933         struct rte_eth_rxtx_callback *cb;
2934         struct rte_eth_rxtx_callback **prev_cb;
2935         int ret = -EINVAL;
2936
2937         rte_spinlock_lock(&rte_eth_rx_cb_lock);
2938         prev_cb = &dev->post_rx_burst_cbs[queue_id];
2939         for (; *prev_cb != NULL; prev_cb = &cb->next) {
2940                 cb = *prev_cb;
2941                 if (cb == user_cb) {
2942                         /* Remove the user cb from the callback list. */
2943                         *prev_cb = cb->next;
2944                         ret = 0;
2945                         break;
2946                 }
2947         }
2948         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
2949
2950         return ret;
2951 }
2952
2953 int
2954 rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
2955                 struct rte_eth_rxtx_callback *user_cb)
2956 {
2957 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2958         return -ENOTSUP;
2959 #endif
2960         /* Check input parameters. */
2961         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2962         if (user_cb == NULL ||
2963                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
2964                 return -EINVAL;
2965
2966         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2967         int ret = -EINVAL;
2968         struct rte_eth_rxtx_callback *cb;
2969         struct rte_eth_rxtx_callback **prev_cb;
2970
2971         rte_spinlock_lock(&rte_eth_tx_cb_lock);
2972         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
2973         for (; *prev_cb != NULL; prev_cb = &cb->next) {
2974                 cb = *prev_cb;
2975                 if (cb == user_cb) {
2976                         /* Remove the user cb from the callback list. */
2977                         *prev_cb = cb->next;
2978                         ret = 0;
2979                         break;
2980                 }
2981         }
2982         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
2983
2984         return ret;
2985 }
2986
2987 int
2988 rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
2989         struct rte_eth_rxq_info *qinfo)
2990 {
2991         struct rte_eth_dev *dev;
2992
2993         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2994
2995         if (qinfo == NULL)
2996                 return -EINVAL;
2997
2998         dev = &rte_eth_devices[port_id];
2999         if (queue_id >= dev->data->nb_rx_queues) {
3000                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3001                 return -EINVAL;
3002         }
3003
3004         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3005
3006         memset(qinfo, 0, sizeof(*qinfo));
3007         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3008         return 0;
3009 }
3010
3011 int
3012 rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3013         struct rte_eth_txq_info *qinfo)
3014 {
3015         struct rte_eth_dev *dev;
3016
3017         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3018
3019         if (qinfo == NULL)
3020                 return -EINVAL;
3021
3022         dev = &rte_eth_devices[port_id];
3023         if (queue_id >= dev->data->nb_tx_queues) {
3024                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3025                 return -EINVAL;
3026         }
3027
3028         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3029
3030         memset(qinfo, 0, sizeof(*qinfo));
3031         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3032         return 0;
3033 }
3034
3035 int
3036 rte_eth_dev_set_mc_addr_list(uint8_t port_id,
3037                              struct ether_addr *mc_addr_set,
3038                              uint32_t nb_mc_addr)
3039 {
3040         struct rte_eth_dev *dev;
3041
3042         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3043
3044         dev = &rte_eth_devices[port_id];
3045         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3046         return dev->dev_ops->set_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
3047 }
3048
3049 int
3050 rte_eth_timesync_enable(uint8_t port_id)
3051 {
3052         struct rte_eth_dev *dev;
3053
3054         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3055         dev = &rte_eth_devices[port_id];
3056
3057         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3058         return (*dev->dev_ops->timesync_enable)(dev);
3059 }
3060
3061 int
3062 rte_eth_timesync_disable(uint8_t port_id)
3063 {
3064         struct rte_eth_dev *dev;
3065
3066         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3067         dev = &rte_eth_devices[port_id];
3068
3069         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3070         return (*dev->dev_ops->timesync_disable)(dev);
3071 }
3072
3073 int
3074 rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp,
3075                                    uint32_t flags)
3076 {
3077         struct rte_eth_dev *dev;
3078
3079         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3080         dev = &rte_eth_devices[port_id];
3081
3082         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3083         return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
3084 }
3085
3086 int
3087 rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp)
3088 {
3089         struct rte_eth_dev *dev;
3090
3091         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3092         dev = &rte_eth_devices[port_id];
3093
3094         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3095         return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
3096 }
3097
3098 int
3099 rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta)
3100 {
3101         struct rte_eth_dev *dev;
3102
3103         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3104         dev = &rte_eth_devices[port_id];
3105
3106         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
3107         return (*dev->dev_ops->timesync_adjust_time)(dev, delta);
3108 }
3109
3110 int
3111 rte_eth_timesync_read_time(uint8_t port_id, struct timespec *timestamp)
3112 {
3113         struct rte_eth_dev *dev;
3114
3115         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3116         dev = &rte_eth_devices[port_id];
3117
3118         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
3119         return (*dev->dev_ops->timesync_read_time)(dev, timestamp);
3120 }
3121
3122 int
3123 rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *timestamp)
3124 {
3125         struct rte_eth_dev *dev;
3126
3127         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3128         dev = &rte_eth_devices[port_id];
3129
3130         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
3131         return (*dev->dev_ops->timesync_write_time)(dev, timestamp);
3132 }
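
/*
 * Usage sketch (illustrative only): enable IEEE 1588 support and sample
 * the device clock once; per-packet timestamps then come from the
 * timesync_read_rx/tx_timestamp calls above.
 *
 *	struct timespec ts;
 *
 *	if (rte_eth_timesync_enable(port_id) == 0 &&
 *	    rte_eth_timesync_read_time(port_id, &ts) == 0)
 *		... ts holds the current device time ...
 */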
3133
3134 int
3135 rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info)
3136 {
3137         struct rte_eth_dev *dev;
3138
3139         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3140
3141         dev = &rte_eth_devices[port_id];
3142         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
3143         return (*dev->dev_ops->get_reg)(dev, info);
3144 }

int
rte_eth_dev_get_eeprom_length(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
	return (*dev->dev_ops->get_eeprom_length)(dev);
}

int
rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
	return (*dev->dev_ops->get_eeprom)(dev, info);
}

int
rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
	return (*dev->dev_ops->set_eeprom)(dev, info);
}
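
/*
 * Usage sketch (illustrative only): read-modify-write of the device EEPROM.
 * The "magic" field of rte_dev_eeprom_info is driver-defined; reading first
 * and writing back the same structure is the safe way to satisfy a PMD's
 * magic check.
 *
 *	struct rte_dev_eeprom_info ee = { .offset = 0 };
 *	int len = rte_eth_dev_get_eeprom_length(port_id);
 *
 *	if (len <= 0)
 *		return;
 *	ee.length = (uint32_t)len;
 *	ee.data = malloc(len);
 *	if (ee.data == NULL)
 *		return;
 *	if (rte_eth_dev_get_eeprom(port_id, &ee) == 0) {
 *		// ... patch bytes in ee.data as needed;
 *		// ee.magic was filled in by the read above ...
 *		(void)rte_eth_dev_set_eeprom(port_id, &ee);
 *	}
 *	free(ee.data);
 */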

int
rte_eth_dev_get_dcb_info(uint8_t port_id,
			 struct rte_eth_dcb_info *dcb_info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (dcb_info == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid dcb_info parameter\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	/* Clear the output structure before handing it to the PMD so that
	 * fields the driver does not fill in read back as zero. */
	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
	return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
}
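
/*
 * Usage sketch (illustrative only): querying the DCB traffic-class setup
 * of a port configured for DCB, then printing the user-priority to
 * traffic-class mapping.
 *
 *	struct rte_eth_dcb_info dcb;
 *	uint8_t prio;
 *
 *	if (rte_eth_dev_get_dcb_info(port_id, &dcb) != 0)
 *		return;
 *	printf("%u traffic classes\n", dcb.nb_tcs);
 *	for (prio = 0; prio < ETH_DCB_NUM_USER_PRIORITIES; prio++)
 *		printf("  prio %u -> TC %u\n", prio, dcb.prio_tc[prio]);
 */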

void
rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev, struct rte_pci_device *pci_dev)
{
	if ((eth_dev == NULL) || (pci_dev == NULL)) {
		RTE_PMD_DEBUG_TRACE("NULL pointer eth_dev=%p pci_dev=%p\n",
				eth_dev, pci_dev);
		return;
	}

	/* Translate the PCI driver capability flags into ethdev flags. */
	eth_dev->data->dev_flags = 0;
	if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
	if (pci_dev->driver->drv_flags & RTE_PCI_DRV_DETACHABLE)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

	eth_dev->data->kdrv = pci_dev->kdrv;
	eth_dev->data->numa_node = pci_dev->numa_node;
	eth_dev->data->drv_name = pci_dev->driver->name;
}
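
/*
 * Usage sketch (illustrative only): how a hypothetical PCI PMD would call
 * rte_eth_copy_pci_info() from its eth_dev init hook, before relying on
 * any of the flags derived above.  "example_pmd_eth_dev_init" is not a
 * real driver; the pci_dev back-pointer is the one the ethdev layer set
 * up at allocation time.
 *
 *	static int
 *	example_pmd_eth_dev_init(struct rte_eth_dev *eth_dev)
 *	{
 *		struct rte_pci_device *pci_dev = eth_dev->pci_dev;
 *
 *		// mirror kdrv, NUMA node, driver name and the
 *		// LSC/DETACHABLE capability flags into dev->data
 *		rte_eth_copy_pci_info(eth_dev, pci_dev);
 *
 *		// ... hardware-specific initialization ...
 *		return 0;
 *	}
 */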

int
rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id,
				    struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (l2_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
		return -EINVAL;
	}

	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
				-ENOTSUP);
	return (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev, l2_tunnel);
}

int
rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
				  struct rte_eth_l2_tunnel_conf *l2_tunnel,
				  uint32_t mask,
				  uint8_t en)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (l2_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
		return -EINVAL;
	}

	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
		return -EINVAL;
	}

	if (mask == 0) {
		RTE_PMD_DEBUG_TRACE("Mask must be non-zero\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
				-ENOTSUP);
	return (*dev->dev_ops->l2_tunnel_offload_set)(dev, l2_tunnel, mask, en);
}
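
/*
 * Usage sketch (illustrative only): configuring 802.1BR E-Tag handling on
 * PMDs that support it.  0x893F is the standard E-Tag ether type; the mask
 * bits below are the ETH_L2_TUNNEL_*_MASK flags from rte_ethdev.h.
 *
 *	struct rte_eth_l2_tunnel_conf conf = {
 *		.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG,
 *		.ether_type = 0x893f,
 *	};
 *
 *	if (rte_eth_dev_l2_tunnel_eth_type_conf(port_id, &conf) != 0)
 *		return;
 *	(void)rte_eth_dev_l2_tunnel_offload_set(port_id, &conf,
 *			ETH_L2_TUNNEL_ENABLE_MASK |
 *			ETH_L2_TUNNEL_STRIPPING_MASK,
 *			1); // en = 1: enable the selected offloads
 */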