ethdev: free queue array after releasing all queues
[dpdk.git] / lib / librte_ether / rte_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/types.h>
35 #include <sys/queue.h>
36 #include <ctype.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include <stdarg.h>
41 #include <errno.h>
42 #include <stdint.h>
43 #include <inttypes.h>
44 #include <netinet/in.h>
45
46 #include <rte_byteorder.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_interrupts.h>
50 #include <rte_pci.h>
51 #include <rte_memory.h>
52 #include <rte_memcpy.h>
53 #include <rte_memzone.h>
54 #include <rte_launch.h>
55 #include <rte_eal.h>
56 #include <rte_per_lcore.h>
57 #include <rte_lcore.h>
58 #include <rte_atomic.h>
59 #include <rte_branch_prediction.h>
60 #include <rte_common.h>
61 #include <rte_mempool.h>
62 #include <rte_malloc.h>
63 #include <rte_mbuf.h>
64 #include <rte_errno.h>
65 #include <rte_spinlock.h>
66 #include <rte_string_fns.h>
67
68 #include "rte_ether.h"
69 #include "rte_ethdev.h"
70
71 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
72 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
73 static struct rte_eth_dev_data *rte_eth_dev_data;
74 static uint8_t eth_dev_last_created_port;
75 static uint8_t nb_ports;
76
77 /* spinlock for eth device callbacks */
78 static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
79
80 /* spinlock for add/remove rx callbacks */
81 static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
82
83 /* spinlock for add/remove tx callbacks */
84 static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
85
86 /* store statistics names and their offsets in the stats structure */
87 struct rte_eth_xstats_name_off {
88         char name[RTE_ETH_XSTATS_NAME_SIZE];
89         unsigned offset;
90 };
91
92 static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
93         {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
94         {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
95         {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
96         {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
97         {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
98         {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
99         {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
100                 rx_nombuf)},
101 };
102
103 #define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
104
105 static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
106         {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
107         {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
108         {"errors", offsetof(struct rte_eth_stats, q_errors)},
109 };
110
111 #define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
112                 sizeof(rte_rxq_stats_strings[0]))
113
114 static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
115         {"packets", offsetof(struct rte_eth_stats, q_opackets)},
116         {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
117 };
118 #define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
119                 sizeof(rte_txq_stats_strings[0]))
120
121
122 /**
123  * The user application callback description.
124  *
125  * It contains the callback address registered by the user application,
126  * the pointer to the callback parameters, and the event type.
127  */
128 struct rte_eth_dev_callback {
129         TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
130         rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
131         void *cb_arg;                           /**< Parameter for callback */
132         enum rte_eth_event_type event;          /**< Interrupt event type */
133         uint32_t active;                        /**< Callback is executing */
134 };
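/*
 * Illustrative usage sketch (not part of this file): an application registers
 * a callback of this kind with rte_eth_dev_callback_register(). The handler
 * name and its body below are hypothetical examples for this API version.
 *
 *     static void
 *     lsc_event_handler(uint8_t port_id, enum rte_eth_event_type event,
 *                       void *cb_arg __rte_unused)
 *     {
 *             struct rte_eth_link link;
 *
 *             if (event != RTE_ETH_EVENT_INTR_LSC)
 *                     return;
 *             rte_eth_link_get_nowait(port_id, &link);
 *             printf("port %u link %s\n", port_id,
 *                    link.link_status ? "up" : "down");
 *     }
 *
 *     rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                                   lsc_event_handler, NULL);
 */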
135
136 enum {
137         STAT_QMAP_TX = 0,
138         STAT_QMAP_RX
139 };
140
141 enum {
142         DEV_DETACHED = 0,
143         DEV_ATTACHED
144 };
145
146 static void
147 rte_eth_dev_data_alloc(void)
148 {
149         const unsigned flags = 0;
150         const struct rte_memzone *mz;
151
152         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
153                 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
154                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
155                                 rte_socket_id(), flags);
156         } else
157                 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
158         if (mz == NULL)
159                 rte_panic("Cannot allocate memzone for ethernet port data\n");
160
161         rte_eth_dev_data = mz->addr;
162         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
163                 memset(rte_eth_dev_data, 0,
164                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
165 }
166
167 struct rte_eth_dev *
168 rte_eth_dev_allocated(const char *name)
169 {
170         unsigned i;
171
172         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
173                 if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
174                     strcmp(rte_eth_devices[i].data->name, name) == 0)
175                         return &rte_eth_devices[i];
176         }
177         return NULL;
178 }
179
180 static uint8_t
181 rte_eth_dev_find_free_port(void)
182 {
183         unsigned i;
184
185         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
186                 if (rte_eth_devices[i].attached == DEV_DETACHED)
187                         return i;
188         }
189         return RTE_MAX_ETHPORTS;
190 }
191
192 struct rte_eth_dev *
193 rte_eth_dev_allocate(const char *name)
194 {
195         uint8_t port_id;
196         struct rte_eth_dev *eth_dev;
197
198         port_id = rte_eth_dev_find_free_port();
199         if (port_id == RTE_MAX_ETHPORTS) {
200                 RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
201                 return NULL;
202         }
203
204         if (rte_eth_dev_data == NULL)
205                 rte_eth_dev_data_alloc();
206
207         if (rte_eth_dev_allocated(name) != NULL) {
208                 RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
209                                 name);
210                 return NULL;
211         }
212
213         eth_dev = &rte_eth_devices[port_id];
214         eth_dev->data = &rte_eth_dev_data[port_id];
215         memset(eth_dev->data, 0, sizeof(*eth_dev->data));
216         snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
217         eth_dev->data->port_id = port_id;
218         eth_dev->data->mtu = ETHER_MTU;
219         TAILQ_INIT(&(eth_dev->link_intr_cbs));
220
221         eth_dev->attached = DEV_ATTACHED;
222         eth_dev_last_created_port = port_id;
223         nb_ports++;
224         return eth_dev;
225 }
226
227 int
228 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
229 {
230         if (eth_dev == NULL)
231                 return -EINVAL;
232
233         eth_dev->attached = DEV_DETACHED;
234         nb_ports--;
235         return 0;
236 }
237
238 int
239 rte_eth_dev_pci_probe(struct rte_pci_driver *pci_drv,
240                       struct rte_pci_device *pci_dev)
241 {
242         struct eth_driver    *eth_drv;
243         struct rte_eth_dev *eth_dev;
244         char ethdev_name[RTE_ETH_NAME_MAX_LEN];
245
246         int diag;
247
248         eth_drv = (struct eth_driver *)pci_drv;
249
250         rte_eal_pci_device_name(&pci_dev->addr, ethdev_name,
251                         sizeof(ethdev_name));
252
253         eth_dev = rte_eth_dev_allocate(ethdev_name);
254         if (eth_dev == NULL)
255                 return -ENOMEM;
256
257         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
258                 eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
259                                   eth_drv->dev_private_size,
260                                   RTE_CACHE_LINE_SIZE);
261                 if (eth_dev->data->dev_private == NULL)
262                         rte_panic("Cannot allocate memzone for private port data\n");
263         }
264         eth_dev->pci_dev = pci_dev;
265         eth_dev->driver = eth_drv;
266
267         /* Invoke PMD device initialization function */
268         diag = (*eth_drv->eth_dev_init)(eth_dev);
269         if (diag == 0)
270                 return 0;
271
272         RTE_PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%x device_id=0x%x) failed\n",
273                         pci_drv->driver.name,
274                         (unsigned) pci_dev->id.vendor_id,
275                         (unsigned) pci_dev->id.device_id);
276         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
277                 rte_free(eth_dev->data->dev_private);
278         rte_eth_dev_release_port(eth_dev);
279         return diag;
280 }
281
282 int
283 rte_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
284 {
285         const struct eth_driver *eth_drv;
286         struct rte_eth_dev *eth_dev;
287         char ethdev_name[RTE_ETH_NAME_MAX_LEN];
288         int ret;
289
290         if (pci_dev == NULL)
291                 return -EINVAL;
292
293         rte_eal_pci_device_name(&pci_dev->addr, ethdev_name,
294                         sizeof(ethdev_name));
295
296         eth_dev = rte_eth_dev_allocated(ethdev_name);
297         if (eth_dev == NULL)
298                 return -ENODEV;
299
300         eth_drv = (const struct eth_driver *)pci_dev->driver;
301
302         /* Invoke PMD device uninit function */
303         if (*eth_drv->eth_dev_uninit) {
304                 ret = (*eth_drv->eth_dev_uninit)(eth_dev);
305                 if (ret)
306                         return ret;
307         }
308
309         /* free ether device */
310         rte_eth_dev_release_port(eth_dev);
311
312         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
313                 rte_free(eth_dev->data->dev_private);
314
315         eth_dev->pci_dev = NULL;
316         eth_dev->driver = NULL;
317         eth_dev->data = NULL;
318
319         return 0;
320 }
321
322 int
323 rte_eth_dev_is_valid_port(uint8_t port_id)
324 {
325         if (port_id >= RTE_MAX_ETHPORTS ||
326             rte_eth_devices[port_id].attached != DEV_ATTACHED)
327                 return 0;
328         else
329                 return 1;
330 }
331
332 int
333 rte_eth_dev_socket_id(uint8_t port_id)
334 {
335         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
336         return rte_eth_devices[port_id].data->numa_node;
337 }
338
339 uint8_t
340 rte_eth_dev_count(void)
341 {
342         return nb_ports;
343 }
344
345 int
346 rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
347 {
348         char *tmp;
349
350         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
351
352         if (name == NULL) {
353                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
354                 return -EINVAL;
355         }
356
357         /* shouldn't check 'rte_eth_devices[i].data' here,
358          * because it might be overwritten by a VDEV PMD */
359         tmp = rte_eth_dev_data[port_id].name;
360         strcpy(name, tmp);
361         return 0;
362 }
363
364 int
365 rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
366 {
367         int i;
368
369         if (name == NULL) {
370                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
371                 return -EINVAL;
372         }
373
374         if (!nb_ports)
375                 return -ENODEV;
376
377         *port_id = RTE_MAX_ETHPORTS;
378
379         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
380
381                 if (!strncmp(name,
382                         rte_eth_dev_data[i].name, strlen(name))) {
383
384                         *port_id = i;
385
386                         return 0;
387                 }
388         }
389         return -ENODEV;
390 }
391
392 static int
393 rte_eth_dev_is_detachable(uint8_t port_id)
394 {
395         uint32_t dev_flags;
396
397         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
398
399         switch (rte_eth_devices[port_id].data->kdrv) {
400         case RTE_KDRV_IGB_UIO:
401         case RTE_KDRV_UIO_GENERIC:
402         case RTE_KDRV_NIC_UIO:
403         case RTE_KDRV_NONE:
404                 break;
405         case RTE_KDRV_VFIO:
406         default:
407                 return -ENOTSUP;
408         }
409         dev_flags = rte_eth_devices[port_id].data->dev_flags;
410         if ((dev_flags & RTE_ETH_DEV_DETACHABLE) &&
411                 (!(dev_flags & RTE_ETH_DEV_BONDED_SLAVE)))
412                 return 0;
413         else
414                 return 1;
415 }
416
417 /* attach the new device, then store port_id of the device */
418 int
419 rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
420 {
421         int ret = -1;
422         int current = rte_eth_dev_count();
423         char *name = NULL;
424         char *args = NULL;
425
426         if ((devargs == NULL) || (port_id == NULL)) {
427                 ret = -EINVAL;
428                 goto err;
429         }
430
431         /* parse devargs, then retrieve device name and args */
432         if (rte_eal_parse_devargs_str(devargs, &name, &args))
433                 goto err;
434
435         ret = rte_eal_dev_attach(name, args);
436         if (ret < 0)
437                 goto err;
438
439         /* no point looking at the port count if no port exists */
440         if (!rte_eth_dev_count()) {
441                 RTE_LOG(ERR, EAL, "No port found for device (%s)\n", name);
442                 ret = -1;
443                 goto err;
444         }
445
446         /* if the port count did not change, there is a bug here: the driver
447          * reported that it attached a device but did not create a port.
448          */
449         if (current == rte_eth_dev_count()) {
450                 ret = -1;
451                 goto err;
452         }
453
454         *port_id = eth_dev_last_created_port;
455         ret = 0;
456
457 err:
458         free(name);
459         free(args);
460         return ret;
461 }
462
463 /* detach the device, then store the name of the device */
464 int
465 rte_eth_dev_detach(uint8_t port_id, char *name)
466 {
467         int ret = -1;
468
469         if (name == NULL) {
470                 ret = -EINVAL;
471                 goto err;
472         }
473
474         /* FIXME: move this to eal, once device flags are relocated there */
475         if (rte_eth_dev_is_detachable(port_id))
476                 goto err;
477
478         snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
479                  "%s", rte_eth_devices[port_id].data->name);
480         ret = rte_eal_dev_detach(name);
481         if (ret < 0)
482                 goto err;
483
484         return 0;
485
486 err:
487         return ret;
488 }
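/*
 * Illustrative usage sketch (not part of this file): attaching a device from
 * a devargs string and detaching it again. The devargs value "net_null0" is
 * only an example; the exact string depends on the PMD being used.
 *
 *     uint8_t port_id;
 *     char name[RTE_ETH_NAME_MAX_LEN];
 *
 *     if (rte_eth_dev_attach("net_null0", &port_id) < 0)
 *             rte_exit(EXIT_FAILURE, "attach failed\n");
 *     ...
 *     if (rte_eth_dev_detach(port_id, name) < 0)
 *             printf("could not detach port %u\n", port_id);
 *
 * Note that detach only succeeds for devices flagged RTE_ETH_DEV_DETACHABLE,
 * as checked by rte_eth_dev_is_detachable() above.
 */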
489
490 static int
491 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
492 {
493         uint16_t old_nb_queues = dev->data->nb_rx_queues;
494         void **rxq;
495         unsigned i;
496
497         if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
498                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
499                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
500                                 RTE_CACHE_LINE_SIZE);
501                 if (dev->data->rx_queues == NULL) {
502                         dev->data->nb_rx_queues = 0;
503                         return -(ENOMEM);
504                 }
505         } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
506                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
507
508                 rxq = dev->data->rx_queues;
509
510                 for (i = nb_queues; i < old_nb_queues; i++)
511                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
512                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
513                                 RTE_CACHE_LINE_SIZE);
514                 if (rxq == NULL)
515                         return -(ENOMEM);
516                 if (nb_queues > old_nb_queues) {
517                         uint16_t new_qs = nb_queues - old_nb_queues;
518
519                         memset(rxq + old_nb_queues, 0,
520                                 sizeof(rxq[0]) * new_qs);
521                 }
522
523                 dev->data->rx_queues = rxq;
524
525         } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
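                /* shutting down all queues: release each queue first, then
                 * free the queue array itself, in the order the commit
                 * subject above describes
                 */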
526                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
527
528                 rxq = dev->data->rx_queues;
529
530                 for (i = nb_queues; i < old_nb_queues; i++)
531                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
532
533                 rte_free(dev->data->rx_queues);
534                 dev->data->rx_queues = NULL;
535         }
536         dev->data->nb_rx_queues = nb_queues;
537         return 0;
538 }
539
540 int
541 rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
542 {
543         struct rte_eth_dev *dev;
544
545         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
546
547         dev = &rte_eth_devices[port_id];
548         if (rx_queue_id >= dev->data->nb_rx_queues) {
549                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
550                 return -EINVAL;
551         }
552
553         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
554
555         if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
556                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
557                         " already started\n",
558                         rx_queue_id, port_id);
559                 return 0;
560         }
561
562         return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
563
564 }
565
566 int
567 rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
568 {
569         struct rte_eth_dev *dev;
570
571         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
572
573         dev = &rte_eth_devices[port_id];
574         if (rx_queue_id >= dev->data->nb_rx_queues) {
575                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
576                 return -EINVAL;
577         }
578
579         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
580
581         if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
582                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
583                         " already stopped\n",
584                         rx_queue_id, port_id);
585                 return 0;
586         }
587
588         return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
589
590 }
591
592 int
593 rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
594 {
595         struct rte_eth_dev *dev;
596
597         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
598
599         dev = &rte_eth_devices[port_id];
600         if (tx_queue_id >= dev->data->nb_tx_queues) {
601                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
602                 return -EINVAL;
603         }
604
605         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
606
607         if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
608                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
609                         " already started\n",
610                         tx_queue_id, port_id);
611                 return 0;
612         }
613
614         return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
615
616 }
617
618 int
619 rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
620 {
621         struct rte_eth_dev *dev;
622
623         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
624
625         dev = &rte_eth_devices[port_id];
626         if (tx_queue_id >= dev->data->nb_tx_queues) {
627                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
628                 return -EINVAL;
629         }
630
631         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
632
633         if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
634                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
635                         " already stopped\n",
636                         tx_queue_id, port_id);
637                 return 0;
638         }
639
640         return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
641
642 }
643
644 static int
645 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
646 {
647         uint16_t old_nb_queues = dev->data->nb_tx_queues;
648         void **txq;
649         unsigned i;
650
651         if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
652                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
653                                                    sizeof(dev->data->tx_queues[0]) * nb_queues,
654                                                    RTE_CACHE_LINE_SIZE);
655                 if (dev->data->tx_queues == NULL) {
656                         dev->data->nb_tx_queues = 0;
657                         return -(ENOMEM);
658                 }
659         } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
660                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
661
662                 txq = dev->data->tx_queues;
663
664                 for (i = nb_queues; i < old_nb_queues; i++)
665                         (*dev->dev_ops->tx_queue_release)(txq[i]);
666                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
667                                   RTE_CACHE_LINE_SIZE);
668                 if (txq == NULL)
669                         return -ENOMEM;
670                 if (nb_queues > old_nb_queues) {
671                         uint16_t new_qs = nb_queues - old_nb_queues;
672
673                         memset(txq + old_nb_queues, 0,
674                                sizeof(txq[0]) * new_qs);
675                 }
676
677                 dev->data->tx_queues = txq;
678
679         } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
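                /* shutting down all queues: release each TX queue first, then
                 * free the queue array itself (same ordering as the RX path
                 * above)
                 */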
680                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
681
682                 txq = dev->data->tx_queues;
683
684                 for (i = nb_queues; i < old_nb_queues; i++)
685                         (*dev->dev_ops->tx_queue_release)(txq[i]);
686
687                 rte_free(dev->data->tx_queues);
688                 dev->data->tx_queues = NULL;
689         }
690         dev->data->nb_tx_queues = nb_queues;
691         return 0;
692 }
693
694 uint32_t
695 rte_eth_speed_bitflag(uint32_t speed, int duplex)
696 {
697         switch (speed) {
698         case ETH_SPEED_NUM_10M:
699                 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
700         case ETH_SPEED_NUM_100M:
701                 return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
702         case ETH_SPEED_NUM_1G:
703                 return ETH_LINK_SPEED_1G;
704         case ETH_SPEED_NUM_2_5G:
705                 return ETH_LINK_SPEED_2_5G;
706         case ETH_SPEED_NUM_5G:
707                 return ETH_LINK_SPEED_5G;
708         case ETH_SPEED_NUM_10G:
709                 return ETH_LINK_SPEED_10G;
710         case ETH_SPEED_NUM_20G:
711                 return ETH_LINK_SPEED_20G;
712         case ETH_SPEED_NUM_25G:
713                 return ETH_LINK_SPEED_25G;
714         case ETH_SPEED_NUM_40G:
715                 return ETH_LINK_SPEED_40G;
716         case ETH_SPEED_NUM_50G:
717                 return ETH_LINK_SPEED_50G;
718         case ETH_SPEED_NUM_56G:
719                 return ETH_LINK_SPEED_56G;
720         case ETH_SPEED_NUM_100G:
721                 return ETH_LINK_SPEED_100G;
722         default:
723                 return 0;
724         }
725 }
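/*
 * Illustrative usage sketch (not part of this file): building the link_speeds
 * mask of struct rte_eth_conf from a numeric speed, for example to force
 * 10G full duplex instead of autonegotiation:
 *
 *     struct rte_eth_conf conf = { 0 };
 *
 *     conf.link_speeds = ETH_LINK_SPEED_FIXED |
 *                        rte_eth_speed_bitflag(ETH_SPEED_NUM_10G,
 *                                              ETH_LINK_FULL_DUPLEX);
 */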
726
727 int
728 rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
729                       const struct rte_eth_conf *dev_conf)
730 {
731         struct rte_eth_dev *dev;
732         struct rte_eth_dev_info dev_info;
733         int diag;
734
735         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
736
737         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
738                 RTE_PMD_DEBUG_TRACE(
739                         "Number of RX queues requested (%u) is greater than max supported(%d)\n",
740                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
741                 return -EINVAL;
742         }
743
744         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
745                 RTE_PMD_DEBUG_TRACE(
746                         "Number of TX queues requested (%u) is greater than max supported(%d)\n",
747                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
748                 return -EINVAL;
749         }
750
751         dev = &rte_eth_devices[port_id];
752
753         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
754         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
755
756         if (dev->data->dev_started) {
757                 RTE_PMD_DEBUG_TRACE(
758                     "port %d must be stopped to allow configuration\n", port_id);
759                 return -EBUSY;
760         }
761
762         /* Copy the dev_conf parameter into the dev structure */
763         memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
764
765         /*
766          * Check that the numbers of RX and TX queues are not greater
767          * than the maximum number of RX and TX queues supported by the
768          * configured device.
769          */
770         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
771
772         if (nb_rx_q == 0 && nb_tx_q == 0) {
773                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d: RX and TX queue counts cannot both be 0\n", port_id);
774                 return -EINVAL;
775         }
776
777         if (nb_rx_q > dev_info.max_rx_queues) {
778                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
779                                 port_id, nb_rx_q, dev_info.max_rx_queues);
780                 return -EINVAL;
781         }
782
783         if (nb_tx_q > dev_info.max_tx_queues) {
784                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
785                                 port_id, nb_tx_q, dev_info.max_tx_queues);
786                 return -EINVAL;
787         }
788
789         /*
790          * If link state interrupt is enabled, check that the
791          * device supports it.
792          */
793         if ((dev_conf->intr_conf.lsc == 1) &&
794                 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
795                         RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
796                                         dev->data->drv_name);
797                         return -EINVAL;
798         }
799
800         /*
801          * If jumbo frames are enabled, check that the maximum RX packet
802          * length is supported by the configured device.
803          */
804         if (dev_conf->rxmode.jumbo_frame == 1) {
805                 if (dev_conf->rxmode.max_rx_pkt_len >
806                     dev_info.max_rx_pktlen) {
807                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
808                                 " > max valid value %u\n",
809                                 port_id,
810                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
811                                 (unsigned)dev_info.max_rx_pktlen);
812                         return -EINVAL;
813                 } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
814                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
815                                 " < min valid value %u\n",
816                                 port_id,
817                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
818                                 (unsigned)ETHER_MIN_LEN);
819                         return -EINVAL;
820                 }
821         } else {
822                 if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
823                         dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
824                         /* Use default value */
825                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
826                                                         ETHER_MAX_LEN;
827         }
828
829         /*
830          * Setup new number of RX/TX queues and reconfigure device.
831          */
832         diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
833         if (diag != 0) {
834                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
835                                 port_id, diag);
836                 return diag;
837         }
838
839         diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
840         if (diag != 0) {
841                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
842                                 port_id, diag);
843                 rte_eth_dev_rx_queue_config(dev, 0);
844                 return diag;
845         }
846
847         diag = (*dev->dev_ops->dev_configure)(dev);
848         if (diag != 0) {
849                 RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
850                                 port_id, diag);
851                 rte_eth_dev_rx_queue_config(dev, 0);
852                 rte_eth_dev_tx_queue_config(dev, 0);
853                 return diag;
854         }
855
856         return 0;
857 }
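/*
 * Illustrative usage sketch (not part of this file): the typical order of
 * calls for bringing up a port with one RX and one TX queue. The name
 * "mbuf_pool" and the ring sizes are hypothetical.
 *
 *     struct rte_eth_conf port_conf = {
 *             .rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN },
 *     };
 *     int socket = rte_eth_dev_socket_id(port_id);
 *
 *     if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) < 0)
 *             return -1;
 *     if (rte_eth_rx_queue_setup(port_id, 0, 128, socket, NULL, mbuf_pool) < 0)
 *             return -1;
 *     if (rte_eth_tx_queue_setup(port_id, 0, 512, socket, NULL) < 0)
 *             return -1;
 *     if (rte_eth_dev_start(port_id) < 0)
 *             return -1;
 *
 * Passing NULL for rx_conf/tx_conf selects the driver defaults reported by
 * rte_eth_dev_info_get(), as handled in rte_eth_rx_queue_setup() below.
 */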
858
859 static void
860 rte_eth_dev_config_restore(uint8_t port_id)
861 {
862         struct rte_eth_dev *dev;
863         struct rte_eth_dev_info dev_info;
864         struct ether_addr addr;
865         uint16_t i;
866         uint32_t pool = 0;
867
868         dev = &rte_eth_devices[port_id];
869
870         rte_eth_dev_info_get(port_id, &dev_info);
871
872         if (RTE_ETH_DEV_SRIOV(dev).active)
873                 pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;
874
875         /* replay MAC address configuration */
876         for (i = 0; i < dev_info.max_mac_addrs; i++) {
877                 addr = dev->data->mac_addrs[i];
878
879                 /* skip zero address */
880                 if (is_zero_ether_addr(&addr))
881                         continue;
882
883                 /* add address to the hardware */
884                 if  (*dev->dev_ops->mac_addr_add &&
885                         (dev->data->mac_pool_sel[i] & (1ULL << pool)))
886                         (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
887                 else {
888                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
889                                         port_id);
890                         /* exit the loop but do not return an error */
891                         break;
892                 }
893         }
894
895         /* replay promiscuous configuration */
896         if (rte_eth_promiscuous_get(port_id) == 1)
897                 rte_eth_promiscuous_enable(port_id);
898         else if (rte_eth_promiscuous_get(port_id) == 0)
899                 rte_eth_promiscuous_disable(port_id);
900
901         /* replay all multicast configuration */
902         if (rte_eth_allmulticast_get(port_id) == 1)
903                 rte_eth_allmulticast_enable(port_id);
904         else if (rte_eth_allmulticast_get(port_id) == 0)
905                 rte_eth_allmulticast_disable(port_id);
906 }
907
908 int
909 rte_eth_dev_start(uint8_t port_id)
910 {
911         struct rte_eth_dev *dev;
912         int diag;
913
914         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
915
916         dev = &rte_eth_devices[port_id];
917
918         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
919
920         if (dev->data->dev_started != 0) {
921                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
922                         " already started\n",
923                         port_id);
924                 return 0;
925         }
926
927         diag = (*dev->dev_ops->dev_start)(dev);
928         if (diag == 0)
929                 dev->data->dev_started = 1;
930         else
931                 return diag;
932
933         rte_eth_dev_config_restore(port_id);
934
935         if (dev->data->dev_conf.intr_conf.lsc == 0) {
936                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
937                 (*dev->dev_ops->link_update)(dev, 0);
938         }
939         return 0;
940 }
941
942 void
943 rte_eth_dev_stop(uint8_t port_id)
944 {
945         struct rte_eth_dev *dev;
946
947         RTE_ETH_VALID_PORTID_OR_RET(port_id);
948         dev = &rte_eth_devices[port_id];
949
950         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
951
952         if (dev->data->dev_started == 0) {
953                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
954                         " already stopped\n",
955                         port_id);
956                 return;
957         }
958
959         dev->data->dev_started = 0;
960         (*dev->dev_ops->dev_stop)(dev);
961 }
962
963 int
964 rte_eth_dev_set_link_up(uint8_t port_id)
965 {
966         struct rte_eth_dev *dev;
967
968         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
969
970         dev = &rte_eth_devices[port_id];
971
972         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
973         return (*dev->dev_ops->dev_set_link_up)(dev);
974 }
975
976 int
977 rte_eth_dev_set_link_down(uint8_t port_id)
978 {
979         struct rte_eth_dev *dev;
980
981         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
982
983         dev = &rte_eth_devices[port_id];
984
985         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
986         return (*dev->dev_ops->dev_set_link_down)(dev);
987 }
988
989 void
990 rte_eth_dev_close(uint8_t port_id)
991 {
992         struct rte_eth_dev *dev;
993
994         RTE_ETH_VALID_PORTID_OR_RET(port_id);
995         dev = &rte_eth_devices[port_id];
996
997         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
998         dev->data->dev_started = 0;
999         (*dev->dev_ops->dev_close)(dev);
1000
1001         rte_free(dev->data->rx_queues);
1002         dev->data->rx_queues = NULL;
1003         rte_free(dev->data->tx_queues);
1004         dev->data->tx_queues = NULL;
1005 }
1006
1007 int
1008 rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
1009                        uint16_t nb_rx_desc, unsigned int socket_id,
1010                        const struct rte_eth_rxconf *rx_conf,
1011                        struct rte_mempool *mp)
1012 {
1013         int ret;
1014         uint32_t mbp_buf_size;
1015         struct rte_eth_dev *dev;
1016         struct rte_eth_dev_info dev_info;
1017         void **rxq;
1018
1019         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1020
1021         dev = &rte_eth_devices[port_id];
1022         if (rx_queue_id >= dev->data->nb_rx_queues) {
1023                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
1024                 return -EINVAL;
1025         }
1026
1027         if (dev->data->dev_started) {
1028                 RTE_PMD_DEBUG_TRACE(
1029                     "port %d must be stopped to allow configuration\n", port_id);
1030                 return -EBUSY;
1031         }
1032
1033         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1034         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1035
1036         /*
1037          * Check the size of the mbuf data buffer.
1038          * This value must be provided in the private data of the memory pool.
1039          * First check that the memory pool has valid private data.
1040          */
1041         rte_eth_dev_info_get(port_id, &dev_info);
1042         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1043                 RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
1044                                 mp->name, (int) mp->private_data_size,
1045                                 (int) sizeof(struct rte_pktmbuf_pool_private));
1046                 return -ENOSPC;
1047         }
1048         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1049
1050         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1051                 RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
1052                                 "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
1053                                 "=%d)\n",
1054                                 mp->name,
1055                                 (int)mbp_buf_size,
1056                                 (int)(RTE_PKTMBUF_HEADROOM +
1057                                       dev_info.min_rx_bufsize),
1058                                 (int)RTE_PKTMBUF_HEADROOM,
1059                                 (int)dev_info.min_rx_bufsize);
1060                 return -EINVAL;
1061         }
1062
1063         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1064                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1065                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1066
1067                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
1068                         "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1069                         nb_rx_desc,
1070                         dev_info.rx_desc_lim.nb_max,
1071                         dev_info.rx_desc_lim.nb_min,
1072                         dev_info.rx_desc_lim.nb_align);
1073                 return -EINVAL;
1074         }
1075
1076         rxq = dev->data->rx_queues;
1077         if (rxq[rx_queue_id]) {
1078                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1079                                         -ENOTSUP);
1080                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1081                 rxq[rx_queue_id] = NULL;
1082         }
1083
1084         if (rx_conf == NULL)
1085                 rx_conf = &dev_info.default_rxconf;
1086
1087         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1088                                               socket_id, rx_conf, mp);
1089         if (!ret) {
1090                 if (!dev->data->min_rx_buf_size ||
1091                     dev->data->min_rx_buf_size > mbp_buf_size)
1092                         dev->data->min_rx_buf_size = mbp_buf_size;
1093         }
1094
1095         return ret;
1096 }
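/*
 * Illustrative note (not part of this file): the mempool passed to
 * rte_eth_rx_queue_setup() must provide at least RTE_PKTMBUF_HEADROOM +
 * dev_info.min_rx_bufsize bytes of data room per mbuf, as checked above.
 * A hypothetical pool creation satisfying that constraint:
 *
 *     struct rte_mempool *mbuf_pool;
 *
 *     mbuf_pool = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *                                         RTE_MBUF_DEFAULT_BUF_SIZE,
 *                                         rte_socket_id());
 *     if (mbuf_pool == NULL)
 *             rte_exit(EXIT_FAILURE, "cannot create mbuf pool\n");
 */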
1097
1098 int
1099 rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
1100                        uint16_t nb_tx_desc, unsigned int socket_id,
1101                        const struct rte_eth_txconf *tx_conf)
1102 {
1103         struct rte_eth_dev *dev;
1104         struct rte_eth_dev_info dev_info;
1105         void **txq;
1106
1107         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1108
1109         dev = &rte_eth_devices[port_id];
1110         if (tx_queue_id >= dev->data->nb_tx_queues) {
1111                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1112                 return -EINVAL;
1113         }
1114
1115         if (dev->data->dev_started) {
1116                 RTE_PMD_DEBUG_TRACE(
1117                     "port %d must be stopped to allow configuration\n", port_id);
1118                 return -EBUSY;
1119         }
1120
1121         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1122         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1123
1124         rte_eth_dev_info_get(port_id, &dev_info);
1125
1126         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1127             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1128             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1129                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
1130                                 "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1131                                 nb_tx_desc,
1132                                 dev_info.tx_desc_lim.nb_max,
1133                                 dev_info.tx_desc_lim.nb_min,
1134                                 dev_info.tx_desc_lim.nb_align);
1135                 return -EINVAL;
1136         }
1137
1138         txq = dev->data->tx_queues;
1139         if (txq[tx_queue_id]) {
1140                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
1141                                         -ENOTSUP);
1142                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
1143                 txq[tx_queue_id] = NULL;
1144         }
1145
1146         if (tx_conf == NULL)
1147                 tx_conf = &dev_info.default_txconf;
1148
1149         return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
1150                                                socket_id, tx_conf);
1151 }
1152
1153 void
1154 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
1155                 void *userdata __rte_unused)
1156 {
1157         unsigned i;
1158
1159         for (i = 0; i < unsent; i++)
1160                 rte_pktmbuf_free(pkts[i]);
1161 }
1162
1163 void
1164 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
1165                 void *userdata)
1166 {
1167         uint64_t *count = userdata;
1168         unsigned i;
1169
1170         for (i = 0; i < unsent; i++)
1171                 rte_pktmbuf_free(pkts[i]);
1172
1173         *count += unsent;
1174 }
1175
1176 int
1177 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
1178                 buffer_tx_error_fn cbfn, void *userdata)
1179 {
1180         buffer->error_callback = cbfn;
1181         buffer->error_userdata = userdata;
1182         return 0;
1183 }
1184
1185 int
1186 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
1187 {
1188         int ret = 0;
1189
1190         if (buffer == NULL)
1191                 return -EINVAL;
1192
1193         buffer->size = size;
1194         if (buffer->error_callback == NULL) {
1195                 ret = rte_eth_tx_buffer_set_err_callback(
1196                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
1197         }
1198
1199         return ret;
1200 }
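/*
 * Illustrative usage sketch (not part of this file): allocating a TX buffer,
 * installing the counting callback defined above, and draining it. The
 * variable names and the buffer size of 32 packets are hypothetical.
 *
 *     uint64_t dropped = 0;
 *     struct rte_eth_dev_tx_buffer *buf;
 *
 *     buf = rte_zmalloc_socket("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(32), 0,
 *                              rte_eth_dev_socket_id(port_id));
 *     rte_eth_tx_buffer_init(buf, 32);
 *     rte_eth_tx_buffer_set_err_callback(buf,
 *                     rte_eth_tx_buffer_count_callback, &dropped);
 *
 *     rte_eth_tx_buffer(port_id, 0, buf, mbuf);      // queue one packet
 *     rte_eth_tx_buffer_flush(port_id, 0, buf);      // send whatever is pending
 */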
1201
1202 void
1203 rte_eth_promiscuous_enable(uint8_t port_id)
1204 {
1205         struct rte_eth_dev *dev;
1206
1207         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1208         dev = &rte_eth_devices[port_id];
1209
1210         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1211         (*dev->dev_ops->promiscuous_enable)(dev);
1212         dev->data->promiscuous = 1;
1213 }
1214
1215 void
1216 rte_eth_promiscuous_disable(uint8_t port_id)
1217 {
1218         struct rte_eth_dev *dev;
1219
1220         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1221         dev = &rte_eth_devices[port_id];
1222
1223         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1224         dev->data->promiscuous = 0;
1225         (*dev->dev_ops->promiscuous_disable)(dev);
1226 }
1227
1228 int
1229 rte_eth_promiscuous_get(uint8_t port_id)
1230 {
1231         struct rte_eth_dev *dev;
1232
1233         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1234
1235         dev = &rte_eth_devices[port_id];
1236         return dev->data->promiscuous;
1237 }
1238
1239 void
1240 rte_eth_allmulticast_enable(uint8_t port_id)
1241 {
1242         struct rte_eth_dev *dev;
1243
1244         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1245         dev = &rte_eth_devices[port_id];
1246
1247         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1248         (*dev->dev_ops->allmulticast_enable)(dev);
1249         dev->data->all_multicast = 1;
1250 }
1251
1252 void
1253 rte_eth_allmulticast_disable(uint8_t port_id)
1254 {
1255         struct rte_eth_dev *dev;
1256
1257         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1258         dev = &rte_eth_devices[port_id];
1259
1260         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1261         dev->data->all_multicast = 0;
1262         (*dev->dev_ops->allmulticast_disable)(dev);
1263 }
1264
1265 int
1266 rte_eth_allmulticast_get(uint8_t port_id)
1267 {
1268         struct rte_eth_dev *dev;
1269
1270         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1271
1272         dev = &rte_eth_devices[port_id];
1273         return dev->data->all_multicast;
1274 }
1275
1276 static inline int
1277 rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
1278                                 struct rte_eth_link *link)
1279 {
1280         struct rte_eth_link *dst = link;
1281         struct rte_eth_link *src = &(dev->data->dev_link);
1282
1283         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1284                                         *(uint64_t *)src) == 0)
1285                 return -1;
1286
1287         return 0;
1288 }
1289
1290 void
1291 rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
1292 {
1293         struct rte_eth_dev *dev;
1294
1295         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1296         dev = &rte_eth_devices[port_id];
1297
1298         if (dev->data->dev_conf.intr_conf.lsc != 0)
1299                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1300         else {
1301                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1302                 (*dev->dev_ops->link_update)(dev, 1);
1303                 *eth_link = dev->data->dev_link;
1304         }
1305 }
1306
1307 void
1308 rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
1309 {
1310         struct rte_eth_dev *dev;
1311
1312         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1313         dev = &rte_eth_devices[port_id];
1314
1315         if (dev->data->dev_conf.intr_conf.lsc != 0)
1316                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1317         else {
1318                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1319                 (*dev->dev_ops->link_update)(dev, 0);
1320                 *eth_link = dev->data->dev_link;
1321         }
1322 }
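/*
 * Illustrative usage sketch (not part of this file): polling the link without
 * blocking, for example while waiting for a port to come up after
 * rte_eth_dev_start():
 *
 *     struct rte_eth_link link;
 *
 *     rte_eth_link_get_nowait(port_id, &link);
 *     if (link.link_status == ETH_LINK_UP)
 *             printf("port %u: %u Mbps %s-duplex\n", port_id, link.link_speed,
 *                    link.link_duplex == ETH_LINK_FULL_DUPLEX ? "full" : "half");
 */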
1323
1324 int
1325 rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
1326 {
1327         struct rte_eth_dev *dev;
1328
1329         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1330
1331         dev = &rte_eth_devices[port_id];
1332         memset(stats, 0, sizeof(*stats));
1333
1334         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1335         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1336         (*dev->dev_ops->stats_get)(dev, stats);
1337         return 0;
1338 }
1339
1340 void
1341 rte_eth_stats_reset(uint8_t port_id)
1342 {
1343         struct rte_eth_dev *dev;
1344
1345         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1346         dev = &rte_eth_devices[port_id];
1347
1348         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1349         (*dev->dev_ops->stats_reset)(dev);
1350         dev->data->rx_mbuf_alloc_failed = 0;
1351 }
1352
1353 static int
1354 get_xstats_count(uint8_t port_id)
1355 {
1356         struct rte_eth_dev *dev;
1357         int count;
1358
1359         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1360         dev = &rte_eth_devices[port_id];
1361         if (dev->dev_ops->xstats_get_names != NULL) {
1362                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
1363                 if (count < 0)
1364                         return count;
1365         } else
1366                 count = 0;
1367         count += RTE_NB_STATS;
1368         count += RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS) *
1369                  RTE_NB_RXQ_STATS;
1370         count += RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS) *
1371                  RTE_NB_TXQ_STATS;
1372         return count;
1373 }
1374
1375 int
1376 rte_eth_xstats_get_names(uint8_t port_id,
1377         struct rte_eth_xstat_name *xstats_names,
1378         unsigned size)
1379 {
1380         struct rte_eth_dev *dev;
1381         int cnt_used_entries;
1382         int cnt_expected_entries;
1383         int cnt_driver_entries;
1384         uint32_t idx, id_queue;
1385         uint16_t num_q;
1386
1387         cnt_expected_entries = get_xstats_count(port_id);
1388         if (xstats_names == NULL || cnt_expected_entries < 0 ||
1389                         (int)size < cnt_expected_entries)
1390                 return cnt_expected_entries;
1391
1392         /* port_id checked in get_xstats_count() */
1393         dev = &rte_eth_devices[port_id];
1394         cnt_used_entries = 0;
1395
1396         for (idx = 0; idx < RTE_NB_STATS; idx++) {
1397                 snprintf(xstats_names[cnt_used_entries].name,
1398                         sizeof(xstats_names[0].name),
1399                         "%s", rte_stats_strings[idx].name);
1400                 cnt_used_entries++;
1401         }
1402         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1403         for (id_queue = 0; id_queue < num_q; id_queue++) {
1404                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
1405                         snprintf(xstats_names[cnt_used_entries].name,
1406                                 sizeof(xstats_names[0].name),
1407                                 "rx_q%u%s",
1408                                 id_queue, rte_rxq_stats_strings[idx].name);
1409                         cnt_used_entries++;
1410                 }
1411
1412         }
1413         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1414         for (id_queue = 0; id_queue < num_q; id_queue++) {
1415                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
1416                         snprintf(xstats_names[cnt_used_entries].name,
1417                                 sizeof(xstats_names[0].name),
1418                                 "tx_q%u%s",
1419                                 id_queue, rte_txq_stats_strings[idx].name);
1420                         cnt_used_entries++;
1421                 }
1422         }
1423
1424         if (dev->dev_ops->xstats_get_names != NULL) {
1425                 /* If there are any driver-specific xstats, append them
1426                  * to end of list.
1427                  */
1428                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
1429                         dev,
1430                         xstats_names + cnt_used_entries,
1431                         size - cnt_used_entries);
1432                 if (cnt_driver_entries < 0)
1433                         return cnt_driver_entries;
1434                 cnt_used_entries += cnt_driver_entries;
1435         }
1436
1437         return cnt_used_entries;
1438 }
1439
1440 /* retrieve ethdev extended statistics */
1441 int
1442 rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
1443         unsigned n)
1444 {
1445         struct rte_eth_stats eth_stats;
1446         struct rte_eth_dev *dev;
1447         unsigned count = 0, i, q;
1448         signed xcount = 0;
1449         uint64_t val, *stats_ptr;
1450         uint16_t nb_rxqs, nb_txqs;
1451
1452         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1453
1454         dev = &rte_eth_devices[port_id];
1455
1456         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1457         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1458
1459         /* Return generic statistics */
1460         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
1461                 (nb_txqs * RTE_NB_TXQ_STATS);
1462
1463         /* implemented by the driver */
1464         if (dev->dev_ops->xstats_get != NULL) {
1465                 /* Retrieve the xstats from the driver at the end of the
1466                  * xstats array.
1467                  */
1468                 xcount = (*dev->dev_ops->xstats_get)(dev,
1469                                      xstats ? xstats + count : NULL,
1470                                      (n > count) ? n - count : 0);
1471
1472                 if (xcount < 0)
1473                         return xcount;
1474         }
1475
1476         if (n < count + xcount || xstats == NULL)
1477                 return count + xcount;
1478
1479         /* now fill the xstats structure */
1480         count = 0;
1481         rte_eth_stats_get(port_id, &eth_stats);
1482
1483         /* global stats */
1484         for (i = 0; i < RTE_NB_STATS; i++) {
1485                 stats_ptr = RTE_PTR_ADD(&eth_stats,
1486                                         rte_stats_strings[i].offset);
1487                 val = *stats_ptr;
1488                 xstats[count++].value = val;
1489         }
1490
1491         /* per-rxq stats */
1492         for (q = 0; q < nb_rxqs; q++) {
1493                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
1494                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1495                                         rte_rxq_stats_strings[i].offset +
1496                                         q * sizeof(uint64_t));
1497                         val = *stats_ptr;
1498                         xstats[count++].value = val;
1499                 }
1500         }
1501
1502         /* per-txq stats */
1503         for (q = 0; q < nb_txqs; q++) {
1504                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
1505                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1506                                         rte_txq_stats_strings[i].offset +
1507                                         q * sizeof(uint64_t));
1508                         val = *stats_ptr;
1509                         xstats[count++].value = val;
1510                 }
1511         }
1512
1513         for (i = 0; i < count + xcount; i++)
1514                 xstats[i].id = i;
1515
1516         return count + xcount;
1517 }
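/*
 * Illustrative usage sketch (not part of this file): the usual two-call
 * pattern for reading extended statistics. First query the required array
 * size with NULL buffers, then allocate and fetch. Variable names are
 * hypothetical.
 *
 *     int i, n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *     struct rte_eth_xstat_name *names = calloc(n, sizeof(names[0]));
 *     struct rte_eth_xstat *vals = calloc(n, sizeof(vals[0]));
 *
 *     if (rte_eth_xstats_get_names(port_id, names, n) == n &&
 *         rte_eth_xstats_get(port_id, vals, n) == n)
 *             for (i = 0; i < n; i++)
 *                     printf("%s: %" PRIu64 "\n", names[i].name, vals[i].value);
 *     free(names);
 *     free(vals);
 */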
1518
1519 /* reset ethdev extended statistics */
1520 void
1521 rte_eth_xstats_reset(uint8_t port_id)
1522 {
1523         struct rte_eth_dev *dev;
1524
1525         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1526         dev = &rte_eth_devices[port_id];
1527
1528         /* implemented by the driver */
1529         if (dev->dev_ops->xstats_reset != NULL) {
1530                 (*dev->dev_ops->xstats_reset)(dev);
1531                 return;
1532         }
1533
1534         /* fallback to default */
1535         rte_eth_stats_reset(port_id);
1536 }
1537
1538 static int
1539 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1540                 uint8_t is_rx)
1541 {
1542         struct rte_eth_dev *dev;
1543
1544         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1545
1546         dev = &rte_eth_devices[port_id];
1547
1548         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1549         return (*dev->dev_ops->queue_stats_mapping_set)
1550                         (dev, queue_id, stat_idx, is_rx);
1551 }
1552
1553
1554 int
1555 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1556                 uint8_t stat_idx)
1557 {
1558         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1559                         STAT_QMAP_TX);
1560 }
1561
1562
1563 int
1564 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1565                 uint8_t stat_idx)
1566 {
1567         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1568                         STAT_QMAP_RX);
1569 }
1570
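/*
 * Example: a minimal sketch of directing the per-queue counters of RX and TX
 * queue 0 to stat register 0.  Illustrative only; port_id and ret are
 * application variables, and the PMD must implement queue_stats_mapping_set
 * or -ENOTSUP is returned.
 *
 *     ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 0, 0);
 *     if (ret == 0)
 *             ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, 0, 0);
 */
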
1571 void
1572 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1573 {
1574         struct rte_eth_dev *dev;
1575         const struct rte_eth_desc_lim lim = {
1576                 .nb_max = UINT16_MAX,
1577                 .nb_min = 0,
1578                 .nb_align = 1,
1579         };
1580
1581         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1582         dev = &rte_eth_devices[port_id];
1583
1584         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
1585         dev_info->rx_desc_lim = lim;
1586         dev_info->tx_desc_lim = lim;
1587
1588         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1589         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1590         dev_info->pci_dev = dev->pci_dev;
1591         dev_info->driver_name = dev->data->drv_name;
1592         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
1593         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
1594 }
1595
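/*
 * Example: a minimal sketch of using the reported limits to clamp the queue
 * counts an application requests.  Illustrative only; port_id, ret and
 * port_conf are application variables prepared elsewhere.
 *
 *     struct rte_eth_dev_info dev_info;
 *     uint16_t nb_rxq = 4, nb_txq = 4;
 *
 *     rte_eth_dev_info_get(port_id, &dev_info);
 *     nb_rxq = RTE_MIN(nb_rxq, dev_info.max_rx_queues);
 *     nb_txq = RTE_MIN(nb_txq, dev_info.max_tx_queues);
 *     ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 */
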
1596 int
1597 rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask,
1598                                  uint32_t *ptypes, int num)
1599 {
1600         int i, j;
1601         struct rte_eth_dev *dev;
1602         const uint32_t *all_ptypes;
1603
1604         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1605         dev = &rte_eth_devices[port_id];
1606         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
1607         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
1608
1609         if (!all_ptypes)
1610                 return 0;
1611
1612         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
1613                 if (all_ptypes[i] & ptype_mask) {
1614                         if (j < num)
1615                                 ptypes[j] = all_ptypes[i];
1616                         j++;
1617                 }
1618
1619         return j;
1620 }
1621
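/*
 * Example: a minimal sketch of the usual two-call pattern, first querying the
 * number of matching packet types and then fetching them.  Illustrative only;
 * port_id is an application variable.
 *
 *     uint32_t *ptypes;
 *     int num;
 *
 *     num = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L3_MASK,
 *                                            NULL, 0);
 *     if (num <= 0)
 *             return num;
 *     ptypes = malloc(sizeof(*ptypes) * num);
 *     if (ptypes == NULL)
 *             return -ENOMEM;
 *     num = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L3_MASK,
 *                                            ptypes, num);
 */
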
1622 void
1623 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1624 {
1625         struct rte_eth_dev *dev;
1626
1627         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1628         dev = &rte_eth_devices[port_id];
1629         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1630 }
1631
1632
1633 int
1634 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1635 {
1636         struct rte_eth_dev *dev;
1637
1638         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1639
1640         dev = &rte_eth_devices[port_id];
1641         *mtu = dev->data->mtu;
1642         return 0;
1643 }
1644
1645 int
1646 rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
1647 {
1648         int ret;
1649         struct rte_eth_dev *dev;
1650
1651         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1652         dev = &rte_eth_devices[port_id];
1653         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
1654
1655         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
1656         if (!ret)
1657                 dev->data->mtu = mtu;
1658
1659         return ret;
1660 }
1661
1662 int
1663 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1664 {
1665         struct rte_eth_dev *dev;
1666
1667         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1668         dev = &rte_eth_devices[port_id];
1669         if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1670                 RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1671                 return -ENOSYS;
1672         }
1673
1674         if (vlan_id > 4095) {
1675                 RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1676                                 port_id, (unsigned) vlan_id);
1677                 return -EINVAL;
1678         }
1679         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1680
1681         return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1682 }
1683
1684 int
1685 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1686 {
1687         struct rte_eth_dev *dev;
1688
1689         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1690         dev = &rte_eth_devices[port_id];
1691         if (rx_queue_id >= dev->data->nb_rx_queues) {
1692                 RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
1693                 return -EINVAL;
1694         }
1695
1696         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1697         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1698
1699         return 0;
1700 }
1701
1702 int
1703 rte_eth_dev_set_vlan_ether_type(uint8_t port_id,
1704                                 enum rte_vlan_type vlan_type,
1705                                 uint16_t tpid)
1706 {
1707         struct rte_eth_dev *dev;
1708
1709         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1710         dev = &rte_eth_devices[port_id];
1711         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1712
1713         return (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, tpid);
1714 }
1715
1716 int
1717 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
1718 {
1719         struct rte_eth_dev *dev;
1720         int ret = 0;
1721         int mask = 0;
1722         int cur, org = 0;
1723
1724         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1725         dev = &rte_eth_devices[port_id];
1726
1727         /*check which option changed by application*/
1728         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
1729         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
1730         if (cur != org) {
1731                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
1732                 mask |= ETH_VLAN_STRIP_MASK;
1733         }
1734
1735         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
1736         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
1737         if (cur != org) {
1738                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
1739                 mask |= ETH_VLAN_FILTER_MASK;
1740         }
1741
1742         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
1743         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
1744         if (cur != org) {
1745                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
1746                 mask |= ETH_VLAN_EXTEND_MASK;
1747         }
1748
1749         /*no change*/
1750         if (mask == 0)
1751                 return ret;
1752
1753         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
1754         (*dev->dev_ops->vlan_offload_set)(dev, mask);
1755
1756         return ret;
1757 }
1758
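/*
 * Example: a minimal sketch of enabling VLAN stripping while leaving the other
 * offload bits untouched, by reading the current mask first.  Illustrative
 * only; port_id and ret are application variables.
 *
 *     int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *     if (mask < 0)
 *             return mask;
 *     mask |= ETH_VLAN_STRIP_OFFLOAD;
 *     ret = rte_eth_dev_set_vlan_offload(port_id, mask);
 */
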
1759 int
1760 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1761 {
1762         struct rte_eth_dev *dev;
1763         int ret = 0;
1764
1765         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1766         dev = &rte_eth_devices[port_id];
1767
1768         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1769                 ret |= ETH_VLAN_STRIP_OFFLOAD;
1770
1771         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1772                 ret |= ETH_VLAN_FILTER_OFFLOAD;
1773
1774         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1775                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
1776
1777         return ret;
1778 }
1779
1780 int
1781 rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
1782 {
1783         struct rte_eth_dev *dev;
1784
1785         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1786         dev = &rte_eth_devices[port_id];
1787         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
1788         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
1789
1790         return 0;
1791 }
1792
1793 int
1794 rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1795 {
1796         struct rte_eth_dev *dev;
1797
1798         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1799         dev = &rte_eth_devices[port_id];
1800         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
1801         memset(fc_conf, 0, sizeof(*fc_conf));
1802         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
1803 }
1804
1805 int
1806 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1807 {
1808         struct rte_eth_dev *dev;
1809
1810         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1811         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
1812                 RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
1813                 return -EINVAL;
1814         }
1815
1816         dev = &rte_eth_devices[port_id];
1817         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
1818         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
1819 }
1820
1821 int
1822 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
1823 {
1824         struct rte_eth_dev *dev;
1825
1826         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1827         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
1828                 RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
1829                 return -EINVAL;
1830         }
1831
1832         dev = &rte_eth_devices[port_id];
1833         /* High water, low water validation are device specific */
1834         if (*dev->dev_ops->priority_flow_ctrl_set)
1835                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
1836         return -ENOTSUP;
1837 }
1838
1839 static int
1840 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
1841                         uint16_t reta_size)
1842 {
1843         uint16_t i, num;
1844
1845         if (!reta_conf)
1846                 return -EINVAL;
1847
1848         if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
1849                 RTE_PMD_DEBUG_TRACE("Invalid reta size, should be a multiple of %u\n",
1850                                                         RTE_RETA_GROUP_SIZE);
1851                 return -EINVAL;
1852         }
1853
1854         num = reta_size / RTE_RETA_GROUP_SIZE;
1855         for (i = 0; i < num; i++) {
1856                 if (reta_conf[i].mask)
1857                         return 0;
1858         }
1859
1860         return -EINVAL;
1861 }
1862
1863 static int
1864 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
1865                          uint16_t reta_size,
1866                          uint16_t max_rxq)
1867 {
1868         uint16_t i, idx, shift;
1869
1870         if (!reta_conf)
1871                 return -EINVAL;
1872
1873         if (max_rxq == 0) {
1874                 RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
1875                 return -EINVAL;
1876         }
1877
1878         for (i = 0; i < reta_size; i++) {
1879                 idx = i / RTE_RETA_GROUP_SIZE;
1880                 shift = i % RTE_RETA_GROUP_SIZE;
1881                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
1882                         (reta_conf[idx].reta[shift] >= max_rxq)) {
1883                         RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
1884                                 "the maximum rxq index: %u\n", idx, shift,
1885                                 reta_conf[idx].reta[shift], max_rxq);
1886                         return -EINVAL;
1887                 }
1888         }
1889
1890         return 0;
1891 }
1892
1893 int
1894 rte_eth_dev_rss_reta_update(uint8_t port_id,
1895                             struct rte_eth_rss_reta_entry64 *reta_conf,
1896                             uint16_t reta_size)
1897 {
1898         struct rte_eth_dev *dev;
1899         int ret;
1900
1901         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1902         /* Check mask bits */
1903         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1904         if (ret < 0)
1905                 return ret;
1906
1907         dev = &rte_eth_devices[port_id];
1908
1909         /* Check entry value */
1910         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
1911                                 dev->data->nb_rx_queues);
1912         if (ret < 0)
1913                 return ret;
1914
1915         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
1916         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
1917 }
1918
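/*
 * Example: a minimal sketch of building a redirection table that spreads the
 * entries round-robin over the configured RX queues, using the idx/shift
 * layout validated above.  Illustrative only; port_id, ret and nb_rx_queues
 * are application variables and the device reta_size is assumed to be at most
 * ETH_RSS_RETA_SIZE_512.
 *
 *     struct rte_eth_dev_info dev_info;
 *     struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
 *                                               RTE_RETA_GROUP_SIZE];
 *     uint16_t i, reta_size;
 *
 *     rte_eth_dev_info_get(port_id, &dev_info);
 *     reta_size = dev_info.reta_size;
 *     memset(reta_conf, 0, sizeof(reta_conf));
 *     for (i = 0; i < reta_size; i++) {
 *             uint16_t idx = i / RTE_RETA_GROUP_SIZE;
 *             uint16_t shift = i % RTE_RETA_GROUP_SIZE;
 *
 *             reta_conf[idx].mask |= 1ULL << shift;
 *             reta_conf[idx].reta[shift] = i % nb_rx_queues;
 *     }
 *     ret = rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
 */
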
1919 int
1920 rte_eth_dev_rss_reta_query(uint8_t port_id,
1921                            struct rte_eth_rss_reta_entry64 *reta_conf,
1922                            uint16_t reta_size)
1923 {
1924         struct rte_eth_dev *dev;
1925         int ret;
1926
1927         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1928
1929         /* Check mask bits */
1930         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1931         if (ret < 0)
1932                 return ret;
1933
1934         dev = &rte_eth_devices[port_id];
1935         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
1936         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
1937 }
1938
1939 int
1940 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
1941 {
1942         struct rte_eth_dev *dev;
1943         uint16_t rss_hash_protos;
1944
1945         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1946         rss_hash_protos = rss_conf->rss_hf;
1947         if ((rss_hash_protos != 0) &&
1948             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
1949                 RTE_PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
1950                                 rss_hash_protos);
1951                 return -EINVAL;
1952         }
1953         dev = &rte_eth_devices[port_id];
1954         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
1955         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
1956 }
1957
1958 int
1959 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
1960                               struct rte_eth_rss_conf *rss_conf)
1961 {
1962         struct rte_eth_dev *dev;
1963
1964         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1965         dev = &rte_eth_devices[port_id];
1966         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
1967         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
1968 }
1969
1970 int
1971 rte_eth_dev_udp_tunnel_port_add(uint8_t port_id,
1972                                 struct rte_eth_udp_tunnel *udp_tunnel)
1973 {
1974         struct rte_eth_dev *dev;
1975
1976         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1977         if (udp_tunnel == NULL) {
1978                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
1979                 return -EINVAL;
1980         }
1981
1982         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
1983                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
1984                 return -EINVAL;
1985         }
1986
1987         dev = &rte_eth_devices[port_id];
1988         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
1989         return (*dev->dev_ops->udp_tunnel_port_add)(dev, udp_tunnel);
1990 }
1991
1992 int
1993 rte_eth_dev_udp_tunnel_port_delete(uint8_t port_id,
1994                                    struct rte_eth_udp_tunnel *udp_tunnel)
1995 {
1996         struct rte_eth_dev *dev;
1997
1998         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1999         dev = &rte_eth_devices[port_id];
2000
2001         if (udp_tunnel == NULL) {
2002                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2003                 return -EINVAL;
2004         }
2005
2006         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2007                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2008                 return -EINVAL;
2009         }
2010
2011         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
2012         return (*dev->dev_ops->udp_tunnel_port_del)(dev, udp_tunnel);
2013 }
2014
2015 int
2016 rte_eth_led_on(uint8_t port_id)
2017 {
2018         struct rte_eth_dev *dev;
2019
2020         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2021         dev = &rte_eth_devices[port_id];
2022         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2023         return (*dev->dev_ops->dev_led_on)(dev);
2024 }
2025
2026 int
2027 rte_eth_led_off(uint8_t port_id)
2028 {
2029         struct rte_eth_dev *dev;
2030
2031         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2032         dev = &rte_eth_devices[port_id];
2033         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2034         return (*dev->dev_ops->dev_led_off)(dev);
2035 }
2036
2037 /*
2038  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2039  * an empty spot.
2040  */
2041 static int
2042 get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2043 {
2044         struct rte_eth_dev_info dev_info;
2045         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2046         unsigned i;
2047
2048         rte_eth_dev_info_get(port_id, &dev_info);
2049
2050         for (i = 0; i < dev_info.max_mac_addrs; i++)
2051                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2052                         return i;
2053
2054         return -1;
2055 }
2056
2057 static const struct ether_addr null_mac_addr;
2058
2059 int
2060 rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
2061                         uint32_t pool)
2062 {
2063         struct rte_eth_dev *dev;
2064         int index;
2065         uint64_t pool_mask;
2066
2067         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2068         dev = &rte_eth_devices[port_id];
2069         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2070
2071         if (is_zero_ether_addr(addr)) {
2072                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2073                         port_id);
2074                 return -EINVAL;
2075         }
2076         if (pool >= ETH_64_POOLS) {
2077                 RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2078                 return -EINVAL;
2079         }
2080
2081         index = get_mac_addr_index(port_id, addr);
2082         if (index < 0) {
2083                 index = get_mac_addr_index(port_id, &null_mac_addr);
2084                 if (index < 0) {
2085                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2086                                 port_id);
2087                         return -ENOSPC;
2088                 }
2089         } else {
2090                 pool_mask = dev->data->mac_pool_sel[index];
2091
2092                 /* Check if both MAC address and pool are already there; if so, do nothing */
2093                 if (pool_mask & (1ULL << pool))
2094                         return 0;
2095         }
2096
2097         /* Update NIC */
2098         (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2099
2100         /* Update address in NIC data structure */
2101         ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2102
2103         /* Update pool bitmap in NIC data structure */
2104         dev->data->mac_pool_sel[index] |= (1ULL << pool);
2105
2106         return 0;
2107 }
2108
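/*
 * Example: a minimal sketch of adding a locally administered secondary MAC
 * address to pool 0.  Illustrative only; port_id and ret are application
 * variables.
 *
 *     struct ether_addr addr = {
 *             .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *     };
 *
 *     ret = rte_eth_dev_mac_addr_add(port_id, &addr, 0);
 */
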
2109 int
2110 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
2111 {
2112         struct rte_eth_dev *dev;
2113         int index;
2114
2115         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2116         dev = &rte_eth_devices[port_id];
2117         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2118
2119         index = get_mac_addr_index(port_id, addr);
2120         if (index == 0) {
2121                 RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2122                 return -EADDRINUSE;
2123         } else if (index < 0)
2124                 return 0;  /* Do nothing if address wasn't found */
2125
2126         /* Update NIC */
2127         (*dev->dev_ops->mac_addr_remove)(dev, index);
2128
2129         /* Update address in NIC data structure */
2130         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2131
2132         /* reset pool bitmap */
2133         dev->data->mac_pool_sel[index] = 0;
2134
2135         return 0;
2136 }
2137
2138 int
2139 rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr)
2140 {
2141         struct rte_eth_dev *dev;
2142
2143         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2144
2145         if (!is_valid_assigned_ether_addr(addr))
2146                 return -EINVAL;
2147
2148         dev = &rte_eth_devices[port_id];
2149         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2150
2151         /* Update default address in NIC data structure */
2152         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
2153
2154         (*dev->dev_ops->mac_addr_set)(dev, addr);
2155
2156         return 0;
2157 }
2158
2159 int
2160 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
2161                                 uint16_t rx_mode, uint8_t on)
2162 {
2163         uint16_t num_vfs;
2164         struct rte_eth_dev *dev;
2165         struct rte_eth_dev_info dev_info;
2166
2167         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2168
2169         dev = &rte_eth_devices[port_id];
2170         rte_eth_dev_info_get(port_id, &dev_info);
2171
2172         num_vfs = dev_info.max_vfs;
2173         if (vf > num_vfs) {
2174                 RTE_PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
2175                 return -EINVAL;
2176         }
2177
2178         if (rx_mode == 0) {
2179                 RTE_PMD_DEBUG_TRACE("set VF RX mode:mode mask can not be zero\n");
2180                 return -EINVAL;
2181         }
2182         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
2183         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
2184 }
2185
2186 /*
2187  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2188  * an empty spot.
2189  */
2190 static int
2191 get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2192 {
2193         struct rte_eth_dev_info dev_info;
2194         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2195         unsigned i;
2196
2197         rte_eth_dev_info_get(port_id, &dev_info);
2198         if (!dev->data->hash_mac_addrs)
2199                 return -1;
2200
2201         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2202                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2203                         ETHER_ADDR_LEN) == 0)
2204                         return i;
2205
2206         return -1;
2207 }
2208
2209 int
2210 rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
2211                                 uint8_t on)
2212 {
2213         int index;
2214         int ret;
2215         struct rte_eth_dev *dev;
2216
2217         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2218
2219         dev = &rte_eth_devices[port_id];
2220         if (is_zero_ether_addr(addr)) {
2221                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2222                         port_id);
2223                 return -EINVAL;
2224         }
2225
2226         index = get_hash_mac_addr_index(port_id, addr);
2227         /* Check if it's already there, and do nothing */
2228         if ((index >= 0) && (on))
2229                 return 0;
2230
2231         if (index < 0) {
2232                 if (!on) {
2233                         RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
2234                                 "set in UTA\n", port_id);
2235                         return -EINVAL;
2236                 }
2237
2238                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2239                 if (index < 0) {
2240                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2241                                         port_id);
2242                         return -ENOSPC;
2243                 }
2244         }
2245
2246         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2247         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2248         if (ret == 0) {
2249                 /* Update address in NIC data structure */
2250                 if (on)
2251                         ether_addr_copy(addr,
2252                                         &dev->data->hash_mac_addrs[index]);
2253                 else
2254                         ether_addr_copy(&null_mac_addr,
2255                                         &dev->data->hash_mac_addrs[index]);
2256         }
2257
2258         return ret;
2259 }
2260
2261 int
2262 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2263 {
2264         struct rte_eth_dev *dev;
2265
2266         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2267
2268         dev = &rte_eth_devices[port_id];
2269
2270         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2271         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2272 }
2273
2274 int
2275 rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on)
2276 {
2277         uint16_t num_vfs;
2278         struct rte_eth_dev *dev;
2279         struct rte_eth_dev_info dev_info;
2280
2281         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2282
2283         dev = &rte_eth_devices[port_id];
2284         rte_eth_dev_info_get(port_id, &dev_info);
2285
2286         num_vfs = dev_info.max_vfs;
2287         if (vf > num_vfs) {
2288                 RTE_PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2289                 return -EINVAL;
2290         }
2291
2292         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2293         return (*dev->dev_ops->set_vf_rx)(dev, vf, on);
2294 }
2295
2296 int
2297 rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on)
2298 {
2299         uint16_t num_vfs;
2300         struct rte_eth_dev *dev;
2301         struct rte_eth_dev_info dev_info;
2302
2303         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2304
2305         dev = &rte_eth_devices[port_id];
2306         rte_eth_dev_info_get(port_id, &dev_info);
2307
2308         num_vfs = dev_info.max_vfs;
2309         if (vf > num_vfs) {
2310                 RTE_PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
2311                 return -EINVAL;
2312         }
2313
2314         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2315         return (*dev->dev_ops->set_vf_tx)(dev, vf, on);
2316 }
2317
2318 int
2319 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2320                                uint64_t vf_mask, uint8_t vlan_on)
2321 {
2322         struct rte_eth_dev *dev;
2323
2324         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2325
2326         dev = &rte_eth_devices[port_id];
2327
2328         if (vlan_id > ETHER_MAX_VLAN_ID) {
2329                 RTE_PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
2330                         vlan_id);
2331                 return -EINVAL;
2332         }
2333
2334         if (vf_mask == 0) {
2335                 RTE_PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
2336                 return -EINVAL;
2337         }
2338
2339         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2340         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2341                                                    vf_mask, vlan_on);
2342 }
2343
2344 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
2345                                         uint16_t tx_rate)
2346 {
2347         struct rte_eth_dev *dev;
2348         struct rte_eth_dev_info dev_info;
2349         struct rte_eth_link link;
2350
2351         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2352
2353         dev = &rte_eth_devices[port_id];
2354         rte_eth_dev_info_get(port_id, &dev_info);
2355         link = dev->data->dev_link;
2356
2357         if (queue_idx > dev_info.max_tx_queues) {
2358                 RTE_PMD_DEBUG_TRACE("set queue rate limit:port %d: "
2359                                 "invalid queue id=%d\n", port_id, queue_idx);
2360                 return -EINVAL;
2361         }
2362
2363         if (tx_rate > link.link_speed) {
2364                 RTE_PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
2365                                 "bigger than link speed= %d\n",
2366                         tx_rate, link.link_speed);
2367                 return -EINVAL;
2368         }
2369
2370         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2371         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2372 }
2373
2374 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
2375                                 uint64_t q_msk)
2376 {
2377         struct rte_eth_dev *dev;
2378         struct rte_eth_dev_info dev_info;
2379         struct rte_eth_link link;
2380
2381         if (q_msk == 0)
2382                 return 0;
2383
2384         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2385
2386         dev = &rte_eth_devices[port_id];
2387         rte_eth_dev_info_get(port_id, &dev_info);
2388         link = dev->data->dev_link;
2389
2390         if (vf > dev_info.max_vfs) {
2391                 RTE_PMD_DEBUG_TRACE("set VF rate limit:port %d: "
2392                                 "invalid vf id=%d\n", port_id, vf);
2393                 return -EINVAL;
2394         }
2395
2396         if (tx_rate > link.link_speed) {
2397                 RTE_PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
2398                                 "bigger than link speed= %d\n",
2399                                 tx_rate, link.link_speed);
2400                 return -EINVAL;
2401         }
2402
2403         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
2404         return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
2405 }
2406
2407 int
2408 rte_eth_mirror_rule_set(uint8_t port_id,
2409                         struct rte_eth_mirror_conf *mirror_conf,
2410                         uint8_t rule_id, uint8_t on)
2411 {
2412         struct rte_eth_dev *dev;
2413
2414         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2415         if (mirror_conf->rule_type == 0) {
2416                 RTE_PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2417                 return -EINVAL;
2418         }
2419
2420         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2421                 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
2422                                 ETH_64_POOLS - 1);
2423                 return -EINVAL;
2424         }
2425
2426         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
2427              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
2428             (mirror_conf->pool_mask == 0)) {
2429                 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
2430                 return -EINVAL;
2431         }
2432
2433         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
2434             mirror_conf->vlan.vlan_mask == 0) {
2435                 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
2436                 return -EINVAL;
2437         }
2438
2439         dev = &rte_eth_devices[port_id];
2440         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2441
2442         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2443 }
2444
2445 int
2446 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2447 {
2448         struct rte_eth_dev *dev;
2449
2450         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2451
2452         dev = &rte_eth_devices[port_id];
2453         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2454
2455         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2456 }
2457
2458 int
2459 rte_eth_dev_callback_register(uint8_t port_id,
2460                         enum rte_eth_event_type event,
2461                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2462 {
2463         struct rte_eth_dev *dev;
2464         struct rte_eth_dev_callback *user_cb;
2465
2466         if (!cb_fn)
2467                 return -EINVAL;
2468
2469         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2470
2471         dev = &rte_eth_devices[port_id];
2472         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2473
2474         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
2475                 if (user_cb->cb_fn == cb_fn &&
2476                         user_cb->cb_arg == cb_arg &&
2477                         user_cb->event == event) {
2478                         break;
2479                 }
2480         }
2481
2482         /* create a new callback. */
2483         if (user_cb == NULL) {
2484                 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
2485                                         sizeof(struct rte_eth_dev_callback), 0);
2486                 if (user_cb != NULL) {
2487                         user_cb->cb_fn = cb_fn;
2488                         user_cb->cb_arg = cb_arg;
2489                         user_cb->event = event;
2490                         TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
2491                 }
2492         }
2493
2494         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2495         return (user_cb == NULL) ? -ENOMEM : 0;
2496 }
2497
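/*
 * Example: a minimal sketch of registering a callback for link status change
 * interrupts.  Illustrative only; port_id and ret are application variables
 * and lsc_event_cb is a hypothetical handler matching the rte_eth_dev_cb_fn
 * prototype declared in rte_ethdev.h at this revision.
 *
 *     static void
 *     lsc_event_cb(uint8_t port_id, enum rte_eth_event_type event,
 *                  void *cb_arg)
 *     {
 *             RTE_SET_USED(cb_arg);
 *             printf("port %u: event %d\n", port_id, event);
 *     }
 *
 *     ret = rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                                         lsc_event_cb, NULL);
 */
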
2498 int
2499 rte_eth_dev_callback_unregister(uint8_t port_id,
2500                         enum rte_eth_event_type event,
2501                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2502 {
2503         int ret;
2504         struct rte_eth_dev *dev;
2505         struct rte_eth_dev_callback *cb, *next;
2506
2507         if (!cb_fn)
2508                 return -EINVAL;
2509
2510         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2511
2512         dev = &rte_eth_devices[port_id];
2513         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2514
2515         ret = 0;
2516         for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
2517
2518                 next = TAILQ_NEXT(cb, next);
2519
2520                 if (cb->cb_fn != cb_fn || cb->event != event ||
2521                                 (cb->cb_arg != (void *)-1 &&
2522                                 cb->cb_arg != cb_arg))
2523                         continue;
2524
2525                 /*
2526                  * if this callback is not executing right now,
2527                  * then remove it.
2528                  */
2529                 if (cb->active == 0) {
2530                         TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
2531                         rte_free(cb);
2532                 } else {
2533                         ret = -EAGAIN;
2534                 }
2535         }
2536
2537         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2538         return ret;
2539 }
2540
2541 void
2542 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
2543         enum rte_eth_event_type event, void *cb_arg)
2544 {
2545         struct rte_eth_dev_callback *cb_lst;
2546         struct rte_eth_dev_callback dev_cb;
2547
2548         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2549         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
2550                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
2551                         continue;
2552                 dev_cb = *cb_lst;
2553                 cb_lst->active = 1;
2554                 if (cb_arg != NULL)
2555                         dev_cb.cb_arg = (void *) cb_arg;
2556
2557                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2558                 dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
2559                                                 dev_cb.cb_arg);
2560                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
2561                 cb_lst->active = 0;
2562         }
2563         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2564 }
2565
2566 int
2567 rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
2568 {
2569         uint32_t vec;
2570         struct rte_eth_dev *dev;
2571         struct rte_intr_handle *intr_handle;
2572         uint16_t qid;
2573         int rc;
2574
2575         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2576
2577         dev = &rte_eth_devices[port_id];
2578         intr_handle = &dev->pci_dev->intr_handle;
2579         if (!intr_handle->intr_vec) {
2580                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2581                 return -EPERM;
2582         }
2583
2584         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
2585                 vec = intr_handle->intr_vec[qid];
2586                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2587                 if (rc && rc != -EEXIST) {
2588                         RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2589                                         " op %d epfd %d vec %u\n",
2590                                         port_id, qid, op, epfd, vec);
2591                 }
2592         }
2593
2594         return 0;
2595 }
2596
2597 const struct rte_memzone *
2598 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
2599                          uint16_t queue_id, size_t size, unsigned align,
2600                          int socket_id)
2601 {
2602         char z_name[RTE_MEMZONE_NAMESIZE];
2603         const struct rte_memzone *mz;
2604
2605         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
2606                  dev->driver->pci_drv.driver.name, ring_name,
2607                  dev->data->port_id, queue_id);
2608
2609         mz = rte_memzone_lookup(z_name);
2610         if (mz)
2611                 return mz;
2612
2613         if (rte_xen_dom0_supported())
2614                 return rte_memzone_reserve_bounded(z_name, size, socket_id,
2615                                                    0, align, RTE_PGSIZE_2M);
2616         else
2617                 return rte_memzone_reserve_aligned(z_name, size, socket_id,
2618                                                    0, align);
2619 }
2620
2621 int
2622 rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
2623                           int epfd, int op, void *data)
2624 {
2625         uint32_t vec;
2626         struct rte_eth_dev *dev;
2627         struct rte_intr_handle *intr_handle;
2628         int rc;
2629
2630         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2631
2632         dev = &rte_eth_devices[port_id];
2633         if (queue_id >= dev->data->nb_rx_queues) {
2634                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
2635                 return -EINVAL;
2636         }
2637
2638         intr_handle = &dev->pci_dev->intr_handle;
2639         if (!intr_handle->intr_vec) {
2640                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2641                 return -EPERM;
2642         }
2643
2644         vec = intr_handle->intr_vec[queue_id];
2645         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2646         if (rc && rc != -EEXIST) {
2647                 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2648                                 " op %d epfd %d vec %u\n",
2649                                 port_id, queue_id, op, epfd, vec);
2650                 return rc;
2651         }
2652
2653         return 0;
2654 }
2655
2656 int
2657 rte_eth_dev_rx_intr_enable(uint8_t port_id,
2658                            uint16_t queue_id)
2659 {
2660         struct rte_eth_dev *dev;
2661
2662         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2663
2664         dev = &rte_eth_devices[port_id];
2665
2666         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
2667         return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
2668 }
2669
2670 int
2671 rte_eth_dev_rx_intr_disable(uint8_t port_id,
2672                             uint16_t queue_id)
2673 {
2674         struct rte_eth_dev *dev;
2675
2676         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2677
2678         dev = &rte_eth_devices[port_id];
2679
2680         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
2681         return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
2682 }
2683
2684 #ifdef RTE_NIC_BYPASS
2685 int rte_eth_dev_bypass_init(uint8_t port_id)
2686 {
2687         struct rte_eth_dev *dev;
2688
2689         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2690
2691         dev = &rte_eth_devices[port_id];
2692         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
2693         (*dev->dev_ops->bypass_init)(dev);
2694         return 0;
2695 }
2696
2697 int
2698 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
2699 {
2700         struct rte_eth_dev *dev;
2701
2702         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2703
2704         dev = &rte_eth_devices[port_id];
2705         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2706         (*dev->dev_ops->bypass_state_show)(dev, state);
2707         return 0;
2708 }
2709
2710 int
2711 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
2712 {
2713         struct rte_eth_dev *dev;
2714
2715         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2716
2717         dev = &rte_eth_devices[port_id];
2718         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
2719         (*dev->dev_ops->bypass_state_set)(dev, new_state);
2720         return 0;
2721 }
2722
2723 int
2724 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
2725 {
2726         struct rte_eth_dev *dev;
2727
2728         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2729
2730         dev = &rte_eth_devices[port_id];
2731         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_show, -ENOTSUP);
2732         (*dev->dev_ops->bypass_event_show)(dev, event, state);
2733         return 0;
2734 }
2735
2736 int
2737 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
2738 {
2739         struct rte_eth_dev *dev;
2740
2741         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2742
2743         dev = &rte_eth_devices[port_id];
2744
2745         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
2746         (*dev->dev_ops->bypass_event_set)(dev, event, state);
2747         return 0;
2748 }
2749
2750 int
2751 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
2752 {
2753         struct rte_eth_dev *dev;
2754
2755         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2756
2757         dev = &rte_eth_devices[port_id];
2758
2759         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
2760         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
2761         return 0;
2762 }
2763
2764 int
2765 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
2766 {
2767         struct rte_eth_dev *dev;
2768
2769         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2770
2771         dev = &rte_eth_devices[port_id];
2772
2773         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
2774         (*dev->dev_ops->bypass_ver_show)(dev, ver);
2775         return 0;
2776 }
2777
2778 int
2779 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
2780 {
2781         struct rte_eth_dev *dev;
2782
2783         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2784
2785         dev = &rte_eth_devices[port_id];
2786
2787         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
2788         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
2789         return 0;
2790 }
2791
2792 int
2793 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
2794 {
2795         struct rte_eth_dev *dev;
2796
2797         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2798
2799         dev = &rte_eth_devices[port_id];
2800
2801         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
2802         (*dev->dev_ops->bypass_wd_reset)(dev);
2803         return 0;
2804 }
2805 #endif
2806
2807 int
2808 rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
2809 {
2810         struct rte_eth_dev *dev;
2811
2812         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2813
2814         dev = &rte_eth_devices[port_id];
2815         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2816         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
2817                                 RTE_ETH_FILTER_NOP, NULL);
2818 }
2819
2820 int
2821 rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
2822                        enum rte_filter_op filter_op, void *arg)
2823 {
2824         struct rte_eth_dev *dev;
2825
2826         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2827
2828         dev = &rte_eth_devices[port_id];
2829         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2830         return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
2831 }
2832
2833 void *
2834 rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
2835                 rte_rx_callback_fn fn, void *user_param)
2836 {
2837 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2838         rte_errno = ENOTSUP;
2839         return NULL;
2840 #endif
2841         /* check input parameters */
2842         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2843                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2844                 rte_errno = EINVAL;
2845                 return NULL;
2846         }
2847         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2848
2849         if (cb == NULL) {
2850                 rte_errno = ENOMEM;
2851                 return NULL;
2852         }
2853
2854         cb->fn.rx = fn;
2855         cb->param = user_param;
2856
2857         rte_spinlock_lock(&rte_eth_rx_cb_lock);
2858         /* Add the callback in FIFO order. */
2859         struct rte_eth_rxtx_callback *tail =
2860                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
2861
2862         if (!tail) {
2863                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
2864
2865         } else {
2866                 while (tail->next)
2867                         tail = tail->next;
2868                 tail->next = cb;
2869         }
2870         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
2871
2872         return cb;
2873 }
2874
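/*
 * Example: a minimal sketch of a post-RX callback that only counts received
 * packets on queue 0.  Illustrative only; port_id, cb and rx_count are
 * application variables, count_rx_cb is a hypothetical handler matching the
 * rte_rx_callback_fn prototype at this revision, and RTE_ETHDEV_RXTX_CALLBACKS
 * must be enabled.
 *
 *     static uint16_t
 *     count_rx_cb(uint8_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *                 uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *     {
 *             uint64_t *counter = user_param;
 *
 *             RTE_SET_USED(port);
 *             RTE_SET_USED(queue);
 *             RTE_SET_USED(pkts);
 *             RTE_SET_USED(max_pkts);
 *             *counter += nb_pkts;
 *             return nb_pkts;
 *     }
 *
 *     cb = rte_eth_add_rx_callback(port_id, 0, count_rx_cb, &rx_count);
 */
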
2875 void *
2876 rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id,
2877                 rte_rx_callback_fn fn, void *user_param)
2878 {
2879 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2880         rte_errno = ENOTSUP;
2881         return NULL;
2882 #endif
2883         /* check input parameters */
2884         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2885                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2886                 rte_errno = EINVAL;
2887                 return NULL;
2888         }
2889
2890         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2891
2892         if (cb == NULL) {
2893                 rte_errno = ENOMEM;
2894                 return NULL;
2895         }
2896
2897         cb->fn.rx = fn;
2898         cb->param = user_param;
2899
2900         rte_spinlock_lock(&rte_eth_rx_cb_lock);
2901         /* Add the callback at the first position */
2902         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
2903         rte_smp_wmb();
2904         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
2905         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
2906
2907         return cb;
2908 }
2909
2910 void *
2911 rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
2912                 rte_tx_callback_fn fn, void *user_param)
2913 {
2914 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2915         rte_errno = ENOTSUP;
2916         return NULL;
2917 #endif
2918         /* check input parameters */
2919         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2920                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
2921                 rte_errno = EINVAL;
2922                 return NULL;
2923         }
2924
2925         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2926
2927         if (cb == NULL) {
2928                 rte_errno = ENOMEM;
2929                 return NULL;
2930         }
2931
2932         cb->fn.tx = fn;
2933         cb->param = user_param;
2934
2935         rte_spinlock_lock(&rte_eth_tx_cb_lock);
2936         /* Add the callback in FIFO order. */
2937         struct rte_eth_rxtx_callback *tail =
2938                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
2939
2940         if (!tail) {
2941                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
2942
2943         } else {
2944                 while (tail->next)
2945                         tail = tail->next;
2946                 tail->next = cb;
2947         }
2948         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
2949
2950         return cb;
2951 }
2952
2953 int
2954 rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
2955                 struct rte_eth_rxtx_callback *user_cb)
2956 {
2957 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2958         return -ENOTSUP;
2959 #endif
2960         /* Check input parameters. */
2961         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2962         if (user_cb == NULL ||
2963                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
2964                 return -EINVAL;
2965
2966         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2967         struct rte_eth_rxtx_callback *cb;
2968         struct rte_eth_rxtx_callback **prev_cb;
2969         int ret = -EINVAL;
2970
2971         rte_spinlock_lock(&rte_eth_rx_cb_lock);
2972         prev_cb = &dev->post_rx_burst_cbs[queue_id];
2973         for (; *prev_cb != NULL; prev_cb = &cb->next) {
2974                 cb = *prev_cb;
2975                 if (cb == user_cb) {
2976                         /* Remove the user cb from the callback list. */
2977                         *prev_cb = cb->next;
2978                         ret = 0;
2979                         break;
2980                 }
2981         }
2982         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
2983
2984         return ret;
2985 }
2986
2987 int
2988 rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
2989                 struct rte_eth_rxtx_callback *user_cb)
2990 {
2991 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2992         return -ENOTSUP;
2993 #endif
2994         /* Check input parameters. */
2995         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2996         if (user_cb == NULL ||
2997                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
2998                 return -EINVAL;
2999
3000         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3001         int ret = -EINVAL;
3002         struct rte_eth_rxtx_callback *cb;
3003         struct rte_eth_rxtx_callback **prev_cb;
3004
3005         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3006         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
3007         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3008                 cb = *prev_cb;
3009                 if (cb == user_cb) {
3010                         /* Remove the user cb from the callback list. */
3011                         *prev_cb = cb->next;
3012                         ret = 0;
3013                         break;
3014                 }
3015         }
3016         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3017
3018         return ret;
3019 }
3020
3021 int
3022 rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3023         struct rte_eth_rxq_info *qinfo)
3024 {
3025         struct rte_eth_dev *dev;
3026
3027         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3028
3029         if (qinfo == NULL)
3030                 return -EINVAL;
3031
3032         dev = &rte_eth_devices[port_id];
3033         if (queue_id >= dev->data->nb_rx_queues) {
3034                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3035                 return -EINVAL;
3036         }
3037
3038         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3039
3040         memset(qinfo, 0, sizeof(*qinfo));
3041         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3042         return 0;
3043 }
3044
3045 int
3046 rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3047         struct rte_eth_txq_info *qinfo)
3048 {
3049         struct rte_eth_dev *dev;
3050
3051         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3052
3053         if (qinfo == NULL)
3054                 return -EINVAL;
3055
3056         dev = &rte_eth_devices[port_id];
3057         if (queue_id >= dev->data->nb_tx_queues) {
3058                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3059                 return -EINVAL;
3060         }
3061
3062         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3063
3064         memset(qinfo, 0, sizeof(*qinfo));
3065         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3066         return 0;
3067 }
3068
3069 int
3070 rte_eth_dev_set_mc_addr_list(uint8_t port_id,
3071                              struct ether_addr *mc_addr_set,
3072                              uint32_t nb_mc_addr)
3073 {
3074         struct rte_eth_dev *dev;
3075
3076         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3077
3078         dev = &rte_eth_devices[port_id];
3079         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3080         return dev->dev_ops->set_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
3081 }

int
rte_eth_timesync_enable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
        return (*dev->dev_ops->timesync_enable)(dev);
}

int
rte_eth_timesync_disable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
        return (*dev->dev_ops->timesync_disable)(dev);
}

int
rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp,
                                   uint32_t flags)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
        return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
}

int
rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
        return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
}

int
rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
        return (*dev->dev_ops->timesync_adjust_time)(dev, delta);
}

int
rte_eth_timesync_read_time(uint8_t port_id, struct timespec *timestamp)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
        return (*dev->dev_ops->timesync_read_time)(dev, timestamp);
}

int
rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *timestamp)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
        return (*dev->dev_ops->timesync_write_time)(dev, timestamp);
}
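
/*
 * Illustrative usage sketch (not part of the ethdev implementation): a
 * PTP-aware application typically enables timesync once and then reads and
 * steers the device clock with the calls above.  Only functions defined in
 * this file are used; the helper name and the 1 ms correction are assumptions
 * for illustration.
 */
static __rte_unused void
example_adjust_device_clock(uint8_t port_id)
{
        struct timespec now;

        if (rte_eth_timesync_enable(port_id) != 0)
                return;

        /* Read the current device time ... */
        if (rte_eth_timesync_read_time(port_id, &now) == 0)
                printf("port %u time: %jd.%09ld\n", (unsigned)port_id,
                       (intmax_t)now.tv_sec, now.tv_nsec);

        /* ... and nudge the device clock forward by 1 ms (1000000 ns). */
        rte_eth_timesync_adjust_time(port_id, 1000000);
}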

int
rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
        return (*dev->dev_ops->get_reg)(dev, info);
}

int
rte_eth_dev_get_eeprom_length(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
        return (*dev->dev_ops->get_eeprom_length)(dev);
}

int
rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
        return (*dev->dev_ops->get_eeprom)(dev, info);
}

int
rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
        return (*dev->dev_ops->set_eeprom)(dev, info);
}
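
/*
 * Illustrative usage sketch (not part of the ethdev implementation): reading
 * back the device EEPROM is a two-step operation, first query the size, then
 * ask the driver to fill a caller-provided buffer.  The helper name below is
 * an assumption for illustration; struct rte_dev_eeprom_info and the calls
 * are the public ethdev API from this file.
 */
static __rte_unused void *
example_read_eeprom(uint8_t port_id, uint32_t *lenp)
{
        struct rte_dev_eeprom_info einfo;
        void *buf;
        int len;

        len = rte_eth_dev_get_eeprom_length(port_id);
        if (len <= 0)
                return NULL;

        buf = rte_zmalloc("eeprom_copy", len, 0);
        if (buf == NULL)
                return NULL;

        memset(&einfo, 0, sizeof(einfo));
        einfo.data = buf;
        einfo.offset = 0;
        einfo.length = len;

        if (rte_eth_dev_get_eeprom(port_id, &einfo) != 0) {
                rte_free(buf);
                return NULL;
        }

        *lenp = einfo.length;
        return buf;
}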

int
rte_eth_dev_get_dcb_info(uint8_t port_id,
                             struct rte_eth_dcb_info *dcb_info)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
        return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
}
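
/*
 * Illustrative usage sketch (not part of the ethdev implementation): after
 * configuring DCB, an application can read back the traffic-class layout the
 * driver reports, as shown below.  The helper name is an assumption for
 * illustration.
 */
static __rte_unused void
example_dump_dcb_info(uint8_t port_id)
{
        struct rte_eth_dcb_info dcb_info;
        int i;

        if (rte_eth_dev_get_dcb_info(port_id, &dcb_info) != 0)
                return;

        printf("port %u: %u traffic classes\n",
               (unsigned)port_id, (unsigned)dcb_info.nb_tcs);
        for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
                printf("  user priority %d -> TC %u\n",
                       i, (unsigned)dcb_info.prio_tc[i]);
}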

/*
 * Copy identification data from the PCI device into the ethdev data:
 * capability flags derived from the PCI driver flags (link-state interrupt,
 * detachable), the kernel driver type, the NUMA node and the driver name.
 * PMDs call this while initialising a port.
 */
void
rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev, struct rte_pci_device *pci_dev)
{
        if ((eth_dev == NULL) || (pci_dev == NULL)) {
                RTE_PMD_DEBUG_TRACE("NULL pointer eth_dev=%p pci_dev=%p\n",
                                eth_dev, pci_dev);
                return;
        }

        eth_dev->data->dev_flags = 0;
        if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
                eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
        if (pci_dev->driver->drv_flags & RTE_PCI_DRV_DETACHABLE)
                eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

        eth_dev->data->kdrv = pci_dev->kdrv;
        eth_dev->data->numa_node = pci_dev->device.numa_node;
        eth_dev->data->drv_name = pci_dev->driver->driver.name;
}

int
rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id,
                                    struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        if (l2_tunnel == NULL) {
                RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
                return -EINVAL;
        }

        if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
                RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
                return -EINVAL;
        }

        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
                                -ENOTSUP);
        return (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev, l2_tunnel);
}

int
rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
                                  struct rte_eth_l2_tunnel_conf *l2_tunnel,
                                  uint32_t mask,
                                  uint8_t en)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        if (l2_tunnel == NULL) {
                RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
                return -EINVAL;
        }

        if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
                RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
                return -EINVAL;
        }

        if (mask == 0) {
                RTE_PMD_DEBUG_TRACE("Mask should have a value.\n");
                return -EINVAL;
        }

        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
                                -ENOTSUP);
        return (*dev->dev_ops->l2_tunnel_offload_set)(dev, l2_tunnel, mask, en);
}
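
/*
 * Illustrative usage sketch (not part of the ethdev implementation): the two
 * l2_tunnel calls above are normally used together, first program the outer
 * ether type, then enable the wanted offload bits.  The helper name, the
 * E-Tag ether type value and the use of ETH_L2_TUNNEL_ENABLE_MASK below are
 * assumptions for illustration; actual support depends on the PMD.
 */
static __rte_unused int
example_enable_e_tag(uint8_t port_id)
{
        struct rte_eth_l2_tunnel_conf conf;
        int ret;

        memset(&conf, 0, sizeof(conf));
        conf.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
        conf.ether_type = 0x893f; /* IEEE 802.1BR E-Tag ether type */

        ret = rte_eth_dev_l2_tunnel_eth_type_conf(port_id, &conf);
        if (ret != 0)
                return ret;

        return rte_eth_dev_l2_tunnel_offload_set(port_id, &conf,
                                                 ETH_L2_TUNNEL_ENABLE_MASK, 1);
}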