ethdev: release queue before setting up
[dpdk.git] / lib / librte_ether / rte_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/types.h>
35 #include <sys/queue.h>
36 #include <ctype.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include <stdarg.h>
41 #include <errno.h>
42 #include <stdint.h>
43 #include <inttypes.h>
44 #include <netinet/in.h>
45
46 #include <rte_byteorder.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_interrupts.h>
50 #include <rte_pci.h>
51 #include <rte_memory.h>
52 #include <rte_memcpy.h>
53 #include <rte_memzone.h>
54 #include <rte_launch.h>
55 #include <rte_eal.h>
56 #include <rte_per_lcore.h>
57 #include <rte_lcore.h>
58 #include <rte_atomic.h>
59 #include <rte_branch_prediction.h>
60 #include <rte_common.h>
61 #include <rte_mempool.h>
62 #include <rte_malloc.h>
63 #include <rte_mbuf.h>
64 #include <rte_errno.h>
65 #include <rte_spinlock.h>
66 #include <rte_string_fns.h>
67
68 #include "rte_ether.h"
69 #include "rte_ethdev.h"
70
71 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
72 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
73 static struct rte_eth_dev_data *rte_eth_dev_data;
74 static uint8_t eth_dev_last_created_port;
75 static uint8_t nb_ports;
76
77 /* spinlock for eth device callbacks */
78 static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
79
80 /* spinlock for add/remove rx callbacks */
81 static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
82
83 /* spinlock for add/remove tx callbacks */
84 static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
85
86 /* store statistics names and their offsets in the stats structure */
87 struct rte_eth_xstats_name_off {
88         char name[RTE_ETH_XSTATS_NAME_SIZE];
89         unsigned offset;
90 };
91
92 static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
93         {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
94         {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
95         {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
96         {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
97         {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
98         {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
99         {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
100                 rx_nombuf)},
101 };
102
103 #define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
104
105 static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
106         {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
107         {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
108         {"errors", offsetof(struct rte_eth_stats, q_errors)},
109 };
110
111 #define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
112                 sizeof(rte_rxq_stats_strings[0]))
113
114 static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
115         {"packets", offsetof(struct rte_eth_stats, q_opackets)},
116         {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
117 };
118 #define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
119                 sizeof(rte_txq_stats_strings[0]))
120
121
122 /**
123  * The user application callback description.
124  *
125  * It contains the callback address to be registered by the user application,
126  * the pointer to the callback's parameters, and the event type.
127  */
128 struct rte_eth_dev_callback {
129         TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
130         rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
131         void *cb_arg;                           /**< Parameter for callback */
132         enum rte_eth_event_type event;          /**< Interrupt event type */
133         uint32_t active;                        /**< Callback is executing */
134 };
135
136 enum {
137         STAT_QMAP_TX = 0,
138         STAT_QMAP_RX
139 };
140
141 enum {
142         DEV_DETACHED = 0,
143         DEV_ATTACHED
144 };
145
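/*
 * Reserve (primary process) or look up (secondary process) the shared
 * memzone that holds the per-port rte_eth_dev_data array; the primary
 * process also zeroes it.
 */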
146 static void
147 rte_eth_dev_data_alloc(void)
148 {
149         const unsigned flags = 0;
150         const struct rte_memzone *mz;
151
152         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
153                 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
154                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
155                                 rte_socket_id(), flags);
156         } else
157                 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
158         if (mz == NULL)
159                 rte_panic("Cannot allocate memzone for ethernet port data\n");
160
161         rte_eth_dev_data = mz->addr;
162         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
163                 memset(rte_eth_dev_data, 0,
164                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
165 }
166
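/* Return the attached device whose name matches 'name', or NULL if none. */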
167 struct rte_eth_dev *
168 rte_eth_dev_allocated(const char *name)
169 {
170         unsigned i;
171
172         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
173                 if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
174                     strcmp(rte_eth_devices[i].data->name, name) == 0)
175                         return &rte_eth_devices[i];
176         }
177         return NULL;
178 }
179
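/* Return the first detached (free) port index, or RTE_MAX_ETHPORTS if all slots are used. */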
180 static uint8_t
181 rte_eth_dev_find_free_port(void)
182 {
183         unsigned i;
184
185         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
186                 if (rte_eth_devices[i].attached == DEV_DETACHED)
187                         return i;
188         }
189         return RTE_MAX_ETHPORTS;
190 }
191
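/*
 * Allocate a new ethdev slot: find a free port, initialize its shared data
 * (name, port_id, default MTU) and mark it attached.  Returns NULL if no
 * slot is free or the name is already in use.
 */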
192 struct rte_eth_dev *
193 rte_eth_dev_allocate(const char *name)
194 {
195         uint8_t port_id;
196         struct rte_eth_dev *eth_dev;
197
198         port_id = rte_eth_dev_find_free_port();
199         if (port_id == RTE_MAX_ETHPORTS) {
200                 RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
201                 return NULL;
202         }
203
204         if (rte_eth_dev_data == NULL)
205                 rte_eth_dev_data_alloc();
206
207         if (rte_eth_dev_allocated(name) != NULL) {
208                 RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
209                                 name);
210                 return NULL;
211         }
212
213         eth_dev = &rte_eth_devices[port_id];
214         eth_dev->data = &rte_eth_dev_data[port_id];
215         memset(eth_dev->data, 0, sizeof(*eth_dev->data));
216         snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
217         eth_dev->data->port_id = port_id;
218         eth_dev->data->mtu = ETHER_MTU;
219         TAILQ_INIT(&(eth_dev->link_intr_cbs));
220
221         eth_dev->attached = DEV_ATTACHED;
222         eth_dev_last_created_port = port_id;
223         nb_ports++;
224         return eth_dev;
225 }
226
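/* Mark the port as detached and decrement the global port count. */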
227 int
228 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
229 {
230         if (eth_dev == NULL)
231                 return -EINVAL;
232
233         eth_dev->attached = DEV_DETACHED;
234         nb_ports--;
235         return 0;
236 }
237
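/*
 * Generic PCI probe: allocate an ethdev for the device, allocate its private
 * data in the primary process, and invoke the PMD's eth_dev_init callback.
 * On failure the private data and the port are released again.
 */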
238 int
239 rte_eth_dev_pci_probe(struct rte_pci_driver *pci_drv,
240                       struct rte_pci_device *pci_dev)
241 {
242         struct eth_driver    *eth_drv;
243         struct rte_eth_dev *eth_dev;
244         char ethdev_name[RTE_ETH_NAME_MAX_LEN];
245
246         int diag;
247
248         eth_drv = (struct eth_driver *)pci_drv;
249
250         rte_eal_pci_device_name(&pci_dev->addr, ethdev_name,
251                         sizeof(ethdev_name));
252
253         eth_dev = rte_eth_dev_allocate(ethdev_name);
254         if (eth_dev == NULL)
255                 return -ENOMEM;
256
257         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
258                 eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
259                                   eth_drv->dev_private_size,
260                                   RTE_CACHE_LINE_SIZE);
261                 if (eth_dev->data->dev_private == NULL)
262                         rte_panic("Cannot allocate memzone for private port data\n");
263         }
264         eth_dev->pci_dev = pci_dev;
265         eth_dev->driver = eth_drv;
266
267         /* Invoke PMD device initialization function */
268         diag = (*eth_drv->eth_dev_init)(eth_dev);
269         if (diag == 0)
270                 return 0;
271
272         RTE_PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%x device_id=0x%x) failed\n",
273                         pci_drv->driver.name,
274                         (unsigned) pci_dev->id.vendor_id,
275                         (unsigned) pci_dev->id.device_id);
276         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
277                 rte_free(eth_dev->data->dev_private);
278         rte_eth_dev_release_port(eth_dev);
279         return diag;
280 }
281
282 int
283 rte_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
284 {
285         const struct eth_driver *eth_drv;
286         struct rte_eth_dev *eth_dev;
287         char ethdev_name[RTE_ETH_NAME_MAX_LEN];
288         int ret;
289
290         if (pci_dev == NULL)
291                 return -EINVAL;
292
293         rte_eal_pci_device_name(&pci_dev->addr, ethdev_name,
294                         sizeof(ethdev_name));
295
296         eth_dev = rte_eth_dev_allocated(ethdev_name);
297         if (eth_dev == NULL)
298                 return -ENODEV;
299
300         eth_drv = (const struct eth_driver *)pci_dev->driver;
301
302         /* Invoke PMD device uninit function */
303         if (*eth_drv->eth_dev_uninit) {
304                 ret = (*eth_drv->eth_dev_uninit)(eth_dev);
305                 if (ret)
306                         return ret;
307         }
308
309         /* free ether device */
310         rte_eth_dev_release_port(eth_dev);
311
312         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
313                 rte_free(eth_dev->data->dev_private);
314
315         eth_dev->pci_dev = NULL;
316         eth_dev->driver = NULL;
317         eth_dev->data = NULL;
318
319         return 0;
320 }
321
322 int
323 rte_eth_dev_is_valid_port(uint8_t port_id)
324 {
325         if (port_id >= RTE_MAX_ETHPORTS ||
326             rte_eth_devices[port_id].attached != DEV_ATTACHED)
327                 return 0;
328         else
329                 return 1;
330 }
331
332 int
333 rte_eth_dev_socket_id(uint8_t port_id)
334 {
335         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
336         return rte_eth_devices[port_id].data->numa_node;
337 }
338
339 uint8_t
340 rte_eth_dev_count(void)
341 {
342         return nb_ports;
343 }
344
345 int
346 rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
347 {
348         char *tmp;
349
350         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
351
352         if (name == NULL) {
353                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
354                 return -EINVAL;
355         }
356
357         /* Don't check 'rte_eth_devices[i].data' here,
358          * because it might be overwritten by a VDEV PMD. */
359         tmp = rte_eth_dev_data[port_id].name;
360         strcpy(name, tmp);
361         return 0;
362 }
363
364 int
365 rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
366 {
367         int i;
368
369         if (name == NULL) {
370                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
371                 return -EINVAL;
372         }
373
374         if (!nb_ports)
375                 return -ENODEV;
376
377         *port_id = RTE_MAX_ETHPORTS;
378
379         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
380
381                 if (!strncmp(name,
382                         rte_eth_dev_data[i].name, strlen(name))) {
383
384                         *port_id = i;
385
386                         return 0;
387                 }
388         }
389         return -ENODEV;
390 }
391
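/*
 * A port is detachable only when it is bound to a UIO-style kernel driver
 * (or none), carries the RTE_ETH_DEV_DETACHABLE flag and is not a bonded
 * slave.  Returns 0 when detachable, non-zero otherwise.
 */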
392 static int
393 rte_eth_dev_is_detachable(uint8_t port_id)
394 {
395         uint32_t dev_flags;
396
397         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
398
399         switch (rte_eth_devices[port_id].data->kdrv) {
400         case RTE_KDRV_IGB_UIO:
401         case RTE_KDRV_UIO_GENERIC:
402         case RTE_KDRV_NIC_UIO:
403         case RTE_KDRV_NONE:
404                 break;
405         case RTE_KDRV_VFIO:
406         default:
407                 return -ENOTSUP;
408         }
409         dev_flags = rte_eth_devices[port_id].data->dev_flags;
410         if ((dev_flags & RTE_ETH_DEV_DETACHABLE) &&
411                 (!(dev_flags & RTE_ETH_DEV_BONDED_SLAVE)))
412                 return 0;
413         else
414                 return 1;
415 }
416
417 /* attach a new device, then store the port_id of the newly attached device */
418 int
419 rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
420 {
421         int ret = -1;
422         int current = rte_eth_dev_count();
423         char *name = NULL;
424         char *args = NULL;
425
426         if ((devargs == NULL) || (port_id == NULL)) {
427                 ret = -EINVAL;
428                 goto err;
429         }
430
431         /* parse devargs, then retrieve device name and args */
432         if (rte_eal_parse_devargs_str(devargs, &name, &args))
433                 goto err;
434
435         ret = rte_eal_dev_attach(name, args);
436         if (ret < 0)
437                 goto err;
438
439         /* no point looking at the port count if no port exists */
440         if (!rte_eth_dev_count()) {
441                 RTE_LOG(ERR, EAL, "No port found for device (%s)\n", name);
442                 ret = -1;
443                 goto err;
444         }
445
446         /* if the port count did not change, there is a bug: the driver
447          * reported a successful attach but did not create a port.
448          */
449         if (current == rte_eth_dev_count()) {
450                 ret = -1;
451                 goto err;
452         }
453
454         *port_id = eth_dev_last_created_port;
455         ret = 0;
456
457 err:
458         free(name);
459         free(args);
460         return ret;
461 }
462
463 /* detach the device, then store the name of the device */
464 int
465 rte_eth_dev_detach(uint8_t port_id, char *name)
466 {
467         int ret = -1;
468
469         if (name == NULL) {
470                 ret = -EINVAL;
471                 goto err;
472         }
473
474         /* FIXME: move this to eal, once device flags are relocated there */
475         if (rte_eth_dev_is_detachable(port_id))
476                 goto err;
477
478         snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
479                  "%s", rte_eth_devices[port_id].data->name);
480         ret = rte_eal_dev_detach(name);
481         if (ret < 0)
482                 goto err;
483
484         return 0;
485
486 err:
487         return ret;
488 }
489
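/*
 * Grow or shrink the dev->data->rx_queues array to 'nb_queues' entries,
 * releasing any queues that fall beyond the new count via the PMD's
 * rx_queue_release callback.
 */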
490 static int
491 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
492 {
493         uint16_t old_nb_queues = dev->data->nb_rx_queues;
494         void **rxq;
495         unsigned i;
496
497         if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
498                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
499                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
500                                 RTE_CACHE_LINE_SIZE);
501                 if (dev->data->rx_queues == NULL) {
502                         dev->data->nb_rx_queues = 0;
503                         return -(ENOMEM);
504                 }
505         } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
506                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
507
508                 rxq = dev->data->rx_queues;
509
510                 for (i = nb_queues; i < old_nb_queues; i++)
511                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
512                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
513                                 RTE_CACHE_LINE_SIZE);
514                 if (rxq == NULL)
515                         return -(ENOMEM);
516                 if (nb_queues > old_nb_queues) {
517                         uint16_t new_qs = nb_queues - old_nb_queues;
518
519                         memset(rxq + old_nb_queues, 0,
520                                 sizeof(rxq[0]) * new_qs);
521                 }
522
523                 dev->data->rx_queues = rxq;
524
525         } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
526                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
527
528                 rxq = dev->data->rx_queues;
529
530                 for (i = nb_queues; i < old_nb_queues; i++)
531                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
532         }
533         dev->data->nb_rx_queues = nb_queues;
534         return 0;
535 }
536
537 int
538 rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
539 {
540         struct rte_eth_dev *dev;
541
542         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
543
544         dev = &rte_eth_devices[port_id];
545         if (rx_queue_id >= dev->data->nb_rx_queues) {
546                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
547                 return -EINVAL;
548         }
549
550         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
551
552         if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
553                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
554                         " already started\n",
555                         rx_queue_id, port_id);
556                 return 0;
557         }
558
559         return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
560
561 }
562
563 int
564 rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
565 {
566         struct rte_eth_dev *dev;
567
568         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
569
570         dev = &rte_eth_devices[port_id];
571         if (rx_queue_id >= dev->data->nb_rx_queues) {
572                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
573                 return -EINVAL;
574         }
575
576         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
577
578         if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
579                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
580                         " already stopped\n",
581                         rx_queue_id, port_id);
582                 return 0;
583         }
584
585         return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
586
587 }
588
589 int
590 rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
591 {
592         struct rte_eth_dev *dev;
593
594         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
595
596         dev = &rte_eth_devices[port_id];
597         if (tx_queue_id >= dev->data->nb_tx_queues) {
598                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
599                 return -EINVAL;
600         }
601
602         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
603
604         if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
605                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
606                         " already started\n",
607                         tx_queue_id, port_id);
608                 return 0;
609         }
610
611         return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
612
613 }
614
615 int
616 rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
617 {
618         struct rte_eth_dev *dev;
619
620         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
621
622         dev = &rte_eth_devices[port_id];
623         if (tx_queue_id >= dev->data->nb_tx_queues) {
624                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
625                 return -EINVAL;
626         }
627
628         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
629
630         if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
631                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
632                         " already stopped\n",
633                         tx_queue_id, port_id);
634                 return 0;
635         }
636
637         return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
638
639 }
640
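/*
 * TX counterpart of rte_eth_dev_rx_queue_config(): resize the
 * dev->data->tx_queues array and release queues beyond the new count.
 */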
641 static int
642 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
643 {
644         uint16_t old_nb_queues = dev->data->nb_tx_queues;
645         void **txq;
646         unsigned i;
647
648         if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
649                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
650                                                    sizeof(dev->data->tx_queues[0]) * nb_queues,
651                                                    RTE_CACHE_LINE_SIZE);
652                 if (dev->data->tx_queues == NULL) {
653                         dev->data->nb_tx_queues = 0;
654                         return -(ENOMEM);
655                 }
656         } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
657                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
658
659                 txq = dev->data->tx_queues;
660
661                 for (i = nb_queues; i < old_nb_queues; i++)
662                         (*dev->dev_ops->tx_queue_release)(txq[i]);
663                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
664                                   RTE_CACHE_LINE_SIZE);
665                 if (txq == NULL)
666                         return -ENOMEM;
667                 if (nb_queues > old_nb_queues) {
668                         uint16_t new_qs = nb_queues - old_nb_queues;
669
670                         memset(txq + old_nb_queues, 0,
671                                sizeof(txq[0]) * new_qs);
672                 }
673
674                 dev->data->tx_queues = txq;
675
676         } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
677                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
678
679                 txq = dev->data->tx_queues;
680
681                 for (i = nb_queues; i < old_nb_queues; i++)
682                         (*dev->dev_ops->tx_queue_release)(txq[i]);
683         }
684         dev->data->nb_tx_queues = nb_queues;
685         return 0;
686 }
687
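/*
 * Translate a numeric link speed (plus duplex for 10/100 Mb/s) into the
 * corresponding ETH_LINK_SPEED_* capability flag; unknown speeds map to 0.
 */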
688 uint32_t
689 rte_eth_speed_bitflag(uint32_t speed, int duplex)
690 {
691         switch (speed) {
692         case ETH_SPEED_NUM_10M:
693                 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
694         case ETH_SPEED_NUM_100M:
695                 return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
696         case ETH_SPEED_NUM_1G:
697                 return ETH_LINK_SPEED_1G;
698         case ETH_SPEED_NUM_2_5G:
699                 return ETH_LINK_SPEED_2_5G;
700         case ETH_SPEED_NUM_5G:
701                 return ETH_LINK_SPEED_5G;
702         case ETH_SPEED_NUM_10G:
703                 return ETH_LINK_SPEED_10G;
704         case ETH_SPEED_NUM_20G:
705                 return ETH_LINK_SPEED_20G;
706         case ETH_SPEED_NUM_25G:
707                 return ETH_LINK_SPEED_25G;
708         case ETH_SPEED_NUM_40G:
709                 return ETH_LINK_SPEED_40G;
710         case ETH_SPEED_NUM_50G:
711                 return ETH_LINK_SPEED_50G;
712         case ETH_SPEED_NUM_56G:
713                 return ETH_LINK_SPEED_56G;
714         case ETH_SPEED_NUM_100G:
715                 return ETH_LINK_SPEED_100G;
716         default:
717                 return 0;
718         }
719 }
720
721 int
722 rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
723                       const struct rte_eth_conf *dev_conf)
724 {
725         struct rte_eth_dev *dev;
726         struct rte_eth_dev_info dev_info;
727         int diag;
728
729         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
730
731         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
732                 RTE_PMD_DEBUG_TRACE(
733                         "Number of RX queues requested (%u) is greater than max supported(%d)\n",
734                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
735                 return -EINVAL;
736         }
737
738         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
739                 RTE_PMD_DEBUG_TRACE(
740                         "Number of TX queues requested (%u) is greater than max supported(%d)\n",
741                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
742                 return -EINVAL;
743         }
744
745         dev = &rte_eth_devices[port_id];
746
747         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
748         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
749
750         if (dev->data->dev_started) {
751                 RTE_PMD_DEBUG_TRACE(
752                     "port %d must be stopped to allow configuration\n", port_id);
753                 return -EBUSY;
754         }
755
756         /* Copy the dev_conf parameter into the dev structure */
757         memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
758
759         /*
760          * Check that the numbers of RX and TX queues are not greater
761          * than the maximum number of RX and TX queues supported by the
762          * configured device.
763          */
764         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
765
766         if (nb_rx_q == 0 && nb_tx_q == 0) {
767                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d both rx and tx queue cannot be 0\n", port_id);
768                 return -EINVAL;
769         }
770
771         if (nb_rx_q > dev_info.max_rx_queues) {
772                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
773                                 port_id, nb_rx_q, dev_info.max_rx_queues);
774                 return -EINVAL;
775         }
776
777         if (nb_tx_q > dev_info.max_tx_queues) {
778                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
779                                 port_id, nb_tx_q, dev_info.max_tx_queues);
780                 return -EINVAL;
781         }
782
783         /*
784          * If link state interrupt is enabled, check that the
785          * device supports it.
786          */
787         if ((dev_conf->intr_conf.lsc == 1) &&
788                 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
789                         RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
790                                         dev->data->drv_name);
791                         return -EINVAL;
792         }
793
794         /*
795          * If jumbo frames are enabled, check that the maximum RX packet
796          * length is supported by the configured device.
797          */
798         if (dev_conf->rxmode.jumbo_frame == 1) {
799                 if (dev_conf->rxmode.max_rx_pkt_len >
800                     dev_info.max_rx_pktlen) {
801                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
802                                 " > max valid value %u\n",
803                                 port_id,
804                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
805                                 (unsigned)dev_info.max_rx_pktlen);
806                         return -EINVAL;
807                 } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
808                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
809                                 " < min valid value %u\n",
810                                 port_id,
811                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
812                                 (unsigned)ETHER_MIN_LEN);
813                         return -EINVAL;
814                 }
815         } else {
816                 if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
817                         dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
818                         /* Use default value */
819                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
820                                                         ETHER_MAX_LEN;
821         }
822
823         /*
824          * Setup new number of RX/TX queues and reconfigure device.
825          */
826         diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
827         if (diag != 0) {
828                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
829                                 port_id, diag);
830                 return diag;
831         }
832
833         diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
834         if (diag != 0) {
835                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
836                                 port_id, diag);
837                 rte_eth_dev_rx_queue_config(dev, 0);
838                 return diag;
839         }
840
841         diag = (*dev->dev_ops->dev_configure)(dev);
842         if (diag != 0) {
843                 RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
844                                 port_id, diag);
845                 rte_eth_dev_rx_queue_config(dev, 0);
846                 rte_eth_dev_tx_queue_config(dev, 0);
847                 return diag;
848         }
849
850         return 0;
851 }
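/*
 * Typical call sequence (illustrative sketch, not part of this file):
 * configure the port, set up its queues, then start it.  "mbuf_pool" and
 * the descriptor counts below are application-side assumptions.
 *
 *   struct rte_eth_conf conf = { .rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN } };
 *   if (rte_eth_dev_configure(port_id, 1, 1, &conf) < 0)
 *           rte_exit(EXIT_FAILURE, "cannot configure port\n");
 *   if (rte_eth_rx_queue_setup(port_id, 0, 128,
 *                   rte_eth_dev_socket_id(port_id), NULL, mbuf_pool) < 0 ||
 *       rte_eth_tx_queue_setup(port_id, 0, 512,
 *                   rte_eth_dev_socket_id(port_id), NULL) < 0)
 *           rte_exit(EXIT_FAILURE, "cannot set up queues\n");
 *   rte_eth_dev_start(port_id);
 */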
852
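/*
 * Re-apply the software-visible configuration (MAC addresses, promiscuous
 * and all-multicast modes) to the hardware after the port has been started.
 */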
853 static void
854 rte_eth_dev_config_restore(uint8_t port_id)
855 {
856         struct rte_eth_dev *dev;
857         struct rte_eth_dev_info dev_info;
858         struct ether_addr addr;
859         uint16_t i;
860         uint32_t pool = 0;
861
862         dev = &rte_eth_devices[port_id];
863
864         rte_eth_dev_info_get(port_id, &dev_info);
865
866         if (RTE_ETH_DEV_SRIOV(dev).active)
867                 pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;
868
869         /* replay MAC address configuration */
870         for (i = 0; i < dev_info.max_mac_addrs; i++) {
871                 addr = dev->data->mac_addrs[i];
872
873                 /* skip zero address */
874                 if (is_zero_ether_addr(&addr))
875                         continue;
876
877                 /* add address to the hardware */
878                 if  (*dev->dev_ops->mac_addr_add &&
879                         (dev->data->mac_pool_sel[i] & (1ULL << pool)))
880                         (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
881                 else {
882                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
883                                         port_id);
884                         /* exit the loop but do not return an error */
885                         break;
886                 }
887         }
888
889         /* replay promiscuous configuration */
890         if (rte_eth_promiscuous_get(port_id) == 1)
891                 rte_eth_promiscuous_enable(port_id);
892         else if (rte_eth_promiscuous_get(port_id) == 0)
893                 rte_eth_promiscuous_disable(port_id);
894
895         /* replay all multicast configuration */
896         if (rte_eth_allmulticast_get(port_id) == 1)
897                 rte_eth_allmulticast_enable(port_id);
898         else if (rte_eth_allmulticast_get(port_id) == 0)
899                 rte_eth_allmulticast_disable(port_id);
900 }
901
902 int
903 rte_eth_dev_start(uint8_t port_id)
904 {
905         struct rte_eth_dev *dev;
906         int diag;
907
908         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
909
910         dev = &rte_eth_devices[port_id];
911
912         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
913
914         if (dev->data->dev_started != 0) {
915                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
916                         " already started\n",
917                         port_id);
918                 return 0;
919         }
920
921         diag = (*dev->dev_ops->dev_start)(dev);
922         if (diag == 0)
923                 dev->data->dev_started = 1;
924         else
925                 return diag;
926
927         rte_eth_dev_config_restore(port_id);
928
929         if (dev->data->dev_conf.intr_conf.lsc == 0) {
930                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
931                 (*dev->dev_ops->link_update)(dev, 0);
932         }
933         return 0;
934 }
935
936 void
937 rte_eth_dev_stop(uint8_t port_id)
938 {
939         struct rte_eth_dev *dev;
940
941         RTE_ETH_VALID_PORTID_OR_RET(port_id);
942         dev = &rte_eth_devices[port_id];
943
944         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
945
946         if (dev->data->dev_started == 0) {
947                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
948                         " already stopped\n",
949                         port_id);
950                 return;
951         }
952
953         dev->data->dev_started = 0;
954         (*dev->dev_ops->dev_stop)(dev);
955 }
956
957 int
958 rte_eth_dev_set_link_up(uint8_t port_id)
959 {
960         struct rte_eth_dev *dev;
961
962         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
963
964         dev = &rte_eth_devices[port_id];
965
966         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
967         return (*dev->dev_ops->dev_set_link_up)(dev);
968 }
969
970 int
971 rte_eth_dev_set_link_down(uint8_t port_id)
972 {
973         struct rte_eth_dev *dev;
974
975         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
976
977         dev = &rte_eth_devices[port_id];
978
979         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
980         return (*dev->dev_ops->dev_set_link_down)(dev);
981 }
982
983 void
984 rte_eth_dev_close(uint8_t port_id)
985 {
986         struct rte_eth_dev *dev;
987
988         RTE_ETH_VALID_PORTID_OR_RET(port_id);
989         dev = &rte_eth_devices[port_id];
990
991         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
992         dev->data->dev_started = 0;
993         (*dev->dev_ops->dev_close)(dev);
994
995         rte_free(dev->data->rx_queues);
996         dev->data->rx_queues = NULL;
997         rte_free(dev->data->tx_queues);
998         dev->data->tx_queues = NULL;
999 }
1000
1001 int
1002 rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
1003                        uint16_t nb_rx_desc, unsigned int socket_id,
1004                        const struct rte_eth_rxconf *rx_conf,
1005                        struct rte_mempool *mp)
1006 {
1007         int ret;
1008         uint32_t mbp_buf_size;
1009         struct rte_eth_dev *dev;
1010         struct rte_eth_dev_info dev_info;
1011         void **rxq;
1012
1013         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1014
1015         dev = &rte_eth_devices[port_id];
1016         if (rx_queue_id >= dev->data->nb_rx_queues) {
1017                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
1018                 return -EINVAL;
1019         }
1020
1021         if (dev->data->dev_started) {
1022                 RTE_PMD_DEBUG_TRACE(
1023                     "port %d must be stopped to allow configuration\n", port_id);
1024                 return -EBUSY;
1025         }
1026
1027         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1028         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1029
1030         /*
1031          * Check the size of the mbuf data buffer.
1032          * This value must be provided in the private data of the memory pool.
1033          * First check that the memory pool has valid private data.
1034          */
1035         rte_eth_dev_info_get(port_id, &dev_info);
1036         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1037                 RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
1038                                 mp->name, (int) mp->private_data_size,
1039                                 (int) sizeof(struct rte_pktmbuf_pool_private));
1040                 return -ENOSPC;
1041         }
1042         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1043
1044         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1045                 RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
1046                                 "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
1047                                 "=%d)\n",
1048                                 mp->name,
1049                                 (int)mbp_buf_size,
1050                                 (int)(RTE_PKTMBUF_HEADROOM +
1051                                       dev_info.min_rx_bufsize),
1052                                 (int)RTE_PKTMBUF_HEADROOM,
1053                                 (int)dev_info.min_rx_bufsize);
1054                 return -EINVAL;
1055         }
1056
1057         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1058                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1059                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1060
1061                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
1062                         "should be: <= %hu, = %hu, and a product of %hu\n",
1063                         nb_rx_desc,
1064                         dev_info.rx_desc_lim.nb_max,
1065                         dev_info.rx_desc_lim.nb_min,
1066                         dev_info.rx_desc_lim.nb_align);
1067                 return -EINVAL;
1068         }
1069
1070         rxq = dev->data->rx_queues;
1071         if (rxq[rx_queue_id]) {
1072                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1073                                         -ENOTSUP);
1074                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1075                 rxq[rx_queue_id] = NULL;
1076         }
1077
1078         if (rx_conf == NULL)
1079                 rx_conf = &dev_info.default_rxconf;
1080
1081         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1082                                               socket_id, rx_conf, mp);
1083         if (!ret) {
1084                 if (!dev->data->min_rx_buf_size ||
1085                     dev->data->min_rx_buf_size > mbp_buf_size)
1086                         dev->data->min_rx_buf_size = mbp_buf_size;
1087         }
1088
1089         return ret;
1090 }
1091
1092 int
1093 rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
1094                        uint16_t nb_tx_desc, unsigned int socket_id,
1095                        const struct rte_eth_txconf *tx_conf)
1096 {
1097         struct rte_eth_dev *dev;
1098         struct rte_eth_dev_info dev_info;
1099         void **txq;
1100
1101         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1102
1103         dev = &rte_eth_devices[port_id];
1104         if (tx_queue_id >= dev->data->nb_tx_queues) {
1105                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1106                 return -EINVAL;
1107         }
1108
1109         if (dev->data->dev_started) {
1110                 RTE_PMD_DEBUG_TRACE(
1111                     "port %d must be stopped to allow configuration\n", port_id);
1112                 return -EBUSY;
1113         }
1114
1115         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1116         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1117
1118         rte_eth_dev_info_get(port_id, &dev_info);
1119
1120         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1121             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1122             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1123                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
1124                                 "should be: <= %hu, = %hu, and a product of %hu\n",
1125                                 nb_tx_desc,
1126                                 dev_info.tx_desc_lim.nb_max,
1127                                 dev_info.tx_desc_lim.nb_min,
1128                                 dev_info.tx_desc_lim.nb_align);
1129                 return -EINVAL;
1130         }
1131
1132         txq = dev->data->tx_queues;
1133         if (txq[tx_queue_id]) {
1134                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
1135                                         -ENOTSUP);
1136                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
1137                 txq[tx_queue_id] = NULL;
1138         }
1139
1140         if (tx_conf == NULL)
1141                 tx_conf = &dev_info.default_txconf;
1142
1143         return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
1144                                                socket_id, tx_conf);
1145 }
1146
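/* Default TX buffer error callback: silently free any unsent packets. */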
1147 void
1148 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
1149                 void *userdata __rte_unused)
1150 {
1151         unsigned i;
1152
1153         for (i = 0; i < unsent; i++)
1154                 rte_pktmbuf_free(pkts[i]);
1155 }
1156
1157 void
1158 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
1159                 void *userdata)
1160 {
1161         uint64_t *count = userdata;
1162         unsigned i;
1163
1164         for (i = 0; i < unsent; i++)
1165                 rte_pktmbuf_free(pkts[i]);
1166
1167         *count += unsent;
1168 }
1169
1170 int
1171 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
1172                 buffer_tx_error_fn cbfn, void *userdata)
1173 {
1174         buffer->error_callback = cbfn;
1175         buffer->error_userdata = userdata;
1176         return 0;
1177 }
1178
1179 int
1180 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
1181 {
1182         int ret = 0;
1183
1184         if (buffer == NULL)
1185                 return -EINVAL;
1186
1187         buffer->size = size;
1188         if (buffer->error_callback == NULL) {
1189                 ret = rte_eth_tx_buffer_set_err_callback(
1190                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
1191         }
1192
1193         return ret;
1194 }
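/*
 * Example (illustrative sketch, not part of this file): allocate a TX buffer
 * sized for BURST_SIZE packets and count drops instead of freeing them
 * silently.  "BURST_SIZE" and "dropped" are assumed application-side names.
 *
 *   static uint64_t dropped;
 *   struct rte_eth_dev_tx_buffer *buf = rte_zmalloc("tx_buffer",
 *                   RTE_ETH_TX_BUFFER_SIZE(BURST_SIZE), 0);
 *   rte_eth_tx_buffer_init(buf, BURST_SIZE);
 *   rte_eth_tx_buffer_set_err_callback(buf,
 *                   rte_eth_tx_buffer_count_callback, &dropped);
 */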
1195
1196 void
1197 rte_eth_promiscuous_enable(uint8_t port_id)
1198 {
1199         struct rte_eth_dev *dev;
1200
1201         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1202         dev = &rte_eth_devices[port_id];
1203
1204         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1205         (*dev->dev_ops->promiscuous_enable)(dev);
1206         dev->data->promiscuous = 1;
1207 }
1208
1209 void
1210 rte_eth_promiscuous_disable(uint8_t port_id)
1211 {
1212         struct rte_eth_dev *dev;
1213
1214         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1215         dev = &rte_eth_devices[port_id];
1216
1217         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1218         dev->data->promiscuous = 0;
1219         (*dev->dev_ops->promiscuous_disable)(dev);
1220 }
1221
1222 int
1223 rte_eth_promiscuous_get(uint8_t port_id)
1224 {
1225         struct rte_eth_dev *dev;
1226
1227         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1228
1229         dev = &rte_eth_devices[port_id];
1230         return dev->data->promiscuous;
1231 }
1232
1233 void
1234 rte_eth_allmulticast_enable(uint8_t port_id)
1235 {
1236         struct rte_eth_dev *dev;
1237
1238         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1239         dev = &rte_eth_devices[port_id];
1240
1241         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1242         (*dev->dev_ops->allmulticast_enable)(dev);
1243         dev->data->all_multicast = 1;
1244 }
1245
1246 void
1247 rte_eth_allmulticast_disable(uint8_t port_id)
1248 {
1249         struct rte_eth_dev *dev;
1250
1251         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1252         dev = &rte_eth_devices[port_id];
1253
1254         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1255         dev->data->all_multicast = 0;
1256         (*dev->dev_ops->allmulticast_disable)(dev);
1257 }
1258
1259 int
1260 rte_eth_allmulticast_get(uint8_t port_id)
1261 {
1262         struct rte_eth_dev *dev;
1263
1264         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1265
1266         dev = &rte_eth_devices[port_id];
1267         return dev->data->all_multicast;
1268 }
1269
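/*
 * Atomically copy the shared link status into *link using a 64-bit
 * compare-and-set; returns -1 if a concurrent update raced with the copy.
 */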
1270 static inline int
1271 rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
1272                                 struct rte_eth_link *link)
1273 {
1274         struct rte_eth_link *dst = link;
1275         struct rte_eth_link *src = &(dev->data->dev_link);
1276
1277         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1278                                         *(uint64_t *)src) == 0)
1279                 return -1;
1280
1281         return 0;
1282 }
1283
1284 void
1285 rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
1286 {
1287         struct rte_eth_dev *dev;
1288
1289         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1290         dev = &rte_eth_devices[port_id];
1291
1292         if (dev->data->dev_conf.intr_conf.lsc != 0)
1293                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1294         else {
1295                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1296                 (*dev->dev_ops->link_update)(dev, 1);
1297                 *eth_link = dev->data->dev_link;
1298         }
1299 }
1300
1301 void
1302 rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
1303 {
1304         struct rte_eth_dev *dev;
1305
1306         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1307         dev = &rte_eth_devices[port_id];
1308
1309         if (dev->data->dev_conf.intr_conf.lsc != 0)
1310                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1311         else {
1312                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1313                 (*dev->dev_ops->link_update)(dev, 0);
1314                 *eth_link = dev->data->dev_link;
1315         }
1316 }
1317
1318 int
1319 rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
1320 {
1321         struct rte_eth_dev *dev;
1322
1323         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1324
1325         dev = &rte_eth_devices[port_id];
1326         memset(stats, 0, sizeof(*stats));
1327
1328         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1329         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1330         (*dev->dev_ops->stats_get)(dev, stats);
1331         return 0;
1332 }
1333
1334 void
1335 rte_eth_stats_reset(uint8_t port_id)
1336 {
1337         struct rte_eth_dev *dev;
1338
1339         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1340         dev = &rte_eth_devices[port_id];
1341
1342         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1343         (*dev->dev_ops->stats_reset)(dev);
1344         dev->data->rx_mbuf_alloc_failed = 0;
1345 }
1346
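/*
 * Total number of xstats for a port: the generic stats, the per-queue RX/TX
 * stats (limited to RTE_ETHDEV_QUEUE_STAT_CNTRS queues) and any
 * driver-specific entries.
 */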
1347 static int
1348 get_xstats_count(uint8_t port_id)
1349 {
1350         struct rte_eth_dev *dev;
1351         int count;
1352
1353         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1354         dev = &rte_eth_devices[port_id];
1355         if (dev->dev_ops->xstats_get_names != NULL) {
1356                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
1357                 if (count < 0)
1358                         return count;
1359         } else
1360                 count = 0;
1361         count += RTE_NB_STATS;
1362         count += RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS) *
1363                  RTE_NB_RXQ_STATS;
1364         count += RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS) *
1365                  RTE_NB_TXQ_STATS;
1366         return count;
1367 }
1368
1369 int
1370 rte_eth_xstats_get_names(uint8_t port_id,
1371         struct rte_eth_xstat_name *xstats_names,
1372         unsigned size)
1373 {
1374         struct rte_eth_dev *dev;
1375         int cnt_used_entries;
1376         int cnt_expected_entries;
1377         int cnt_driver_entries;
1378         uint32_t idx, id_queue;
1379         uint16_t num_q;
1380
1381         cnt_expected_entries = get_xstats_count(port_id);
1382         if (xstats_names == NULL || cnt_expected_entries < 0 ||
1383                         (int)size < cnt_expected_entries)
1384                 return cnt_expected_entries;
1385
1386         /* port_id checked in get_xstats_count() */
1387         dev = &rte_eth_devices[port_id];
1388         cnt_used_entries = 0;
1389
1390         for (idx = 0; idx < RTE_NB_STATS; idx++) {
1391                 snprintf(xstats_names[cnt_used_entries].name,
1392                         sizeof(xstats_names[0].name),
1393                         "%s", rte_stats_strings[idx].name);
1394                 cnt_used_entries++;
1395         }
1396         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1397         for (id_queue = 0; id_queue < num_q; id_queue++) {
1398                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
1399                         snprintf(xstats_names[cnt_used_entries].name,
1400                                 sizeof(xstats_names[0].name),
1401                                 "rx_q%u%s",
1402                                 id_queue, rte_rxq_stats_strings[idx].name);
1403                         cnt_used_entries++;
1404                 }
1405
1406         }
1407         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1408         for (id_queue = 0; id_queue < num_q; id_queue++) {
1409                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
1410                         snprintf(xstats_names[cnt_used_entries].name,
1411                                 sizeof(xstats_names[0].name),
1412                                 "tx_q%u%s",
1413                                 id_queue, rte_txq_stats_strings[idx].name);
1414                         cnt_used_entries++;
1415                 }
1416         }
1417
1418         if (dev->dev_ops->xstats_get_names != NULL) {
1419                 /* If there are any driver-specific xstats, append them
1420                  * to end of list.
1421                  */
1422                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
1423                         dev,
1424                         xstats_names + cnt_used_entries,
1425                         size - cnt_used_entries);
1426                 if (cnt_driver_entries < 0)
1427                         return cnt_driver_entries;
1428                 cnt_used_entries += cnt_driver_entries;
1429         }
1430
1431         return cnt_used_entries;
1432 }
1433
1434 /* retrieve ethdev extended statistics */
1435 int
1436 rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
1437         unsigned n)
1438 {
1439         struct rte_eth_stats eth_stats;
1440         struct rte_eth_dev *dev;
1441         unsigned count = 0, i, q;
1442         signed xcount = 0;
1443         uint64_t val, *stats_ptr;
1444         uint16_t nb_rxqs, nb_txqs;
1445
1446         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1447
1448         dev = &rte_eth_devices[port_id];
1449
1450         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1451         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1452
1453         /* Return generic statistics */
1454         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
1455                 (nb_txqs * RTE_NB_TXQ_STATS);
1456
1457         /* implemented by the driver */
1458         if (dev->dev_ops->xstats_get != NULL) {
1459                 /* Retrieve the xstats from the driver at the end of the
1460                  * xstats struct.
1461                  */
1462                 xcount = (*dev->dev_ops->xstats_get)(dev,
1463                                      xstats ? xstats + count : NULL,
1464                                      (n > count) ? n - count : 0);
1465
1466                 if (xcount < 0)
1467                         return xcount;
1468         }
1469
1470         if (n < count + xcount || xstats == NULL)
1471                 return count + xcount;
1472
1473         /* now fill the xstats structure */
1474         count = 0;
1475         rte_eth_stats_get(port_id, &eth_stats);
1476
1477         /* global stats */
1478         for (i = 0; i < RTE_NB_STATS; i++) {
1479                 stats_ptr = RTE_PTR_ADD(&eth_stats,
1480                                         rte_stats_strings[i].offset);
1481                 val = *stats_ptr;
1482                 xstats[count++].value = val;
1483         }
1484
1485         /* per-rxq stats */
1486         for (q = 0; q < nb_rxqs; q++) {
1487                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
1488                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1489                                         rte_rxq_stats_strings[i].offset +
1490                                         q * sizeof(uint64_t));
1491                         val = *stats_ptr;
1492                         xstats[count++].value = val;
1493                 }
1494         }
1495
1496         /* per-txq stats */
1497         for (q = 0; q < nb_txqs; q++) {
1498                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
1499                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1500                                         rte_txq_stats_strings[i].offset +
1501                                         q * sizeof(uint64_t));
1502                         val = *stats_ptr;
1503                         xstats[count++].value = val;
1504                 }
1505         }
1506
1507         for (i = 0; i < count + xcount; i++)
1508                 xstats[i].id = i;
1509
1510         return count + xcount;
1511 }
1512
1513 /* reset ethdev extended statistics */
1514 void
1515 rte_eth_xstats_reset(uint8_t port_id)
1516 {
1517         struct rte_eth_dev *dev;
1518
1519         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1520         dev = &rte_eth_devices[port_id];
1521
1522         /* implemented by the driver */
1523         if (dev->dev_ops->xstats_reset != NULL) {
1524                 (*dev->dev_ops->xstats_reset)(dev);
1525                 return;
1526         }
1527
1528         /* fallback to default */
1529         rte_eth_stats_reset(port_id);
1530 }
1531
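/*
 * Map an RX or TX queue to one of the limited per-queue hardware statistics
 * counters, if the PMD supports it.
 */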
1532 static int
1533 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1534                 uint8_t is_rx)
1535 {
1536         struct rte_eth_dev *dev;
1537
1538         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1539
1540         dev = &rte_eth_devices[port_id];
1541
1542         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1543         return (*dev->dev_ops->queue_stats_mapping_set)
1544                         (dev, queue_id, stat_idx, is_rx);
1545 }
1546
1547
1548 int
1549 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1550                 uint8_t stat_idx)
1551 {
1552         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1553                         STAT_QMAP_TX);
1554 }
1555
1556
1557 int
1558 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1559                 uint8_t stat_idx)
1560 {
1561         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1562                         STAT_QMAP_RX);
1563 }
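
/*
 * Usage sketch (illustrative): before starting the port, a caller can map a
 * queue onto one of the RTE_ETHDEV_QUEUE_STAT_CNTRS per-queue counter slots,
 * e.g. RX queue 3 onto stat counter 1; -ENOTSUP is returned when the PMD
 * provides no queue_stats_mapping_set op, as checked above.
 *
 *     if (rte_eth_dev_set_rx_queue_stats_mapping(port_id, 3, 1) != 0)
 *             ... fall back to aggregate counters only ...
 */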
1564
1565 void
1566 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1567 {
1568         struct rte_eth_dev *dev;
1569         const struct rte_eth_desc_lim lim = {
1570                 .nb_max = UINT16_MAX,
1571                 .nb_min = 0,
1572                 .nb_align = 1,
1573         };
1574
1575         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1576         dev = &rte_eth_devices[port_id];
1577
1578         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
1579         dev_info->rx_desc_lim = lim;
1580         dev_info->tx_desc_lim = lim;
1581
1582         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1583         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1584         dev_info->pci_dev = dev->pci_dev;
1585         dev_info->driver_name = dev->data->drv_name;
1586         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
1587         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
1588 }
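
/*
 * Usage sketch (illustrative; nb_rxq/nb_txq stand for the application's
 * requested queue counts): device limits are usually queried before
 * configuring queues.
 *
 *     struct rte_eth_dev_info dev_info;
 *
 *     rte_eth_dev_info_get(port_id, &dev_info);
 *     if (nb_rxq > dev_info.max_rx_queues || nb_txq > dev_info.max_tx_queues)
 *             ... scale the requested queue counts down ...
 */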
1589
1590 int
1591 rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask,
1592                                  uint32_t *ptypes, int num)
1593 {
1594         int i, j;
1595         struct rte_eth_dev *dev;
1596         const uint32_t *all_ptypes;
1597
1598         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1599         dev = &rte_eth_devices[port_id];
1600         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
1601         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
1602
1603         if (!all_ptypes)
1604                 return 0;
1605
1606         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
1607                 if (all_ptypes[i] & ptype_mask) {
1608                         if (j < num)
1609                                 ptypes[j] = all_ptypes[i];
1610                         j++;
1611                 }
1612
1613         return j;
1614 }
1615
1616 void
1617 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1618 {
1619         struct rte_eth_dev *dev;
1620
1621         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1622         dev = &rte_eth_devices[port_id];
1623         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1624 }
1625
1626
1627 int
1628 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1629 {
1630         struct rte_eth_dev *dev;
1631
1632         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1633
1634         dev = &rte_eth_devices[port_id];
1635         *mtu = dev->data->mtu;
1636         return 0;
1637 }
1638
1639 int
1640 rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
1641 {
1642         int ret;
1643         struct rte_eth_dev *dev;
1644
1645         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1646         dev = &rte_eth_devices[port_id];
1647         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
1648
1649         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
1650         if (!ret)
1651                 dev->data->mtu = mtu;
1652
1653         return ret;
1654 }
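
/*
 * Usage sketch (illustrative): enabling jumbo frames by raising the MTU and
 * reading the cached value back; on success dev->data->mtu is updated above,
 * so rte_eth_dev_get_mtu() reflects the new setting.
 *
 *     uint16_t mtu;
 *
 *     if (rte_eth_dev_set_mtu(port_id, 9000) == 0 &&
 *         rte_eth_dev_get_mtu(port_id, &mtu) == 0)
 *             ... mtu is now 9000 ...
 */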
1655
1656 int
1657 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1658 {
1659         struct rte_eth_dev *dev;
1660
1661         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1662         dev = &rte_eth_devices[port_id];
1663         if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1664                 RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1665                 return -ENOSYS;
1666         }
1667
1668         if (vlan_id > 4095) {
1669                 RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1670                                 port_id, (unsigned) vlan_id);
1671                 return -EINVAL;
1672         }
1673         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1674
1675         return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1676 }
1677
1678 int
1679 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1680 {
1681         struct rte_eth_dev *dev;
1682
1683         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1684         dev = &rte_eth_devices[port_id];
1685         if (rx_queue_id >= dev->data->nb_rx_queues) {
1686                 RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
1687                 return -EINVAL;
1688         }
1689
1690         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1691         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1692
1693         return 0;
1694 }
1695
1696 int
1697 rte_eth_dev_set_vlan_ether_type(uint8_t port_id,
1698                                 enum rte_vlan_type vlan_type,
1699                                 uint16_t tpid)
1700 {
1701         struct rte_eth_dev *dev;
1702
1703         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1704         dev = &rte_eth_devices[port_id];
1705         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1706
1707         return (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, tpid);
1708 }
1709
1710 int
1711 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
1712 {
1713         struct rte_eth_dev *dev;
1714         int ret = 0;
1715         int mask = 0;
1716         int cur, org = 0;
1717
1718         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1719         dev = &rte_eth_devices[port_id];
1720
1721         /* check which options were changed by the application */
1722         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
1723         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
1724         if (cur != org) {
1725                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
1726                 mask |= ETH_VLAN_STRIP_MASK;
1727         }
1728
1729         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
1730         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
1731         if (cur != org) {
1732                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
1733                 mask |= ETH_VLAN_FILTER_MASK;
1734         }
1735
1736         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
1737         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
1738         if (cur != org) {
1739                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
1740                 mask |= ETH_VLAN_EXTEND_MASK;
1741         }
1742
1743         /* no change */
1744         if (mask == 0)
1745                 return ret;
1746
1747         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
1748         (*dev->dev_ops->vlan_offload_set)(dev, mask);
1749
1750         return ret;
1751 }
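
/*
 * Usage sketch (illustrative): the offload mask passed in is absolute, so
 * callers typically read the current mask, modify the bits of interest and
 * write the whole mask back, e.g. to turn on VLAN stripping only.
 *
 *     int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *     if (mask >= 0) {
 *             mask |= ETH_VLAN_STRIP_OFFLOAD;
 *             rte_eth_dev_set_vlan_offload(port_id, mask);
 *     }
 */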
1752
1753 int
1754 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1755 {
1756         struct rte_eth_dev *dev;
1757         int ret = 0;
1758
1759         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1760         dev = &rte_eth_devices[port_id];
1761
1762         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1763                 ret |= ETH_VLAN_STRIP_OFFLOAD;
1764
1765         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1766                 ret |= ETH_VLAN_FILTER_OFFLOAD;
1767
1768         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1769                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
1770
1771         return ret;
1772 }
1773
1774 int
1775 rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
1776 {
1777         struct rte_eth_dev *dev;
1778
1779         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1780         dev = &rte_eth_devices[port_id];
1781         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
1782         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
1783
1784         return 0;
1785 }
1786
1787 int
1788 rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1789 {
1790         struct rte_eth_dev *dev;
1791
1792         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1793         dev = &rte_eth_devices[port_id];
1794         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
1795         memset(fc_conf, 0, sizeof(*fc_conf));
1796         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
1797 }
1798
1799 int
1800 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1801 {
1802         struct rte_eth_dev *dev;
1803
1804         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1805         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
1806                 RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
1807                 return -EINVAL;
1808         }
1809
1810         dev = &rte_eth_devices[port_id];
1811         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
1812         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
1813 }
1814
1815 int
1816 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
1817 {
1818         struct rte_eth_dev *dev;
1819
1820         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1821         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
1822                 RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
1823                 return -EINVAL;
1824         }
1825
1826         dev = &rte_eth_devices[port_id];
1827         /* High water, low water validation are device specific */
1828         if  (*dev->dev_ops->priority_flow_ctrl_set)
1829                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
1830         return -ENOTSUP;
1831 }
1832
1833 static int
1834 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
1835                         uint16_t reta_size)
1836 {
1837         uint16_t i, num;
1838
1839         if (!reta_conf)
1840                 return -EINVAL;
1841
1842         if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
1843                 RTE_PMD_DEBUG_TRACE("Invalid reta size, must be a multiple of %u\n",
1844                                                         RTE_RETA_GROUP_SIZE);
1845                 return -EINVAL;
1846         }
1847
1848         num = reta_size / RTE_RETA_GROUP_SIZE;
1849         for (i = 0; i < num; i++) {
1850                 if (reta_conf[i].mask)
1851                         return 0;
1852         }
1853
1854         return -EINVAL;
1855 }
1856
1857 static int
1858 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
1859                          uint16_t reta_size,
1860                          uint16_t max_rxq)
1861 {
1862         uint16_t i, idx, shift;
1863
1864         if (!reta_conf)
1865                 return -EINVAL;
1866
1867         if (max_rxq == 0) {
1868                 RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
1869                 return -EINVAL;
1870         }
1871
1872         for (i = 0; i < reta_size; i++) {
1873                 idx = i / RTE_RETA_GROUP_SIZE;
1874                 shift = i % RTE_RETA_GROUP_SIZE;
1875                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
1876                         (reta_conf[idx].reta[shift] >= max_rxq)) {
1877                         RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
1878                                 "the maximum rxq index: %u\n", idx, shift,
1879                                 reta_conf[idx].reta[shift], max_rxq);
1880                         return -EINVAL;
1881                 }
1882         }
1883
1884         return 0;
1885 }
1886
1887 int
1888 rte_eth_dev_rss_reta_update(uint8_t port_id,
1889                             struct rte_eth_rss_reta_entry64 *reta_conf,
1890                             uint16_t reta_size)
1891 {
1892         struct rte_eth_dev *dev;
1893         int ret;
1894
1895         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1896         /* Check mask bits */
1897         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1898         if (ret < 0)
1899                 return ret;
1900
1901         dev = &rte_eth_devices[port_id];
1902
1903         /* Check entry value */
1904         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
1905                                 dev->data->nb_rx_queues);
1906         if (ret < 0)
1907                 return ret;
1908
1909         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
1910         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
1911 }
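
/*
 * Usage sketch (illustrative; reta_size is assumed to come from the
 * reta_size field reported by rte_eth_dev_info_get()): spreading the
 * redirection table over nb_rx_queues in round-robin fashion. Every entry
 * that should be written needs its mask bit set, as checked above.
 *
 *     struct rte_eth_rss_reta_entry64 reta[reta_size / RTE_RETA_GROUP_SIZE];
 *     uint16_t i;
 *
 *     memset(reta, 0, sizeof(reta));
 *     for (i = 0; i < reta_size; i++) {
 *             reta[i / RTE_RETA_GROUP_SIZE].mask |=
 *                     1ULL << (i % RTE_RETA_GROUP_SIZE);
 *             reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *                     i % nb_rx_queues;
 *     }
 *     rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
 */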
1912
1913 int
1914 rte_eth_dev_rss_reta_query(uint8_t port_id,
1915                            struct rte_eth_rss_reta_entry64 *reta_conf,
1916                            uint16_t reta_size)
1917 {
1918         struct rte_eth_dev *dev;
1919         int ret;
1920
1921         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1922
1923         /* Check mask bits */
1924         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1925         if (ret < 0)
1926                 return ret;
1927
1928         dev = &rte_eth_devices[port_id];
1929         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
1930         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
1931 }
1932
1933 int
1934 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
1935 {
1936         struct rte_eth_dev *dev;
1937         uint16_t rss_hash_protos;
1938
1939         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1940         rss_hash_protos = rss_conf->rss_hf;
1941         if ((rss_hash_protos != 0) &&
1942             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
1943                 RTE_PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
1944                                 rss_hash_protos);
1945                 return -EINVAL;
1946         }
1947         dev = &rte_eth_devices[port_id];
1948         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
1949         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
1950 }
1951
1952 int
1953 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
1954                               struct rte_eth_rss_conf *rss_conf)
1955 {
1956         struct rte_eth_dev *dev;
1957
1958         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1959         dev = &rte_eth_devices[port_id];
1960         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
1961         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
1962 }
1963
1964 int
1965 rte_eth_dev_udp_tunnel_port_add(uint8_t port_id,
1966                                 struct rte_eth_udp_tunnel *udp_tunnel)
1967 {
1968         struct rte_eth_dev *dev;
1969
1970         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1971         if (udp_tunnel == NULL) {
1972                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
1973                 return -EINVAL;
1974         }
1975
1976         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
1977                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
1978                 return -EINVAL;
1979         }
1980
1981         dev = &rte_eth_devices[port_id];
1982         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
1983         return (*dev->dev_ops->udp_tunnel_port_add)(dev, udp_tunnel);
1984 }
1985
1986 int
1987 rte_eth_dev_udp_tunnel_port_delete(uint8_t port_id,
1988                                    struct rte_eth_udp_tunnel *udp_tunnel)
1989 {
1990         struct rte_eth_dev *dev;
1991
1992         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1993         dev = &rte_eth_devices[port_id];
1994
1995         if (udp_tunnel == NULL) {
1996                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
1997                 return -EINVAL;
1998         }
1999
2000         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2001                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2002                 return -EINVAL;
2003         }
2004
2005         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
2006         return (*dev->dev_ops->udp_tunnel_port_del)(dev, udp_tunnel);
2007 }
2008
2009 int
2010 rte_eth_led_on(uint8_t port_id)
2011 {
2012         struct rte_eth_dev *dev;
2013
2014         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2015         dev = &rte_eth_devices[port_id];
2016         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2017         return (*dev->dev_ops->dev_led_on)(dev);
2018 }
2019
2020 int
2021 rte_eth_led_off(uint8_t port_id)
2022 {
2023         struct rte_eth_dev *dev;
2024
2025         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2026         dev = &rte_eth_devices[port_id];
2027         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2028         return (*dev->dev_ops->dev_led_off)(dev);
2029 }
2030
2031 /*
2032  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2033  * an empty spot.
2034  */
2035 static int
2036 get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2037 {
2038         struct rte_eth_dev_info dev_info;
2039         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2040         unsigned i;
2041
2042         rte_eth_dev_info_get(port_id, &dev_info);
2043
2044         for (i = 0; i < dev_info.max_mac_addrs; i++)
2045                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2046                         return i;
2047
2048         return -1;
2049 }
2050
2051 static const struct ether_addr null_mac_addr;
2052
2053 int
2054 rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
2055                         uint32_t pool)
2056 {
2057         struct rte_eth_dev *dev;
2058         int index;
2059         uint64_t pool_mask;
2060
2061         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2062         dev = &rte_eth_devices[port_id];
2063         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2064
2065         if (is_zero_ether_addr(addr)) {
2066                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2067                         port_id);
2068                 return -EINVAL;
2069         }
2070         if (pool >= ETH_64_POOLS) {
2071                 RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2072                 return -EINVAL;
2073         }
2074
2075         index = get_mac_addr_index(port_id, addr);
2076         if (index < 0) {
2077                 index = get_mac_addr_index(port_id, &null_mac_addr);
2078                 if (index < 0) {
2079                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2080                                 port_id);
2081                         return -ENOSPC;
2082                 }
2083         } else {
2084                 pool_mask = dev->data->mac_pool_sel[index];
2085
2086                 /* Check if both MAC address and pool are already there, and do nothing */
2087                 if (pool_mask & (1ULL << pool))
2088                         return 0;
2089         }
2090
2091         /* Update NIC */
2092         (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2093
2094         /* Update address in NIC data structure */
2095         ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2096
2097         /* Update pool bitmap in NIC data structure */
2098         dev->data->mac_pool_sel[index] |= (1ULL << pool);
2099
2100         return 0;
2101 }
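
/*
 * Usage sketch (illustrative): adding a secondary, locally administered
 * unicast address to pool 0; re-adding the same address/pool pair is a
 * no-op, as handled above.
 *
 *     struct ether_addr extra = {
 *             .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } };
 *
 *     rte_eth_dev_mac_addr_add(port_id, &extra, 0);
 */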
2102
2103 int
2104 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
2105 {
2106         struct rte_eth_dev *dev;
2107         int index;
2108
2109         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2110         dev = &rte_eth_devices[port_id];
2111         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2112
2113         index = get_mac_addr_index(port_id, addr);
2114         if (index == 0) {
2115                 RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2116                 return -EADDRINUSE;
2117         } else if (index < 0)
2118                 return 0;  /* Do nothing if address wasn't found */
2119
2120         /* Update NIC */
2121         (*dev->dev_ops->mac_addr_remove)(dev, index);
2122
2123         /* Update address in NIC data structure */
2124         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2125
2126         /* reset pool bitmap */
2127         dev->data->mac_pool_sel[index] = 0;
2128
2129         return 0;
2130 }
2131
2132 int
2133 rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr)
2134 {
2135         struct rte_eth_dev *dev;
2136
2137         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2138
2139         if (!is_valid_assigned_ether_addr(addr))
2140                 return -EINVAL;
2141
2142         dev = &rte_eth_devices[port_id];
2143         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2144
2145         /* Update default address in NIC data structure */
2146         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
2147
2148         (*dev->dev_ops->mac_addr_set)(dev, addr);
2149
2150         return 0;
2151 }
2152
2153 int
2154 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
2155                                 uint16_t rx_mode, uint8_t on)
2156 {
2157         uint16_t num_vfs;
2158         struct rte_eth_dev *dev;
2159         struct rte_eth_dev_info dev_info;
2160
2161         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2162
2163         dev = &rte_eth_devices[port_id];
2164         rte_eth_dev_info_get(port_id, &dev_info);
2165
2166         num_vfs = dev_info.max_vfs;
2167         if (vf > num_vfs) {
2168                 RTE_PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
2169                 return -EINVAL;
2170         }
2171
2172         if (rx_mode == 0) {
2173                 RTE_PMD_DEBUG_TRACE("set VF RX mode:mode mask can not be zero\n");
2174                 return -EINVAL;
2175         }
2176         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
2177         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
2178 }
2179
2180 /*
2181  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2182  * an empty spot.
2183  */
2184 static int
2185 get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2186 {
2187         struct rte_eth_dev_info dev_info;
2188         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2189         unsigned i;
2190
2191         rte_eth_dev_info_get(port_id, &dev_info);
2192         if (!dev->data->hash_mac_addrs)
2193                 return -1;
2194
2195         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2196                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2197                         ETHER_ADDR_LEN) == 0)
2198                         return i;
2199
2200         return -1;
2201 }
2202
2203 int
2204 rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
2205                                 uint8_t on)
2206 {
2207         int index;
2208         int ret;
2209         struct rte_eth_dev *dev;
2210
2211         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2212
2213         dev = &rte_eth_devices[port_id];
2214         if (is_zero_ether_addr(addr)) {
2215                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2216                         port_id);
2217                 return -EINVAL;
2218         }
2219
2220         index = get_hash_mac_addr_index(port_id, addr);
2221         /* Check if it's already there, and do nothing */
2222         if ((index >= 0) && (on))
2223                 return 0;
2224
2225         if (index < 0) {
2226                 if (!on) {
2227                         RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
2228                                 "set in UTA\n", port_id);
2229                         return -EINVAL;
2230                 }
2231
2232                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2233                 if (index < 0) {
2234                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2235                                         port_id);
2236                         return -ENOSPC;
2237                 }
2238         }
2239
2240         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2241         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2242         if (ret == 0) {
2243                 /* Update address in NIC data structure */
2244                 if (on)
2245                         ether_addr_copy(addr,
2246                                         &dev->data->hash_mac_addrs[index]);
2247                 else
2248                         ether_addr_copy(&null_mac_addr,
2249                                         &dev->data->hash_mac_addrs[index]);
2250         }
2251
2252         return ret;
2253 }
2254
2255 int
2256 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2257 {
2258         struct rte_eth_dev *dev;
2259
2260         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2261
2262         dev = &rte_eth_devices[port_id];
2263
2264         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2265         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2266 }
2267
2268 int
2269 rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on)
2270 {
2271         uint16_t num_vfs;
2272         struct rte_eth_dev *dev;
2273         struct rte_eth_dev_info dev_info;
2274
2275         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2276
2277         dev = &rte_eth_devices[port_id];
2278         rte_eth_dev_info_get(port_id, &dev_info);
2279
2280         num_vfs = dev_info.max_vfs;
2281         if (vf > num_vfs) {
2282                 RTE_PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2283                 return -EINVAL;
2284         }
2285
2286         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2287         return (*dev->dev_ops->set_vf_rx)(dev, vf, on);
2288 }
2289
2290 int
2291 rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on)
2292 {
2293         uint16_t num_vfs;
2294         struct rte_eth_dev *dev;
2295         struct rte_eth_dev_info dev_info;
2296
2297         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2298
2299         dev = &rte_eth_devices[port_id];
2300         rte_eth_dev_info_get(port_id, &dev_info);
2301
2302         num_vfs = dev_info.max_vfs;
2303         if (vf > num_vfs) {
2304                 RTE_PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
2305                 return -EINVAL;
2306         }
2307
2308         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2309         return (*dev->dev_ops->set_vf_tx)(dev, vf, on);
2310 }
2311
2312 int
2313 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2314                                uint64_t vf_mask, uint8_t vlan_on)
2315 {
2316         struct rte_eth_dev *dev;
2317
2318         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2319
2320         dev = &rte_eth_devices[port_id];
2321
2322         if (vlan_id > ETHER_MAX_VLAN_ID) {
2323                 RTE_PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
2324                         vlan_id);
2325                 return -EINVAL;
2326         }
2327
2328         if (vf_mask == 0) {
2329                 RTE_PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
2330                 return -EINVAL;
2331         }
2332
2333         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2334         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2335                                                    vf_mask, vlan_on);
2336 }
2337
2338 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
2339                                         uint16_t tx_rate)
2340 {
2341         struct rte_eth_dev *dev;
2342         struct rte_eth_dev_info dev_info;
2343         struct rte_eth_link link;
2344
2345         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2346
2347         dev = &rte_eth_devices[port_id];
2348         rte_eth_dev_info_get(port_id, &dev_info);
2349         link = dev->data->dev_link;
2350
2351         if (queue_idx > dev_info.max_tx_queues) {
2352                 RTE_PMD_DEBUG_TRACE("set queue rate limit:port %d: "
2353                                 "invalid queue id=%d\n", port_id, queue_idx);
2354                 return -EINVAL;
2355         }
2356
2357         if (tx_rate > link.link_speed) {
2358                 RTE_PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
2359                                 "bigger than link speed= %d\n",
2360                         tx_rate, link.link_speed);
2361                 return -EINVAL;
2362         }
2363
2364         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2365         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2366 }
2367
2368 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
2369                                 uint64_t q_msk)
2370 {
2371         struct rte_eth_dev *dev;
2372         struct rte_eth_dev_info dev_info;
2373         struct rte_eth_link link;
2374
2375         if (q_msk == 0)
2376                 return 0;
2377
2378         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2379
2380         dev = &rte_eth_devices[port_id];
2381         rte_eth_dev_info_get(port_id, &dev_info);
2382         link = dev->data->dev_link;
2383
2384         if (vf > dev_info.max_vfs) {
2385                 RTE_PMD_DEBUG_TRACE("set VF rate limit:port %d: "
2386                                 "invalid vf id=%d\n", port_id, vf);
2387                 return -EINVAL;
2388         }
2389
2390         if (tx_rate > link.link_speed) {
2391                 RTE_PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
2392                                 "bigger than link speed= %d\n",
2393                                 tx_rate, link.link_speed);
2394                 return -EINVAL;
2395         }
2396
2397         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
2398         return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
2399 }
2400
2401 int
2402 rte_eth_mirror_rule_set(uint8_t port_id,
2403                         struct rte_eth_mirror_conf *mirror_conf,
2404                         uint8_t rule_id, uint8_t on)
2405 {
2406         struct rte_eth_dev *dev;
2407
2408         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2409         if (mirror_conf->rule_type == 0) {
2410                 RTE_PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2411                 return -EINVAL;
2412         }
2413
2414         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2415                 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
2416                                 ETH_64_POOLS - 1);
2417                 return -EINVAL;
2418         }
2419
2420         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
2421              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
2422             (mirror_conf->pool_mask == 0)) {
2423                 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
2424                 return -EINVAL;
2425         }
2426
2427         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
2428             mirror_conf->vlan.vlan_mask == 0) {
2429                 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
2430                 return -EINVAL;
2431         }
2432
2433         dev = &rte_eth_devices[port_id];
2434         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2435
2436         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2437 }
2438
2439 int
2440 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2441 {
2442         struct rte_eth_dev *dev;
2443
2444         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2445
2446         dev = &rte_eth_devices[port_id];
2447         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2448
2449         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2450 }
2451
2452 int
2453 rte_eth_dev_callback_register(uint8_t port_id,
2454                         enum rte_eth_event_type event,
2455                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2456 {
2457         struct rte_eth_dev *dev;
2458         struct rte_eth_dev_callback *user_cb;
2459
2460         if (!cb_fn)
2461                 return -EINVAL;
2462
2463         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2464
2465         dev = &rte_eth_devices[port_id];
2466         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2467
2468         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
2469                 if (user_cb->cb_fn == cb_fn &&
2470                         user_cb->cb_arg == cb_arg &&
2471                         user_cb->event == event) {
2472                         break;
2473                 }
2474         }
2475
2476         /* create a new callback. */
2477         if (user_cb == NULL) {
2478                 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
2479                                         sizeof(struct rte_eth_dev_callback), 0);
2480                 if (user_cb != NULL) {
2481                         user_cb->cb_fn = cb_fn;
2482                         user_cb->cb_arg = cb_arg;
2483                         user_cb->event = event;
2484                         TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
2485                 }
2486         }
2487
2488         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2489         return (user_cb == NULL) ? -ENOMEM : 0;
2490 }
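
/*
 * Usage sketch (illustrative): registering a link-status-change handler.
 * The handler prototype mirrors the way callbacks are invoked in
 * _rte_eth_dev_callback_process() below (port id, event, user argument).
 *
 *     static void
 *     lsc_handler(uint8_t port, enum rte_eth_event_type event, void *arg)
 *     {
 *             ... e.g. re-read the link with rte_eth_link_get_nowait() ...
 *     }
 *
 *     rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                                   lsc_handler, NULL);
 */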
2491
2492 int
2493 rte_eth_dev_callback_unregister(uint8_t port_id,
2494                         enum rte_eth_event_type event,
2495                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2496 {
2497         int ret;
2498         struct rte_eth_dev *dev;
2499         struct rte_eth_dev_callback *cb, *next;
2500
2501         if (!cb_fn)
2502                 return -EINVAL;
2503
2504         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2505
2506         dev = &rte_eth_devices[port_id];
2507         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2508
2509         ret = 0;
2510         for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
2511
2512                 next = TAILQ_NEXT(cb, next);
2513
2514                 if (cb->cb_fn != cb_fn || cb->event != event ||
2515                                 (cb->cb_arg != (void *)-1 &&
2516                                 cb->cb_arg != cb_arg))
2517                         continue;
2518
2519                 /*
2520                  * if this callback is not executing right now,
2521                  * then remove it.
2522                  */
2523                 if (cb->active == 0) {
2524                         TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
2525                         rte_free(cb);
2526                 } else {
2527                         ret = -EAGAIN;
2528                 }
2529         }
2530
2531         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2532         return ret;
2533 }
2534
2535 void
2536 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
2537         enum rte_eth_event_type event, void *cb_arg)
2538 {
2539         struct rte_eth_dev_callback *cb_lst;
2540         struct rte_eth_dev_callback dev_cb;
2541
2542         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2543         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
2544                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
2545                         continue;
2546                 dev_cb = *cb_lst;
2547                 cb_lst->active = 1;
2548                 if (cb_arg != NULL)
2549                         dev_cb.cb_arg = (void *) cb_arg;
2550
2551                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2552                 dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
2553                                                 dev_cb.cb_arg);
2554                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
2555                 cb_lst->active = 0;
2556         }
2557         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2558 }
2559
2560 int
2561 rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
2562 {
2563         uint32_t vec;
2564         struct rte_eth_dev *dev;
2565         struct rte_intr_handle *intr_handle;
2566         uint16_t qid;
2567         int rc;
2568
2569         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2570
2571         dev = &rte_eth_devices[port_id];
2572         intr_handle = &dev->pci_dev->intr_handle;
2573         if (!intr_handle->intr_vec) {
2574                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2575                 return -EPERM;
2576         }
2577
2578         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
2579                 vec = intr_handle->intr_vec[qid];
2580                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2581                 if (rc && rc != -EEXIST) {
2582                         RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2583                                         " op %d epfd %d vec %u\n",
2584                                         port_id, qid, op, epfd, vec);
2585                 }
2586         }
2587
2588         return 0;
2589 }
2590
2591 const struct rte_memzone *
2592 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
2593                          uint16_t queue_id, size_t size, unsigned align,
2594                          int socket_id)
2595 {
2596         char z_name[RTE_MEMZONE_NAMESIZE];
2597         const struct rte_memzone *mz;
2598
2599         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
2600                  dev->driver->pci_drv.driver.name, ring_name,
2601                  dev->data->port_id, queue_id);
2602
2603         mz = rte_memzone_lookup(z_name);
2604         if (mz)
2605                 return mz;
2606
2607         if (rte_xen_dom0_supported())
2608                 return rte_memzone_reserve_bounded(z_name, size, socket_id,
2609                                                    0, align, RTE_PGSIZE_2M);
2610         else
2611                 return rte_memzone_reserve_aligned(z_name, size, socket_id,
2612                                                    0, align);
2613 }
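
/*
 * Usage sketch (illustrative; ring_size, queue_idx and socket_id stand for
 * the values a PMD has at queue setup time): drivers typically call this
 * from their RX/TX queue setup to obtain, or find again, the descriptor
 * ring memory.
 *
 *     const struct rte_memzone *rz;
 *
 *     rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, ring_size,
 *                                   RTE_CACHE_LINE_SIZE, socket_id);
 *     if (rz == NULL)
 *             return -ENOMEM;
 */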
2614
2615 int
2616 rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
2617                           int epfd, int op, void *data)
2618 {
2619         uint32_t vec;
2620         struct rte_eth_dev *dev;
2621         struct rte_intr_handle *intr_handle;
2622         int rc;
2623
2624         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2625
2626         dev = &rte_eth_devices[port_id];
2627         if (queue_id >= dev->data->nb_rx_queues) {
2628                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
2629                 return -EINVAL;
2630         }
2631
2632         intr_handle = &dev->pci_dev->intr_handle;
2633         if (!intr_handle->intr_vec) {
2634                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2635                 return -EPERM;
2636         }
2637
2638         vec = intr_handle->intr_vec[queue_id];
2639         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2640         if (rc && rc != -EEXIST) {
2641                 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2642                                 " op %d epfd %d vec %u\n",
2643                                 port_id, queue_id, op, epfd, vec);
2644                 return rc;
2645         }
2646
2647         return 0;
2648 }
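
/*
 * Usage sketch (illustrative; RTE_EPOLL_PER_THREAD and RTE_INTR_EVENT_ADD
 * are assumed from rte_interrupts.h): binding one RX queue's interrupt to
 * the calling thread's epoll instance and then arming it.
 *
 *     rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *                               RTE_INTR_EVENT_ADD, NULL);
 *     rte_eth_dev_rx_intr_enable(port_id, queue_id);
 */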
2649
2650 int
2651 rte_eth_dev_rx_intr_enable(uint8_t port_id,
2652                            uint16_t queue_id)
2653 {
2654         struct rte_eth_dev *dev;
2655
2656         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2657
2658         dev = &rte_eth_devices[port_id];
2659
2660         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
2661         return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
2662 }
2663
2664 int
2665 rte_eth_dev_rx_intr_disable(uint8_t port_id,
2666                             uint16_t queue_id)
2667 {
2668         struct rte_eth_dev *dev;
2669
2670         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2671
2672         dev = &rte_eth_devices[port_id];
2673
2674         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
2675         return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
2676 }
2677
2678 #ifdef RTE_NIC_BYPASS
2679 int rte_eth_dev_bypass_init(uint8_t port_id)
2680 {
2681         struct rte_eth_dev *dev;
2682
2683         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2684
2685         dev = &rte_eth_devices[port_id];
2686         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
2687         (*dev->dev_ops->bypass_init)(dev);
2688         return 0;
2689 }
2690
2691 int
2692 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
2693 {
2694         struct rte_eth_dev *dev;
2695
2696         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2697
2698         dev = &rte_eth_devices[port_id];
2699         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2700         (*dev->dev_ops->bypass_state_show)(dev, state);
2701         return 0;
2702 }
2703
2704 int
2705 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
2706 {
2707         struct rte_eth_dev *dev;
2708
2709         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2710
2711         dev = &rte_eth_devices[port_id];
2712         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
2713         (*dev->dev_ops->bypass_state_set)(dev, new_state);
2714         return 0;
2715 }
2716
2717 int
2718 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
2719 {
2720         struct rte_eth_dev *dev;
2721
2722         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2723
2724         dev = &rte_eth_devices[port_id];
2725         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_show, -ENOTSUP);
2726         (*dev->dev_ops->bypass_event_show)(dev, event, state);
2727         return 0;
2728 }
2729
2730 int
2731 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
2732 {
2733         struct rte_eth_dev *dev;
2734
2735         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2736
2737         dev = &rte_eth_devices[port_id];
2738
2739         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
2740         (*dev->dev_ops->bypass_event_set)(dev, event, state);
2741         return 0;
2742 }
2743
2744 int
2745 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
2746 {
2747         struct rte_eth_dev *dev;
2748
2749         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2750
2751         dev = &rte_eth_devices[port_id];
2752
2753         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
2754         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
2755         return 0;
2756 }
2757
2758 int
2759 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
2760 {
2761         struct rte_eth_dev *dev;
2762
2763         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2764
2765         dev = &rte_eth_devices[port_id];
2766
2767         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
2768         (*dev->dev_ops->bypass_ver_show)(dev, ver);
2769         return 0;
2770 }
2771
2772 int
2773 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
2774 {
2775         struct rte_eth_dev *dev;
2776
2777         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2778
2779         dev = &rte_eth_devices[port_id];
2780
2781         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
2782         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
2783         return 0;
2784 }
2785
2786 int
2787 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
2788 {
2789         struct rte_eth_dev *dev;
2790
2791         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2792
2793         dev = &rte_eth_devices[port_id];
2794
2795         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
2796         (*dev->dev_ops->bypass_wd_reset)(dev);
2797         return 0;
2798 }
2799 #endif
2800
2801 int
2802 rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
2803 {
2804         struct rte_eth_dev *dev;
2805
2806         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2807
2808         dev = &rte_eth_devices[port_id];
2809         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2810         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
2811                                 RTE_ETH_FILTER_NOP, NULL);
2812 }
2813
2814 int
2815 rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
2816                        enum rte_filter_op filter_op, void *arg)
2817 {
2818         struct rte_eth_dev *dev;
2819
2820         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2821
2822         dev = &rte_eth_devices[port_id];
2823         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2824         return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
2825 }
2826
2827 void *
2828 rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
2829                 rte_rx_callback_fn fn, void *user_param)
2830 {
2831 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2832         rte_errno = ENOTSUP;
2833         return NULL;
2834 #endif
2835         /* check input parameters */
2836         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2837                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2838                 rte_errno = EINVAL;
2839                 return NULL;
2840         }
2841         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2842
2843         if (cb == NULL) {
2844                 rte_errno = ENOMEM;
2845                 return NULL;
2846         }
2847
2848         cb->fn.rx = fn;
2849         cb->param = user_param;
2850
2851         rte_spinlock_lock(&rte_eth_rx_cb_lock);
2852         /* Add the callbacks in fifo order. */
2853         struct rte_eth_rxtx_callback *tail =
2854                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
2855
2856         if (!tail) {
2857                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
2858
2859         } else {
2860                 while (tail->next)
2861                         tail = tail->next;
2862                 tail->next = cb;
2863         }
2864         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
2865
2866         return cb;
2867 }
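
/*
 * Usage sketch (illustrative; the prototype is assumed to follow
 * rte_rx_callback_fn from rte_ethdev.h): a post-RX callback that counts
 * received packets. The returned handle is what
 * rte_eth_remove_rx_callback() expects later.
 *
 *     static uint16_t
 *     count_rx(uint8_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *              uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *     {
 *             *(uint64_t *)user_param += nb_pkts;
 *             return nb_pkts;
 *     }
 *
 *     static uint64_t rx_count;
 *     void *cb = rte_eth_add_rx_callback(port_id, 0, count_rx, &rx_count);
 */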
2868
2869 void *
2870 rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id,
2871                 rte_rx_callback_fn fn, void *user_param)
2872 {
2873 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2874         rte_errno = ENOTSUP;
2875         return NULL;
2876 #endif
2877         /* check input parameters */
2878         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2879                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2880                 rte_errno = EINVAL;
2881                 return NULL;
2882         }
2883
2884         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2885
2886         if (cb == NULL) {
2887                 rte_errno = ENOMEM;
2888                 return NULL;
2889         }
2890
2891         cb->fn.rx = fn;
2892         cb->param = user_param;
2893
2894         rte_spinlock_lock(&rte_eth_rx_cb_lock);
2895         /* Add the callbacks at first position */
2896         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
2897         rte_smp_wmb();
2898         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
2899         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
2900
2901         return cb;
2902 }
2903
2904 void *
2905 rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
2906                 rte_tx_callback_fn fn, void *user_param)
2907 {
2908 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2909         rte_errno = ENOTSUP;
2910         return NULL;
2911 #endif
2912         /* check input parameters */
2913         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2914                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
2915                 rte_errno = EINVAL;
2916                 return NULL;
2917         }
2918
2919         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2920
2921         if (cb == NULL) {
2922                 rte_errno = ENOMEM;
2923                 return NULL;
2924         }
2925
2926         cb->fn.tx = fn;
2927         cb->param = user_param;
2928
2929         rte_spinlock_lock(&rte_eth_tx_cb_lock);
2930         /* Add the callbacks in fifo order. */
2931         struct rte_eth_rxtx_callback *tail =
2932                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
2933
2934         if (!tail) {
2935                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
2936
2937         } else {
2938                 while (tail->next)
2939                         tail = tail->next;
2940                 tail->next = cb;
2941         }
2942         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
2943
2944         return cb;
2945 }
2946
2947 int
2948 rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
2949                 struct rte_eth_rxtx_callback *user_cb)
2950 {
2951 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2952         return -ENOTSUP;
2953 #endif
2954         /* Check input parameters. */
2955         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2956         if (user_cb == NULL ||
2957                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
2958                 return -EINVAL;
2959
2960         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2961         struct rte_eth_rxtx_callback *cb;
2962         struct rte_eth_rxtx_callback **prev_cb;
2963         int ret = -EINVAL;
2964
2965         rte_spinlock_lock(&rte_eth_rx_cb_lock);
2966         prev_cb = &dev->post_rx_burst_cbs[queue_id];
2967         for (; *prev_cb != NULL; prev_cb = &cb->next) {
2968                 cb = *prev_cb;
2969                 if (cb == user_cb) {
2970                         /* Remove the user cb from the callback list. */
2971                         *prev_cb = cb->next;
2972                         ret = 0;
2973                         break;
2974                 }
2975         }
2976         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
2977
2978         return ret;
2979 }
2980
2981 int
2982 rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
2983                 struct rte_eth_rxtx_callback *user_cb)
2984 {
2985 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2986         return -ENOTSUP;
2987 #endif
2988         /* Check input parameters. */
2989         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2990         if (user_cb == NULL ||
2991                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
2992                 return -EINVAL;
2993
2994         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2995         int ret = -EINVAL;
2996         struct rte_eth_rxtx_callback *cb;
2997         struct rte_eth_rxtx_callback **prev_cb;
2998
2999         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3000         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
3001         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3002                 cb = *prev_cb;
3003                 if (cb == user_cb) {
3004                         /* Remove the user cb from the callback list. */
3005                         *prev_cb = cb->next;
3006                         ret = 0;
3007                         break;
3008                 }
3009         }
3010         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3011
3012         return ret;
3013 }
3014
3015 int
3016 rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3017         struct rte_eth_rxq_info *qinfo)
3018 {
3019         struct rte_eth_dev *dev;
3020
3021         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3022
3023         if (qinfo == NULL)
3024                 return -EINVAL;
3025
3026         dev = &rte_eth_devices[port_id];
3027         if (queue_id >= dev->data->nb_rx_queues) {
3028                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3029                 return -EINVAL;
3030         }
3031
3032         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3033
3034         memset(qinfo, 0, sizeof(*qinfo));
3035         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3036         return 0;
3037 }
3038
3039 int
3040 rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3041         struct rte_eth_txq_info *qinfo)
3042 {
3043         struct rte_eth_dev *dev;
3044
3045         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3046
3047         if (qinfo == NULL)
3048                 return -EINVAL;
3049
3050         dev = &rte_eth_devices[port_id];
3051         if (queue_id >= dev->data->nb_tx_queues) {
3052                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3053                 return -EINVAL;
3054         }
3055
3056         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3057
3058         memset(qinfo, 0, sizeof(*qinfo));
3059         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3060         return 0;
3061 }
3062
3063 int
3064 rte_eth_dev_set_mc_addr_list(uint8_t port_id,
3065                              struct ether_addr *mc_addr_set,
3066                              uint32_t nb_mc_addr)
3067 {
3068         struct rte_eth_dev *dev;
3069
3070         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3071
3072         dev = &rte_eth_devices[port_id];
3073         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3074         return dev->dev_ops->set_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
3075 }
3076
3077 int
3078 rte_eth_timesync_enable(uint8_t port_id)
3079 {
3080         struct rte_eth_dev *dev;
3081
3082         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3083         dev = &rte_eth_devices[port_id];
3084
3085         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3086         return (*dev->dev_ops->timesync_enable)(dev);
3087 }
3088
3089 int
3090 rte_eth_timesync_disable(uint8_t port_id)
3091 {
3092         struct rte_eth_dev *dev;
3093
3094         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3095         dev = &rte_eth_devices[port_id];
3096
3097         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3098         return (*dev->dev_ops->timesync_disable)(dev);
3099 }
3100
3101 int
3102 rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp,
3103                                    uint32_t flags)
3104 {
3105         struct rte_eth_dev *dev;
3106
3107         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3108         dev = &rte_eth_devices[port_id];
3109
3110         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3111         return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
3112 }
3113
3114 int
3115 rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp)
3116 {
3117         struct rte_eth_dev *dev;
3118
3119         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3120         dev = &rte_eth_devices[port_id];
3121
3122         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3123         return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
3124 }
3125
3126 int
3127 rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta)
3128 {
3129         struct rte_eth_dev *dev;
3130
3131         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3132         dev = &rte_eth_devices[port_id];
3133
3134         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
3135         return (*dev->dev_ops->timesync_adjust_time)(dev, delta);
3136 }
3137
3138 int
3139 rte_eth_timesync_read_time(uint8_t port_id, struct timespec *timestamp)
3140 {
3141         struct rte_eth_dev *dev;
3142
3143         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3144         dev = &rte_eth_devices[port_id];
3145
3146         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
3147         return (*dev->dev_ops->timesync_read_time)(dev, timestamp);
3148 }
3149
3150 int
3151 rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *timestamp)
3152 {
3153         struct rte_eth_dev *dev;
3154
3155         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3156         dev = &rte_eth_devices[port_id];
3157
3158         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
3159         return (*dev->dev_ops->timesync_write_time)(dev, timestamp);
3160 }
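
/*
 * Usage sketch (illustrative): correcting and reprogramming the device
 * clock with the calls above, e.g. from a PTP servo loop.  The +1000 ns
 * offset, the one second step and port id 0 are example values only.
 *
 *	struct timespec now;
 *
 *	rte_eth_timesync_adjust_time(0, 1000);
 *	rte_eth_timesync_read_time(0, &now);
 *	now.tv_sec += 1;
 *	rte_eth_timesync_write_time(0, &now);
 */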
3161
3162 int
3163 rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info)
3164 {
3165         struct rte_eth_dev *dev;
3166
3167         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3168
3169         dev = &rte_eth_devices[port_id];
3170         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
3171         return (*dev->dev_ops->get_reg)(dev, info);
3172 }
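
/*
 * Usage sketch (illustrative): two-step register dump.  A first call with
 * info.data left at NULL is the convention used by ethtool-style tools to
 * ask the PMD for the register count; a second call with a buffer fetches
 * the values.  Port id 0 and the uint32_t register width are assumptions
 * for the example.
 *
 *	struct rte_dev_reg_info reg_info;
 *
 *	memset(&reg_info, 0, sizeof(reg_info));
 *	if (rte_eth_dev_get_reg_info(0, &reg_info) == 0) {
 *		reg_info.data = calloc(reg_info.length, sizeof(uint32_t));
 *		if (reg_info.data != NULL)
 *			rte_eth_dev_get_reg_info(0, &reg_info);
 *	}
 */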
3173
3174 int
3175 rte_eth_dev_get_eeprom_length(uint8_t port_id)
3176 {
3177         struct rte_eth_dev *dev;
3178
3179         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3180
3181         dev = &rte_eth_devices[port_id];
3182         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
3183         return (*dev->dev_ops->get_eeprom_length)(dev);
3184 }
3185
3186 int
3187 rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
3188 {
3189         struct rte_eth_dev *dev;
3190
3191         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3192
3193         dev = &rte_eth_devices[port_id];
3194         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
3195         return (*dev->dev_ops->get_eeprom)(dev, info);
3196 }
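
/*
 * Usage sketch (illustrative): read the first 64 bytes of the device
 * EEPROM.  The port id, offset and buffer size are example values; on a
 * successful read the PMD typically fills in the magic field, which is
 * then expected to match on a later rte_eth_dev_set_eeprom() call.
 *
 *	struct rte_dev_eeprom_info eeprom;
 *	uint8_t buf[64];
 *	int len = rte_eth_dev_get_eeprom_length(0);
 *
 *	if (len > 0) {
 *		memset(&eeprom, 0, sizeof(eeprom));
 *		eeprom.data = buf;
 *		eeprom.offset = 0;
 *		eeprom.length = RTE_MIN((uint32_t)len, (uint32_t)sizeof(buf));
 *		rte_eth_dev_get_eeprom(0, &eeprom);
 *	}
 */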
3197
3198 int
3199 rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
3200 {
3201         struct rte_eth_dev *dev;
3202
3203         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3204
3205         dev = &rte_eth_devices[port_id];
3206         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
3207         return (*dev->dev_ops->set_eeprom)(dev, info);
3208 }
3209
3210 int
3211 rte_eth_dev_get_dcb_info(uint8_t port_id,
3212                              struct rte_eth_dcb_info *dcb_info)
3213 {
3214         struct rte_eth_dev *dev;
3215
3216         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3217
3218         dev = &rte_eth_devices[port_id];
3219         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
3220
3221         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
3222         return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
3223 }
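
/*
 * Usage sketch (illustrative): query the current DCB configuration of
 * port 0.  The port id is an example value; PMDs without DCB support
 * return -ENOTSUP through the check above.
 *
 *	struct rte_eth_dcb_info dcb_info;
 *
 *	if (rte_eth_dev_get_dcb_info(0, &dcb_info) == 0)
 *		printf("%u traffic classes configured\n", dcb_info.nb_tcs);
 */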
3224
3225 void
3226 rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev, struct rte_pci_device *pci_dev)
3227 {
3228         if ((eth_dev == NULL) || (pci_dev == NULL)) {
3229                 RTE_PMD_DEBUG_TRACE("NULL pointer eth_dev=%p pci_dev=%p\n",
3230                                 eth_dev, pci_dev);
3231                 return;
3232         }
3233
3234         eth_dev->data->dev_flags = 0;
3235         if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
3236                 eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
3237         if (pci_dev->driver->drv_flags & RTE_PCI_DRV_DETACHABLE)
3238                 eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
3239
3240         eth_dev->data->kdrv = pci_dev->kdrv;
3241         eth_dev->data->numa_node = pci_dev->device.numa_node;
3242         eth_dev->data->drv_name = pci_dev->driver->driver.name;
3243 }
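
/*
 * Usage sketch (illustrative): a PCI PMD normally calls this helper from
 * its eth_dev init/probe path, right after the ethdev has been allocated,
 * so that the generic data (LSC/detachable flags, kernel driver, NUMA
 * node, driver name) mirrors the underlying PCI device.  The function
 * name below is made up for the example.
 *
 *	static int
 *	example_pmd_eth_dev_init(struct rte_eth_dev *eth_dev,
 *				 struct rte_pci_device *pci_dev)
 *	{
 *		rte_eth_copy_pci_info(eth_dev, pci_dev);
 *		... remaining driver specific initialization ...
 *		return 0;
 *	}
 */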
3244
3245 int
3246 rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id,
3247                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
3248 {
3249         struct rte_eth_dev *dev;
3250
3251         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3252         if (l2_tunnel == NULL) {
3253                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
3254                 return -EINVAL;
3255         }
3256
3257         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
3258                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
3259                 return -EINVAL;
3260         }
3261
3262         dev = &rte_eth_devices[port_id];
3263         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
3264                                 -ENOTSUP);
3265         return (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev, l2_tunnel);
3266 }
3267
3268 int
3269 rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
3270                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
3271                                   uint32_t mask,
3272                                   uint8_t en)
3273 {
3274         struct rte_eth_dev *dev;
3275
3276         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3277
3278         if (l2_tunnel == NULL) {
3279                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
3280                 return -EINVAL;
3281         }
3282
3283         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
3284                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
3285                 return -EINVAL;
3286         }
3287
3288         if (mask == 0) {
3289                 RTE_PMD_DEBUG_TRACE("Mask must be non-zero.\n");
3290                 return -EINVAL;
3291         }
3292
3293         dev = &rte_eth_devices[port_id];
3294         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
3295                                 -ENOTSUP);
3296         return (*dev->dev_ops->l2_tunnel_offload_set)(dev, l2_tunnel, mask, en);
3297 }
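
/*
 * Usage sketch (illustrative): configure the E-Tag ether type and enable
 * E-Tag insertion for a VF, assuming the RTE_L2_TUNNEL_TYPE_E_TAG tunnel
 * type and the ETH_L2_TUNNEL_*_MASK flags from rte_ethdev.h.  The port
 * id, VF id and ether type are example values.
 *
 *	struct rte_eth_l2_tunnel_conf conf;
 *
 *	memset(&conf, 0, sizeof(conf));
 *	conf.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
 *	conf.ether_type = 0x893f;
 *	rte_eth_dev_l2_tunnel_eth_type_conf(0, &conf);
 *
 *	conf.vf_id = 1;
 *	rte_eth_dev_l2_tunnel_offload_set(0, &conf,
 *					  ETH_L2_TUNNEL_INSERTION_MASK, 1);
 */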