ethdev: add internal reset function
[dpdk.git] / lib / librte_ether / rte_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_ether.h"
#include "rte_ethdev.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static struct rte_eth_dev_data *rte_eth_dev_data;
static uint8_t eth_dev_last_created_port;
static uint8_t nb_ports;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
        {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
                rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
                sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
                sizeof(rte_txq_stats_strings[0]))


/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * a pointer to the callback's parameter, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

enum {
        DEV_DETACHED = 0,
        DEV_ATTACHED
};

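/*
 * Per-port data lives in a named memzone shared across processes: the
 * primary process reserves and zeroes it, while secondary processes look
 * it up, so all processes observe the same device state.
 */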
static void
rte_eth_dev_data_alloc(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
                                rte_socket_id(), flags);
        } else
                mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
        if (mz == NULL)
                rte_panic("Cannot allocate memzone for ethernet port data\n");

        rte_eth_dev_data = mz->addr;
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                memset(rte_eth_dev_data, 0,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

static uint8_t
rte_eth_dev_find_free_port(void)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].attached == DEV_DETACHED)
                        return i;
        }
        return RTE_MAX_ETHPORTS;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
        uint8_t port_id;
        struct rte_eth_dev *eth_dev;

        port_id = rte_eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
                return NULL;
        }

        if (rte_eth_dev_data == NULL)
                rte_eth_dev_data_alloc();

        if (rte_eth_dev_allocated(name) != NULL) {
                RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
                                name);
                return NULL;
        }

        eth_dev = &rte_eth_devices[port_id];
        eth_dev->data = &rte_eth_dev_data[port_id];
        memset(eth_dev->data, 0, sizeof(*eth_dev->data));
        snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
        eth_dev->data->port_id = port_id;
        eth_dev->data->mtu = ETHER_MTU;
        TAILQ_INIT(&(eth_dev->link_intr_cbs));

        eth_dev->attached = DEV_ATTACHED;
        eth_dev_last_created_port = port_id;
        nb_ports++;
        return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        eth_dev->attached = DEV_DETACHED;
        nb_ports--;
        return 0;
}

int
rte_eth_dev_pci_probe(struct rte_pci_driver *pci_drv,
                      struct rte_pci_device *pci_dev)
{
        struct eth_driver    *eth_drv;
        struct rte_eth_dev *eth_dev;
        char ethdev_name[RTE_ETH_NAME_MAX_LEN];

        int diag;

        eth_drv = (struct eth_driver *)pci_drv;

        rte_eal_pci_device_name(&pci_dev->addr, ethdev_name,
                        sizeof(ethdev_name));

        eth_dev = rte_eth_dev_allocate(ethdev_name);
        if (eth_dev == NULL)
                return -ENOMEM;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
                                  eth_drv->dev_private_size,
                                  RTE_CACHE_LINE_SIZE);
                if (eth_dev->data->dev_private == NULL)
                        rte_panic("Cannot allocate memzone for private port data\n");
        }
        eth_dev->pci_dev = pci_dev;
        eth_dev->driver = eth_drv;

        /* Invoke PMD device initialization function */
        diag = (*eth_drv->eth_dev_init)(eth_dev);
        if (diag == 0)
                return 0;

        RTE_PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%x device_id=0x%x) failed\n",
                        pci_drv->driver.name,
                        (unsigned) pci_dev->id.vendor_id,
                        (unsigned) pci_dev->id.device_id);
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);
        rte_eth_dev_release_port(eth_dev);
        return diag;
}

int
rte_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
        const struct eth_driver *eth_drv;
        struct rte_eth_dev *eth_dev;
        char ethdev_name[RTE_ETH_NAME_MAX_LEN];
        int ret;

        if (pci_dev == NULL)
                return -EINVAL;

        rte_eal_pci_device_name(&pci_dev->addr, ethdev_name,
                        sizeof(ethdev_name));

        eth_dev = rte_eth_dev_allocated(ethdev_name);
        if (eth_dev == NULL)
                return -ENODEV;

        eth_drv = (const struct eth_driver *)pci_dev->driver;

        /* Invoke PMD device uninit function */
        if (*eth_drv->eth_dev_uninit) {
                ret = (*eth_drv->eth_dev_uninit)(eth_dev);
                if (ret)
                        return ret;
        }

        /* free ether device */
        rte_eth_dev_release_port(eth_dev);

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);

        eth_dev->pci_dev = NULL;
        eth_dev->driver = NULL;
        eth_dev->data = NULL;

        return 0;
}

int
rte_eth_dev_is_valid_port(uint8_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            rte_eth_devices[port_id].attached != DEV_ATTACHED)
                return 0;
        else
                return 1;
}

int
rte_eth_dev_socket_id(uint8_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
        return rte_eth_devices[port_id].data->numa_node;
}

uint8_t
rte_eth_dev_count(void)
{
        return nb_ports;
}

int
rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
{
        char *tmp;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (name == NULL) {
                RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        /* Do not check 'rte_eth_devices[i].data' here,
         * because it might be overwritten by a VDEV PMD */
        tmp = rte_eth_dev_data[port_id].name;
        strcpy(name, tmp);
        return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
{
        int i;

        if (name == NULL) {
                RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        if (!nb_ports)
                return -ENODEV;

        *port_id = RTE_MAX_ETHPORTS;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (!strncmp(name,
                        rte_eth_dev_data[i].name, strlen(name))) {
                        *port_id = i;
                        return 0;
                }
        }
        return -ENODEV;
}

static int
rte_eth_dev_is_detachable(uint8_t port_id)
{
        uint32_t dev_flags;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        switch (rte_eth_devices[port_id].data->kdrv) {
        case RTE_KDRV_IGB_UIO:
        case RTE_KDRV_UIO_GENERIC:
        case RTE_KDRV_NIC_UIO:
        case RTE_KDRV_NONE:
                break;
        case RTE_KDRV_VFIO:
        default:
                return -ENOTSUP;
        }
        dev_flags = rte_eth_devices[port_id].data->dev_flags;
        if ((dev_flags & RTE_ETH_DEV_DETACHABLE) &&
                (!(dev_flags & RTE_ETH_DEV_BONDED_SLAVE)))
                return 0;
        else
                return 1;
}

/* attach the new device, then store port_id of the device */
int
rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
{
        int ret = -1;
        int current = rte_eth_dev_count();
        char *name = NULL;
        char *args = NULL;

        if ((devargs == NULL) || (port_id == NULL)) {
                ret = -EINVAL;
                goto err;
        }

        /* parse devargs, then retrieve device name and args */
        if (rte_eal_parse_devargs_str(devargs, &name, &args))
                goto err;

        ret = rte_eal_dev_attach(name, args);
        if (ret < 0)
                goto err;

        /* no point looking at the port count if no port exists */
        if (!rte_eth_dev_count()) {
                RTE_LOG(ERR, EAL, "No port found for device (%s)\n", name);
                ret = -1;
                goto err;
        }

        /* If nothing happened, there is a bug: a driver reported that it
         * attached a device but did not create a port.
         */
        if (current == rte_eth_dev_count()) {
                ret = -1;
                goto err;
        }

        *port_id = eth_dev_last_created_port;
        ret = 0;

err:
        free(name);
        free(args);
        return ret;
}

/* detach the device, then store the name of the device */
int
rte_eth_dev_detach(uint8_t port_id, char *name)
{
        int ret = -1;

        if (name == NULL) {
                ret = -EINVAL;
                goto err;
        }

        /* FIXME: move this to eal, once device flags are relocated there */
        if (rte_eth_dev_is_detachable(port_id))
                goto err;

        snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
                 "%s", rte_eth_devices[port_id].data->name);
        ret = rte_eal_dev_detach(name);
        if (ret < 0)
                goto err;

        return 0;

err:
        return ret;
}

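/*
 * Illustrative usage only (hypothetical application code, not part of this
 * file): attaching a virtual device at runtime and later detaching it by
 * port id. The devargs string "net_pcap0,iface=eth0" is just an example.
 *
 *      uint8_t port;
 *      char name[RTE_ETH_NAME_MAX_LEN];
 *
 *      if (rte_eth_dev_attach("net_pcap0,iface=eth0", &port) == 0) {
 *              // ... configure, start and use the port ...
 *              rte_eth_dev_detach(port, name);
 *      }
 */
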
static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -(ENOMEM);
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;

        } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);

                rte_free(dev->data->rx_queues);
                dev->data->rx_queues = NULL;
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

int
rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
                        " already started\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
}

int
rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
                        " already stopped\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
}

int
rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
                        " already started\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
}

int
rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
                        " already stopped\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;

        } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);

                rte_free(dev->data->tx_queues);
                dev->data->tx_queues = NULL;
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

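/*
 * Translate a numeric link speed (ETH_SPEED_NUM_*) into the matching
 * link-speed capability bitflag (ETH_LINK_SPEED_*). The duplex argument
 * only matters for 10M and 100M, the speeds that have half-duplex
 * variants; unknown speeds map to 0.
 */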
uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
        switch (speed) {
        case ETH_SPEED_NUM_10M:
                return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
        case ETH_SPEED_NUM_100M:
                return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
        case ETH_SPEED_NUM_1G:
                return ETH_LINK_SPEED_1G;
        case ETH_SPEED_NUM_2_5G:
                return ETH_LINK_SPEED_2_5G;
        case ETH_SPEED_NUM_5G:
                return ETH_LINK_SPEED_5G;
        case ETH_SPEED_NUM_10G:
                return ETH_LINK_SPEED_10G;
        case ETH_SPEED_NUM_20G:
                return ETH_LINK_SPEED_20G;
        case ETH_SPEED_NUM_25G:
                return ETH_LINK_SPEED_25G;
        case ETH_SPEED_NUM_40G:
                return ETH_LINK_SPEED_40G;
        case ETH_SPEED_NUM_50G:
                return ETH_LINK_SPEED_50G;
        case ETH_SPEED_NUM_56G:
                return ETH_LINK_SPEED_56G;
        case ETH_SPEED_NUM_100G:
                return ETH_LINK_SPEED_100G;
        default:
                return 0;
        }
}

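/*
 * Configure a stopped device: validate the requested queue counts against
 * both the global RTE_MAX_QUEUES_PER_PORT limit and the PMD's reported
 * capabilities, copy the configuration into dev->data, (re)allocate the
 * Rx/Tx queue arrays, and finally hand over to the PMD's dev_configure
 * callback. On any failure the queue arrays are released again.
 */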
int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        int diag;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_PMD_DEBUG_TRACE(
                        "Number of RX queues requested (%u) is greater than max supported (%d)\n",
                        nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_PMD_DEBUG_TRACE(
                        "Number of TX queues requested (%u) is greater than max supported (%d)\n",
                        nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                RTE_PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        /* Copy the dev_conf parameter into the dev structure */
        memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        (*dev->dev_ops->dev_infos_get)(dev, &dev_info);

        if (nb_rx_q == 0 && nb_tx_q == 0) {
                RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q and nb_tx_q cannot both be 0\n", port_id);
                return -EINVAL;
        }

        if (nb_rx_q > dev_info.max_rx_queues) {
                RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
                                port_id, nb_rx_q, dev_info.max_rx_queues);
                return -EINVAL;
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
                                port_id, nb_tx_q, dev_info.max_tx_queues);
                return -EINVAL;
        }

        /*
         * If link state interrupt is enabled, check that the
         * device supports it.
         */
        if ((dev_conf->intr_conf.lsc == 1) &&
                (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
                RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
                                dev->data->drv_name);
                return -EINVAL;
        }

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (dev_conf->rxmode.jumbo_frame == 1) {
                if (dev_conf->rxmode.max_rx_pkt_len >
                    dev_info.max_rx_pktlen) {
                        RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " > max valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)dev_info.max_rx_pktlen);
                        return -EINVAL;
                } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
                        RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " < min valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)ETHER_MIN_LEN);
                        return -EINVAL;
                }
        } else {
                if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
                        dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
                        /* Use default value */
                        dev->data->dev_conf.rxmode.max_rx_pkt_len =
                                                        ETHER_MAX_LEN;
        }

        /*
         * Setup new number of RX/TX queues and reconfigure device.
         */
        diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
        if (diag != 0) {
                RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
                                port_id, diag);
                return diag;
        }

        diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
        if (diag != 0) {
                RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                return diag;
        }

        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                return diag;
        }

        return 0;
}

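/*
 * Internal helper for PMDs: bring a stopped device back to an unconfigured
 * state by releasing all of its Rx/Tx queues and clearing the stored
 * configuration. It deliberately does nothing if the device is still
 * started, since a running port must not be reset.
 */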
void
_rte_eth_dev_reset(struct rte_eth_dev *dev)
{
        if (dev->data->dev_started) {
                RTE_PMD_DEBUG_TRACE(
                        "port %d must be stopped to allow reset\n",
                        dev->data->port_id);
                return;
        }

        rte_eth_dev_rx_queue_config(dev, 0);
        rte_eth_dev_tx_queue_config(dev, 0);

        memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

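/*
 * Replay the configuration cached in dev->data after a (re)start: MAC
 * addresses (skipping unset, all-zero entries), promiscuous mode and
 * all-multicast mode, so that hardware state matches what the application
 * last requested.
 */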
static void
rte_eth_dev_config_restore(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct ether_addr addr;
        uint16_t i;
        uint32_t pool = 0;

        dev = &rte_eth_devices[port_id];

        rte_eth_dev_info_get(port_id, &dev_info);

        if (RTE_ETH_DEV_SRIOV(dev).active)
                pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

        /* replay MAC address configuration */
        for (i = 0; i < dev_info.max_mac_addrs; i++) {
                addr = dev->data->mac_addrs[i];

                /* skip zero address */
                if (is_zero_ether_addr(&addr))
                        continue;

                /* add address to the hardware */
                if (*dev->dev_ops->mac_addr_add &&
                        (dev->data->mac_pool_sel[i] & (1ULL << pool)))
                        (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
                else {
                        RTE_PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
                                        port_id);
                        /* exit the loop but do not return an error */
                        break;
                }
        }

        /* replay promiscuous configuration */
        if (rte_eth_promiscuous_get(port_id) == 1)
                rte_eth_promiscuous_enable(port_id);
        else if (rte_eth_promiscuous_get(port_id) == 0)
                rte_eth_promiscuous_disable(port_id);

        /* replay all multicast configuration */
        if (rte_eth_allmulticast_get(port_id) == 1)
                rte_eth_allmulticast_enable(port_id);
        else if (rte_eth_allmulticast_get(port_id) == 0)
                rte_eth_allmulticast_disable(port_id);
}

int
rte_eth_dev_start(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        int diag;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

        if (dev->data->dev_started != 0) {
                RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
                        " already started\n",
                        port_id);
                return 0;
        }

        diag = (*dev->dev_ops->dev_start)(dev);
        if (diag == 0)
                dev->data->dev_started = 1;
        else
                return diag;

        rte_eth_dev_config_restore(port_id);

        if (dev->data->dev_conf.intr_conf.lsc == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
                (*dev->dev_ops->link_update)(dev, 0);
        }
        return 0;
}

void
rte_eth_dev_stop(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

        if (dev->data->dev_started == 0) {
                RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
                        " already stopped\n",
                        port_id);
                return;
        }

        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_stop)(dev);
}

int
rte_eth_dev_set_link_up(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
        return (*dev->dev_ops->dev_set_link_up)(dev);
}

int
rte_eth_dev_set_link_down(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
        return (*dev->dev_ops->dev_set_link_down)(dev);
}

void
rte_eth_dev_close(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_close)(dev);

        rte_free(dev->data->rx_queues);
        dev->data->rx_queues = NULL;
        rte_free(dev->data->tx_queues);
        dev->data->tx_queues = NULL;
}

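/*
 * Set up an Rx queue on a stopped device. Beyond delegating to the PMD,
 * this validates that the mempool carries pktmbuf private data, that its
 * data room can hold RTE_PKTMBUF_HEADROOM plus the PMD's minimum Rx buffer
 * size, and that nb_rx_desc respects the PMD's descriptor limits. Any
 * queue previously configured at this index is released first.
 */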
int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
                       uint16_t nb_rx_desc, unsigned int socket_id,
                       const struct rte_eth_rxconf *rx_conf,
                       struct rte_mempool *mp)
{
        int ret;
        uint32_t mbp_buf_size;
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        void **rxq;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        if (dev->data->dev_started) {
                RTE_PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

        /*
         * Check the size of the mbuf data buffer.
         * This value must be provided in the private data of the memory pool.
         * First check that the memory pool has valid private data.
         */
        rte_eth_dev_info_get(port_id, &dev_info);
        if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
                RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
                                mp->name, (int) mp->private_data_size,
                                (int) sizeof(struct rte_pktmbuf_pool_private));
                return -ENOSPC;
        }
        mbp_buf_size = rte_pktmbuf_data_room_size(mp);

        if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
                RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
                                "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
                                "=%d)\n",
                                mp->name,
                                (int)mbp_buf_size,
                                (int)(RTE_PKTMBUF_HEADROOM +
                                      dev_info.min_rx_bufsize),
                                (int)RTE_PKTMBUF_HEADROOM,
                                (int)dev_info.min_rx_bufsize);
                return -EINVAL;
        }

        if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
                        nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
                        nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {

                RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
                        "should be: <= %hu, >= %hu, and a multiple of %hu\n",
                        nb_rx_desc,
                        dev_info.rx_desc_lim.nb_max,
                        dev_info.rx_desc_lim.nb_min,
                        dev_info.rx_desc_lim.nb_align);
                return -EINVAL;
        }

        rxq = dev->data->rx_queues;
        if (rxq[rx_queue_id]) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
                                        -ENOTSUP);
                (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
                rxq[rx_queue_id] = NULL;
        }

        if (rx_conf == NULL)
                rx_conf = &dev_info.default_rxconf;

        ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
                                              socket_id, rx_conf, mp);
        if (!ret) {
                if (!dev->data->min_rx_buf_size ||
                    dev->data->min_rx_buf_size > mbp_buf_size)
                        dev->data->min_rx_buf_size = mbp_buf_size;
        }

        return ret;
}

int
rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
                       uint16_t nb_tx_desc, unsigned int socket_id,
                       const struct rte_eth_txconf *tx_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        void **txq;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        if (dev->data->dev_started) {
                RTE_PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);

        rte_eth_dev_info_get(port_id, &dev_info);

        if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
            nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
            nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
                RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
                                "should be: <= %hu, >= %hu, and a multiple of %hu\n",
                                nb_tx_desc,
                                dev_info.tx_desc_lim.nb_max,
                                dev_info.tx_desc_lim.nb_min,
                                dev_info.tx_desc_lim.nb_align);
                return -EINVAL;
        }

        txq = dev->data->tx_queues;
        if (txq[tx_queue_id]) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
                                        -ENOTSUP);
                (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
                txq[tx_queue_id] = NULL;
        }

        if (tx_conf == NULL)
                tx_conf = &dev_info.default_txconf;

        return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
                                               socket_id, tx_conf);
}

void
rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
                void *userdata __rte_unused)
{
        unsigned i;

        for (i = 0; i < unsent; i++)
                rte_pktmbuf_free(pkts[i]);
}

void
rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
                void *userdata)
{
        uint64_t *count = userdata;
        unsigned i;

        for (i = 0; i < unsent; i++)
                rte_pktmbuf_free(pkts[i]);

        *count += unsent;
}

int
rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
                buffer_tx_error_fn cbfn, void *userdata)
{
        buffer->error_callback = cbfn;
        buffer->error_userdata = userdata;
        return 0;
}

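/*
 * Illustrative usage only (hypothetical application code, not part of this
 * file): counting packets dropped by a TX buffer instead of silently
 * freeing them. The buffer "buf" and BURST_SIZE are application-side names.
 *
 *      static uint64_t dropped;
 *      struct rte_eth_dev_tx_buffer *buf = ...;
 *
 *      rte_eth_tx_buffer_init(buf, BURST_SIZE);
 *      rte_eth_tx_buffer_set_err_callback(buf,
 *                      rte_eth_tx_buffer_count_callback, &dropped);
 */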
int
rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
{
        int ret = 0;

        if (buffer == NULL)
                return -EINVAL;

        buffer->size = size;
        if (buffer->error_callback == NULL) {
                ret = rte_eth_tx_buffer_set_err_callback(
                        buffer, rte_eth_tx_buffer_drop_callback, NULL);
        }

        return ret;
}

void
rte_eth_promiscuous_enable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
        (*dev->dev_ops->promiscuous_enable)(dev);
        dev->data->promiscuous = 1;
}

void
rte_eth_promiscuous_disable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
        dev->data->promiscuous = 0;
        (*dev->dev_ops->promiscuous_disable)(dev);
}

int
rte_eth_promiscuous_get(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        return dev->data->promiscuous;
}

void
rte_eth_allmulticast_enable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
        (*dev->dev_ops->allmulticast_enable)(dev);
        dev->data->all_multicast = 1;
}

void
rte_eth_allmulticast_disable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
        dev->data->all_multicast = 0;
        (*dev->dev_ops->allmulticast_disable)(dev);
}

int
rte_eth_allmulticast_get(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        return dev->data->all_multicast;
}

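/*
 * Copy the 8-byte link status in one atomic operation: the whole
 * rte_eth_link structure fits in 64 bits, so rte_atomic64_cmpset() yields
 * a consistent snapshot even if an interrupt handler is updating
 * dev->data->dev_link concurrently.
 */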
static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
{
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &(dev->data->dev_link);

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

void
rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_conf.intr_conf.lsc != 0)
                rte_eth_dev_atomic_read_link_status(dev, eth_link);
        else {
                RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
                (*dev->dev_ops->link_update)(dev, 1);
                *eth_link = dev->data->dev_link;
        }
}

void
rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_conf.intr_conf.lsc != 0)
                rte_eth_dev_atomic_read_link_status(dev, eth_link);
        else {
                RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
                (*dev->dev_ops->link_update)(dev, 0);
                *eth_link = dev->data->dev_link;
        }
}

int
rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        memset(stats, 0, sizeof(*stats));

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
        stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
        (*dev->dev_ops->stats_get)(dev, stats);
        return 0;
}

void
rte_eth_stats_reset(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
        (*dev->dev_ops->stats_reset)(dev);
        dev->data->rx_mbuf_alloc_failed = 0;
}

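/*
 * Total number of extended statistics: any driver-specific xstats, plus
 * the generic per-port stats, plus per-queue stats for at most
 * RTE_ETHDEV_QUEUE_STAT_CNTRS Rx and Tx queues.
 */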
static int
get_xstats_count(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        int count;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
        dev = &rte_eth_devices[port_id];
        if (dev->dev_ops->xstats_get_names != NULL) {
                count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
                if (count < 0)
                        return count;
        } else
                count = 0;
        count += RTE_NB_STATS;
        count += RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS) *
                 RTE_NB_RXQ_STATS;
        count += RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS) *
                 RTE_NB_TXQ_STATS;
        return count;
}

int
rte_eth_xstats_get_names(uint8_t port_id,
        struct rte_eth_xstat_name *xstats_names,
        unsigned size)
{
        struct rte_eth_dev *dev;
        int cnt_used_entries;
        int cnt_expected_entries;
        int cnt_driver_entries;
        uint32_t idx, id_queue;
        uint16_t num_q;

        cnt_expected_entries = get_xstats_count(port_id);
        if (xstats_names == NULL || cnt_expected_entries < 0 ||
                        (int)size < cnt_expected_entries)
                return cnt_expected_entries;

        /* port_id checked in get_xstats_count() */
        dev = &rte_eth_devices[port_id];
        cnt_used_entries = 0;

        for (idx = 0; idx < RTE_NB_STATS; idx++) {
                snprintf(xstats_names[cnt_used_entries].name,
                        sizeof(xstats_names[0].name),
                        "%s", rte_stats_strings[idx].name);
                cnt_used_entries++;
        }
        num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
        for (id_queue = 0; id_queue < num_q; id_queue++) {
                for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
                        snprintf(xstats_names[cnt_used_entries].name,
                                sizeof(xstats_names[0].name),
                                "rx_q%u%s",
                                id_queue, rte_rxq_stats_strings[idx].name);
                        cnt_used_entries++;
                }
        }
        num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
        for (id_queue = 0; id_queue < num_q; id_queue++) {
                for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
                        snprintf(xstats_names[cnt_used_entries].name,
                                sizeof(xstats_names[0].name),
                                "tx_q%u%s",
                                id_queue, rte_txq_stats_strings[idx].name);
                        cnt_used_entries++;
                }
        }

        if (dev->dev_ops->xstats_get_names != NULL) {
                /* If there are any driver-specific xstats, append them
                 * to the end of the list.
                 */
                cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
                        dev,
                        xstats_names + cnt_used_entries,
                        size - cnt_used_entries);
                if (cnt_driver_entries < 0)
                        return cnt_driver_entries;
                cnt_used_entries += cnt_driver_entries;
        }

        return cnt_used_entries;
}

/* retrieve ethdev extended statistics */
int
rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
        unsigned n)
{
        struct rte_eth_stats eth_stats;
        struct rte_eth_dev *dev;
        unsigned count = 0, i, q;
        signed xcount = 0;
        uint64_t val, *stats_ptr;
        uint16_t nb_rxqs, nb_txqs;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
        nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

        /* Return generic statistics */
        count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
                (nb_txqs * RTE_NB_TXQ_STATS);

        /* implemented by the driver */
        if (dev->dev_ops->xstats_get != NULL) {
                /* Retrieve the driver-specific xstats, placing them after
                 * the generic ones in the xstats array.
                 */
                xcount = (*dev->dev_ops->xstats_get)(dev,
                                     xstats ? xstats + count : NULL,
                                     (n > count) ? n - count : 0);

                if (xcount < 0)
                        return xcount;
        }

        if (n < count + xcount || xstats == NULL)
                return count + xcount;

        /* now fill the xstats structure */
        count = 0;
        rte_eth_stats_get(port_id, &eth_stats);

        /* global stats */
        for (i = 0; i < RTE_NB_STATS; i++) {
                stats_ptr = RTE_PTR_ADD(&eth_stats,
                                        rte_stats_strings[i].offset);
                val = *stats_ptr;
                xstats[count++].value = val;
        }

        /* per-rxq stats */
        for (q = 0; q < nb_rxqs; q++) {
                for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
                        stats_ptr = RTE_PTR_ADD(&eth_stats,
                                        rte_rxq_stats_strings[i].offset +
                                        q * sizeof(uint64_t));
                        val = *stats_ptr;
                        xstats[count++].value = val;
                }
        }

        /* per-txq stats */
        for (q = 0; q < nb_txqs; q++) {
                for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
                        stats_ptr = RTE_PTR_ADD(&eth_stats,
                                        rte_txq_stats_strings[i].offset +
                                        q * sizeof(uint64_t));
                        val = *stats_ptr;
                        xstats[count++].value = val;
                }
        }

        for (i = 0; i < count + xcount; i++)
                xstats[i].id = i;

        return count + xcount;
}

1535 /* reset ethdev extended statistics */
1536 void
1537 rte_eth_xstats_reset(uint8_t port_id)
1538 {
1539         struct rte_eth_dev *dev;
1540
1541         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1542         dev = &rte_eth_devices[port_id];
1543
1544         /* implemented by the driver */
1545         if (dev->dev_ops->xstats_reset != NULL) {
1546                 (*dev->dev_ops->xstats_reset)(dev);
1547                 return;
1548         }
1549
1550         /* fallback to default */
1551         rte_eth_stats_reset(port_id);
1552 }
1553
1554 static int
1555 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1556                 uint8_t is_rx)
1557 {
1558         struct rte_eth_dev *dev;
1559
1560         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1561
1562         dev = &rte_eth_devices[port_id];
1563
1564         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1565         return (*dev->dev_ops->queue_stats_mapping_set)
1566                         (dev, queue_id, stat_idx, is_rx);
1567 }
1568
1569
1570 int
1571 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1572                 uint8_t stat_idx)
1573 {
1574         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1575                         STAT_QMAP_TX);
1576 }
1577
1578
1579 int
1580 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1581                 uint8_t stat_idx)
1582 {
1583         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1584                         STAT_QMAP_RX);
1585 }
1586
1587 void
1588 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1589 {
1590         struct rte_eth_dev *dev;
1591         const struct rte_eth_desc_lim lim = {
1592                 .nb_max = UINT16_MAX,
1593                 .nb_min = 0,
1594                 .nb_align = 1,
1595         };
1596
1597         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1598         dev = &rte_eth_devices[port_id];
1599
1600         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
1601         dev_info->rx_desc_lim = lim;
1602         dev_info->tx_desc_lim = lim;
1603
1604         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1605         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1606         dev_info->pci_dev = dev->pci_dev;
1607         dev_info->driver_name = dev->data->drv_name;
1608         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
1609         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
1610 }
1611
1612 int
1613 rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask,
1614                                  uint32_t *ptypes, int num)
1615 {
1616         int i, j;
1617         struct rte_eth_dev *dev;
1618         const uint32_t *all_ptypes;
1619
1620         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1621         dev = &rte_eth_devices[port_id];
1622         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
1623         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
1624
1625         if (!all_ptypes)
1626                 return 0;
1627
1628         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
1629                 if (all_ptypes[i] & ptype_mask) {
1630                         if (j < num)
1631                                 ptypes[j] = all_ptypes[i];
1632                         j++;
1633                 }
1634
1635         return j;
1636 }
1637
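/*
 * Illustrative sketch (not part of the library): as with the xstats
 * query, the return value of rte_eth_dev_get_supported_ptypes() is
 * the total number of matching packet types and may exceed the array
 * size passed in, in which case only the first entries are stored.
 * The 16-entry buffer is an arbitrary choice for this example.
 */
#if 0   /* example only, not compiled with the library */
static void
example_list_l4_ptypes(uint8_t port_id)
{
        uint32_t ptypes[16];
        int i, num;

        /* Ask only for the L4 packet types the PMD can recognize. */
        num = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L4_MASK,
                                               ptypes, RTE_DIM(ptypes));
        for (i = 0; i < num && i < (int)RTE_DIM(ptypes); i++)
                printf("supported ptype 0x%08" PRIx32 "\n", ptypes[i]);
}
#endif
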
1638 void
1639 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1640 {
1641         struct rte_eth_dev *dev;
1642
1643         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1644         dev = &rte_eth_devices[port_id];
1645         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1646 }
1647
1648
1649 int
1650 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1651 {
1652         struct rte_eth_dev *dev;
1653
1654         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1655
1656         dev = &rte_eth_devices[port_id];
1657         *mtu = dev->data->mtu;
1658         return 0;
1659 }
1660
1661 int
1662 rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
1663 {
1664         int ret;
1665         struct rte_eth_dev *dev;
1666
1667         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1668         dev = &rte_eth_devices[port_id];
1669         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
1670
1671         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
1672         if (!ret)
1673                 dev->data->mtu = mtu;
1674
1675         return ret;
1676 }
1677
1678 int
1679 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1680 {
1681         struct rte_eth_dev *dev;
1682
1683         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1684         dev = &rte_eth_devices[port_id];
1685         if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1686                 RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1687                 return -ENOSYS;
1688         }
1689
1690         if (vlan_id > ETHER_MAX_VLAN_ID) {
1691                 RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1692                                 port_id, (unsigned) vlan_id);
1693                 return -EINVAL;
1694         }
1695         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1696
1697         return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1698 }
1699
1700 int
1701 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1702 {
1703         struct rte_eth_dev *dev;
1704
1705         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1706         dev = &rte_eth_devices[port_id];
1707         if (rx_queue_id >= dev->data->nb_rx_queues) {
1708                 RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
1709                 return -EINVAL;
1710         }
1711
1712         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1713         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1714
1715         return 0;
1716 }
1717
1718 int
1719 rte_eth_dev_set_vlan_ether_type(uint8_t port_id,
1720                                 enum rte_vlan_type vlan_type,
1721                                 uint16_t tpid)
1722 {
1723         struct rte_eth_dev *dev;
1724
1725         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1726         dev = &rte_eth_devices[port_id];
1727         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1728
1729         return (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, tpid);
1730 }
1731
1732 int
1733 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
1734 {
1735         struct rte_eth_dev *dev;
1736         int ret = 0;
1737         int mask = 0;
1738         int cur, org = 0;
1739
1740         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1741         dev = &rte_eth_devices[port_id];
1742
1743         /* Check which options have been changed by the application. */
1744         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
1745         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
1746         if (cur != org) {
1747                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
1748                 mask |= ETH_VLAN_STRIP_MASK;
1749         }
1750
1751         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
1752         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
1753         if (cur != org) {
1754                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
1755                 mask |= ETH_VLAN_FILTER_MASK;
1756         }
1757
1758         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
1759         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
1760         if (cur != org) {
1761                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
1762                 mask |= ETH_VLAN_EXTEND_MASK;
1763         }
1764
1765         /* No change. */
1766         if (mask == 0)
1767                 return ret;
1768
1769         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
1770         (*dev->dev_ops->vlan_offload_set)(dev, mask);
1771
1772         return ret;
1773 }
1774
1775 int
1776 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1777 {
1778         struct rte_eth_dev *dev;
1779         int ret = 0;
1780
1781         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1782         dev = &rte_eth_devices[port_id];
1783
1784         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1785                 ret |= ETH_VLAN_STRIP_OFFLOAD;
1786
1787         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1788                 ret |= ETH_VLAN_FILTER_OFFLOAD;
1789
1790         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1791                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
1792
1793         return ret;
1794 }
1795
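/*
 * Illustrative sketch (not part of the library): since
 * rte_eth_dev_set_vlan_offload() takes the complete offload mask, the
 * usual pattern is read-modify-write through
 * rte_eth_dev_get_vlan_offload(), shown here for enabling VLAN
 * stripping while leaving the other offload bits untouched.
 */
#if 0   /* example only, not compiled with the library */
static int
example_enable_vlan_strip(uint8_t port_id)
{
        int mask = rte_eth_dev_get_vlan_offload(port_id);

        if (mask < 0)
                return mask;
        return rte_eth_dev_set_vlan_offload(port_id,
                                            mask | ETH_VLAN_STRIP_OFFLOAD);
}
#endif
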
1796 int
1797 rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
1798 {
1799         struct rte_eth_dev *dev;
1800
1801         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1802         dev = &rte_eth_devices[port_id];
1803         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
1804         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
1805
1806         return 0;
1807 }
1808
1809 int
1810 rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1811 {
1812         struct rte_eth_dev *dev;
1813
1814         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1815         dev = &rte_eth_devices[port_id];
1816         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
1817         memset(fc_conf, 0, sizeof(*fc_conf));
1818         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
1819 }
1820
1821 int
1822 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1823 {
1824         struct rte_eth_dev *dev;
1825
1826         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1827         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
1828                 RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
1829                 return -EINVAL;
1830         }
1831
1832         dev = &rte_eth_devices[port_id];
1833         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
1834         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
1835 }
1836
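/*
 * Illustrative sketch (not part of the library): flow-control updates
 * normally read the current configuration, modify only the fields of
 * interest and write the whole structure back, since
 * rte_eth_dev_flow_ctrl_set() consumes a complete rte_eth_fc_conf.
 * Enabling RX pause frames is just an example setting.
 */
#if 0   /* example only, not compiled with the library */
static int
example_enable_rx_pause(uint8_t port_id)
{
        struct rte_eth_fc_conf fc_conf;
        int ret;

        ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
        if (ret < 0)
                return ret;
        fc_conf.mode = RTE_FC_RX_PAUSE;
        return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}
#endif
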
1837 int
1838 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
1839 {
1840         struct rte_eth_dev *dev;
1841
1842         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1843         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
1844                 RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
1845                 return -EINVAL;
1846         }
1847
1848         dev = &rte_eth_devices[port_id];
1849         /* High water, low water validation are device specific */
1850         if (*dev->dev_ops->priority_flow_ctrl_set)
1851                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
1852         return -ENOTSUP;
1853 }
1854
1855 static int
1856 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
1857                         uint16_t reta_size)
1858 {
1859         uint16_t i, num;
1860
1861         if (!reta_conf)
1862                 return -EINVAL;
1863
1864         if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
1865                 RTE_PMD_DEBUG_TRACE("Invalid reta size, must be a multiple of %u\n",
1866                                                         RTE_RETA_GROUP_SIZE);
1867                 return -EINVAL;
1868         }
1869
1870         num = reta_size / RTE_RETA_GROUP_SIZE;
1871         for (i = 0; i < num; i++) {
1872                 if (reta_conf[i].mask)
1873                         return 0;
1874         }
1875
1876         return -EINVAL;
1877 }
1878
1879 static int
1880 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
1881                          uint16_t reta_size,
1882                          uint16_t max_rxq)
1883 {
1884         uint16_t i, idx, shift;
1885
1886         if (!reta_conf)
1887                 return -EINVAL;
1888
1889         if (max_rxq == 0) {
1890                 RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
1891                 return -EINVAL;
1892         }
1893
1894         for (i = 0; i < reta_size; i++) {
1895                 idx = i / RTE_RETA_GROUP_SIZE;
1896                 shift = i % RTE_RETA_GROUP_SIZE;
1897                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
1898                         (reta_conf[idx].reta[shift] >= max_rxq)) {
1899                         RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u must be "
1900                                 "less than the number of RX queues: %u\n", idx, shift,
1901                                 reta_conf[idx].reta[shift], max_rxq);
1902                         return -EINVAL;
1903                 }
1904         }
1905
1906         return 0;
1907 }
1908
1909 int
1910 rte_eth_dev_rss_reta_update(uint8_t port_id,
1911                             struct rte_eth_rss_reta_entry64 *reta_conf,
1912                             uint16_t reta_size)
1913 {
1914         struct rte_eth_dev *dev;
1915         int ret;
1916
1917         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1918         /* Check mask bits */
1919         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1920         if (ret < 0)
1921                 return ret;
1922
1923         dev = &rte_eth_devices[port_id];
1924
1925         /* Check entry value */
1926         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
1927                                 dev->data->nb_rx_queues);
1928         if (ret < 0)
1929                 return ret;
1930
1931         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
1932         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
1933 }
1934
1935 int
1936 rte_eth_dev_rss_reta_query(uint8_t port_id,
1937                            struct rte_eth_rss_reta_entry64 *reta_conf,
1938                            uint16_t reta_size)
1939 {
1940         struct rte_eth_dev *dev;
1941         int ret;
1942
1943         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1944
1945         /* Check mask bits */
1946         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1947         if (ret < 0)
1948                 return ret;
1949
1950         dev = &rte_eth_devices[port_id];
1951         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
1952         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
1953 }
1954
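/*
 * Illustrative sketch (not part of the library): filling the
 * rte_eth_rss_reta_entry64 array uses the same idx/shift arithmetic
 * that the checks above apply -- entry i lives in group
 * i / RTE_RETA_GROUP_SIZE at bit i % RTE_RETA_GROUP_SIZE, and only
 * entries whose mask bit is set are updated. The round-robin spread
 * over nb_queues is one possible policy, chosen for this example.
 */
#if 0   /* example only, not compiled with the library */
static int
example_spread_reta(uint8_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
        struct rte_eth_rss_reta_entry64 reta_conf[8];
        uint16_t i, idx, shift;

        if (nb_queues == 0 ||
            reta_size > RTE_DIM(reta_conf) * RTE_RETA_GROUP_SIZE)
                return -EINVAL;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                reta_conf[idx].mask |= 1ULL << shift;
                reta_conf[idx].reta[shift] = i % nb_queues;
        }
        return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}
#endif
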
1955 int
1956 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
1957 {
1958         struct rte_eth_dev *dev;
1959         uint16_t rss_hash_protos;
1960
1961         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1962         rss_hash_protos = rss_conf->rss_hf;
1963         if ((rss_hash_protos != 0) &&
1964             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
1965                 RTE_PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
1966                                 rss_hash_protos);
1967                 return -EINVAL;
1968         }
1969         dev = &rte_eth_devices[port_id];
1970         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
1971         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
1972 }
1973
1974 int
1975 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
1976                               struct rte_eth_rss_conf *rss_conf)
1977 {
1978         struct rte_eth_dev *dev;
1979
1980         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1981         dev = &rte_eth_devices[port_id];
1982         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
1983         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
1984 }
1985
1986 int
1987 rte_eth_dev_udp_tunnel_port_add(uint8_t port_id,
1988                                 struct rte_eth_udp_tunnel *udp_tunnel)
1989 {
1990         struct rte_eth_dev *dev;
1991
1992         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1993         if (udp_tunnel == NULL) {
1994                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
1995                 return -EINVAL;
1996         }
1997
1998         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
1999                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2000                 return -EINVAL;
2001         }
2002
2003         dev = &rte_eth_devices[port_id];
2004         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
2005         return (*dev->dev_ops->udp_tunnel_port_add)(dev, udp_tunnel);
2006 }
2007
2008 int
2009 rte_eth_dev_udp_tunnel_port_delete(uint8_t port_id,
2010                                    struct rte_eth_udp_tunnel *udp_tunnel)
2011 {
2012         struct rte_eth_dev *dev;
2013
2014         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2015         dev = &rte_eth_devices[port_id];
2016
2017         if (udp_tunnel == NULL) {
2018                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2019                 return -EINVAL;
2020         }
2021
2022         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2023                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2024                 return -EINVAL;
2025         }
2026
2027         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
2028         return (*dev->dev_ops->udp_tunnel_port_del)(dev, udp_tunnel);
2029 }
2030
2031 int
2032 rte_eth_led_on(uint8_t port_id)
2033 {
2034         struct rte_eth_dev *dev;
2035
2036         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2037         dev = &rte_eth_devices[port_id];
2038         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2039         return (*dev->dev_ops->dev_led_on)(dev);
2040 }
2041
2042 int
2043 rte_eth_led_off(uint8_t port_id)
2044 {
2045         struct rte_eth_dev *dev;
2046
2047         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2048         dev = &rte_eth_devices[port_id];
2049         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2050         return (*dev->dev_ops->dev_led_off)(dev);
2051 }
2052
2053 /*
2054  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2055  * an empty spot.
2056  */
2057 static int
2058 get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2059 {
2060         struct rte_eth_dev_info dev_info;
2061         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2062         unsigned i;
2063
2064         rte_eth_dev_info_get(port_id, &dev_info);
2065
2066         for (i = 0; i < dev_info.max_mac_addrs; i++)
2067                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2068                         return i;
2069
2070         return -1;
2071 }
2072
2073 static const struct ether_addr null_mac_addr;
2074
2075 int
2076 rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
2077                         uint32_t pool)
2078 {
2079         struct rte_eth_dev *dev;
2080         int index;
2081         uint64_t pool_mask;
2082
2083         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2084         dev = &rte_eth_devices[port_id];
2085         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2086
2087         if (is_zero_ether_addr(addr)) {
2088                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2089                         port_id);
2090                 return -EINVAL;
2091         }
2092         if (pool >= ETH_64_POOLS) {
2093                 RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2094                 return -EINVAL;
2095         }
2096
2097         index = get_mac_addr_index(port_id, addr);
2098         if (index < 0) {
2099                 index = get_mac_addr_index(port_id, &null_mac_addr);
2100                 if (index < 0) {
2101                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2102                                 port_id);
2103                         return -ENOSPC;
2104                 }
2105         } else {
2106                 pool_mask = dev->data->mac_pool_sel[index];
2107
2108                 /* If both the MAC address and pool are already set, do nothing. */
2109                 if (pool_mask & (1ULL << pool))
2110                         return 0;
2111         }
2112
2113         /* Update NIC */
2114         (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2115
2116         /* Update address in NIC data structure */
2117         ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2118
2119         /* Update pool bitmap in NIC data structure */
2120         dev->data->mac_pool_sel[index] |= (1ULL << pool);
2121
2122         return 0;
2123 }
2124
2125 int
2126 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
2127 {
2128         struct rte_eth_dev *dev;
2129         int index;
2130
2131         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2132         dev = &rte_eth_devices[port_id];
2133         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2134
2135         index = get_mac_addr_index(port_id, addr);
2136         if (index == 0) {
2137                 RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2138                 return -EADDRINUSE;
2139         } else if (index < 0)
2140                 return 0;  /* Do nothing if address wasn't found */
2141
2142         /* Update NIC */
2143         (*dev->dev_ops->mac_addr_remove)(dev, index);
2144
2145         /* Update address in NIC data structure */
2146         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2147
2148         /* reset pool bitmap */
2149         dev->data->mac_pool_sel[index] = 0;
2150
2151         return 0;
2152 }
2153
2154 int
2155 rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr)
2156 {
2157         struct rte_eth_dev *dev;
2158
2159         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2160
2161         if (!is_valid_assigned_ether_addr(addr))
2162                 return -EINVAL;
2163
2164         dev = &rte_eth_devices[port_id];
2165         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2166
2167         /* Update default address in NIC data structure */
2168         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
2169
2170         (*dev->dev_ops->mac_addr_set)(dev, addr);
2171
2172         return 0;
2173 }
2174
2175 int
2176 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
2177                                 uint16_t rx_mode, uint8_t on)
2178 {
2179         uint16_t num_vfs;
2180         struct rte_eth_dev *dev;
2181         struct rte_eth_dev_info dev_info;
2182
2183         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2184
2185         dev = &rte_eth_devices[port_id];
2186         rte_eth_dev_info_get(port_id, &dev_info);
2187
2188         num_vfs = dev_info.max_vfs;
2189         if (vf >= num_vfs) {
2190                 RTE_PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
2191                 return -EINVAL;
2192         }
2193
2194         if (rx_mode == 0) {
2195                 RTE_PMD_DEBUG_TRACE("set VF RX mode:mode mask cannot be zero\n");
2196                 return -EINVAL;
2197         }
2198         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
2199         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
2200 }
2201
2202 /*
2203  * Returns index into the hash MAC address array of addr. Use
2204  * 00:00:00:00:00:00 to find an empty spot.
2205  */
2206 static int
2207 get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2208 {
2209         struct rte_eth_dev_info dev_info;
2210         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2211         unsigned i;
2212
2213         rte_eth_dev_info_get(port_id, &dev_info);
2214         if (!dev->data->hash_mac_addrs)
2215                 return -1;
2216
2217         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2218                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2219                         ETHER_ADDR_LEN) == 0)
2220                         return i;
2221
2222         return -1;
2223 }
2224
2225 int
2226 rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
2227                                 uint8_t on)
2228 {
2229         int index;
2230         int ret;
2231         struct rte_eth_dev *dev;
2232
2233         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2234
2235         dev = &rte_eth_devices[port_id];
2236         if (is_zero_ether_addr(addr)) {
2237                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2238                         port_id);
2239                 return -EINVAL;
2240         }
2241
2242         index = get_hash_mac_addr_index(port_id, addr);
2243         /* Check if it's already there, and do nothing */
2244         if ((index >= 0) && (on))
2245                 return 0;
2246
2247         if (index < 0) {
2248                 if (!on) {
2249                         RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
2250                                 "set in UTA\n", port_id);
2251                         return -EINVAL;
2252                 }
2253
2254                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2255                 if (index < 0) {
2256                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2257                                         port_id);
2258                         return -ENOSPC;
2259                 }
2260         }
2261
2262         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2263         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2264         if (ret == 0) {
2265                 /* Update address in NIC data structure */
2266                 if (on)
2267                         ether_addr_copy(addr,
2268                                         &dev->data->hash_mac_addrs[index]);
2269                 else
2270                         ether_addr_copy(&null_mac_addr,
2271                                         &dev->data->hash_mac_addrs[index]);
2272         }
2273
2274         return ret;
2275 }
2276
2277 int
2278 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2279 {
2280         struct rte_eth_dev *dev;
2281
2282         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2283
2284         dev = &rte_eth_devices[port_id];
2285
2286         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2287         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2288 }
2289
2290 int
2291 rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on)
2292 {
2293         uint16_t num_vfs;
2294         struct rte_eth_dev *dev;
2295         struct rte_eth_dev_info dev_info;
2296
2297         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2298
2299         dev = &rte_eth_devices[port_id];
2300         rte_eth_dev_info_get(port_id, &dev_info);
2301
2302         num_vfs = dev_info.max_vfs;
2303         if (vf >= num_vfs) {
2304                 RTE_PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2305                 return -EINVAL;
2306         }
2307
2308         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2309         return (*dev->dev_ops->set_vf_rx)(dev, vf, on);
2310 }
2311
2312 int
2313 rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on)
2314 {
2315         uint16_t num_vfs;
2316         struct rte_eth_dev *dev;
2317         struct rte_eth_dev_info dev_info;
2318
2319         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2320
2321         dev = &rte_eth_devices[port_id];
2322         rte_eth_dev_info_get(port_id, &dev_info);
2323
2324         num_vfs = dev_info.max_vfs;
2325         if (vf >= num_vfs) {
2326                 RTE_PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
2327                 return -EINVAL;
2328         }
2329
2330         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2331         return (*dev->dev_ops->set_vf_tx)(dev, vf, on);
2332 }
2333
2334 int
2335 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2336                                uint64_t vf_mask, uint8_t vlan_on)
2337 {
2338         struct rte_eth_dev *dev;
2339
2340         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2341
2342         dev = &rte_eth_devices[port_id];
2343
2344         if (vlan_id > ETHER_MAX_VLAN_ID) {
2345                 RTE_PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
2346                         vlan_id);
2347                 return -EINVAL;
2348         }
2349
2350         if (vf_mask == 0) {
2351                 RTE_PMD_DEBUG_TRACE("VF VLAN filter:pool_mask cannot be 0\n");
2352                 return -EINVAL;
2353         }
2354
2355         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2356         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2357                                                    vf_mask, vlan_on);
2358 }
2359
2360 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
2361                                         uint16_t tx_rate)
2362 {
2363         struct rte_eth_dev *dev;
2364         struct rte_eth_dev_info dev_info;
2365         struct rte_eth_link link;
2366
2367         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2368
2369         dev = &rte_eth_devices[port_id];
2370         rte_eth_dev_info_get(port_id, &dev_info);
2371         link = dev->data->dev_link;
2372
2373         if (queue_idx >= dev_info.max_tx_queues) {
2374                 RTE_PMD_DEBUG_TRACE("set queue rate limit:port %d: "
2375                                 "invalid queue id=%d\n", port_id, queue_idx);
2376                 return -EINVAL;
2377         }
2378
2379         if (tx_rate > link.link_speed) {
2380                 RTE_PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
2381                                 "bigger than link speed = %d\n",
2382                         tx_rate, link.link_speed);
2383                 return -EINVAL;
2384         }
2385
2386         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2387         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2388 }
2389
2390 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
2391                                 uint64_t q_msk)
2392 {
2393         struct rte_eth_dev *dev;
2394         struct rte_eth_dev_info dev_info;
2395         struct rte_eth_link link;
2396
2397         if (q_msk == 0)
2398                 return 0;
2399
2400         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2401
2402         dev = &rte_eth_devices[port_id];
2403         rte_eth_dev_info_get(port_id, &dev_info);
2404         link = dev->data->dev_link;
2405
2406         if (vf >= dev_info.max_vfs) {
2407                 RTE_PMD_DEBUG_TRACE("set VF rate limit:port %d: "
2408                                 "invalid vf id=%d\n", port_id, vf);
2409                 return -EINVAL;
2410         }
2411
2412         if (tx_rate > link.link_speed) {
2413                 RTE_PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
2414                                 "bigger than link speed = %d\n",
2415                                 tx_rate, link.link_speed);
2416                 return -EINVAL;
2417         }
2418
2419         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
2420         return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
2421 }
2422
2423 int
2424 rte_eth_mirror_rule_set(uint8_t port_id,
2425                         struct rte_eth_mirror_conf *mirror_conf,
2426                         uint8_t rule_id, uint8_t on)
2427 {
2428         struct rte_eth_dev *dev;
2429
2430         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2431         if (mirror_conf->rule_type == 0) {
2432                 RTE_PMD_DEBUG_TRACE("mirror rule type cannot be 0.\n");
2433                 return -EINVAL;
2434         }
2435
2436         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2437                 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
2438                                 ETH_64_POOLS - 1);
2439                 return -EINVAL;
2440         }
2441
2442         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
2443              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
2444             (mirror_conf->pool_mask == 0)) {
2445                 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask cannot be 0.\n");
2446                 return -EINVAL;
2447         }
2448
2449         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
2450             mirror_conf->vlan.vlan_mask == 0) {
2451                 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask cannot be 0.\n");
2452                 return -EINVAL;
2453         }
2454
2455         dev = &rte_eth_devices[port_id];
2456         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2457
2458         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2459 }
2460
2461 int
2462 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2463 {
2464         struct rte_eth_dev *dev;
2465
2466         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2467
2468         dev = &rte_eth_devices[port_id];
2469         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2470
2471         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2472 }
2473
2474 int
2475 rte_eth_dev_callback_register(uint8_t port_id,
2476                         enum rte_eth_event_type event,
2477                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2478 {
2479         struct rte_eth_dev *dev;
2480         struct rte_eth_dev_callback *user_cb;
2481
2482         if (!cb_fn)
2483                 return -EINVAL;
2484
2485         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2486
2487         dev = &rte_eth_devices[port_id];
2488         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2489
2490         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
2491                 if (user_cb->cb_fn == cb_fn &&
2492                         user_cb->cb_arg == cb_arg &&
2493                         user_cb->event == event) {
2494                         break;
2495                 }
2496         }
2497
2498         /* create a new callback. */
2499         if (user_cb == NULL) {
2500                 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
2501                                         sizeof(struct rte_eth_dev_callback), 0);
2502                 if (user_cb != NULL) {
2503                         user_cb->cb_fn = cb_fn;
2504                         user_cb->cb_arg = cb_arg;
2505                         user_cb->event = event;
2506                         TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
2507                 }
2508         }
2509
2510         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2511         return (user_cb == NULL) ? -ENOMEM : 0;
2512 }
2513
2514 int
2515 rte_eth_dev_callback_unregister(uint8_t port_id,
2516                         enum rte_eth_event_type event,
2517                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2518 {
2519         int ret;
2520         struct rte_eth_dev *dev;
2521         struct rte_eth_dev_callback *cb, *next;
2522
2523         if (!cb_fn)
2524                 return -EINVAL;
2525
2526         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2527
2528         dev = &rte_eth_devices[port_id];
2529         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2530
2531         ret = 0;
2532         for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
2533
2534                 next = TAILQ_NEXT(cb, next);
2535
2536                 if (cb->cb_fn != cb_fn || cb->event != event ||
2537                                 (cb->cb_arg != (void *)-1 &&
2538                                 cb->cb_arg != cb_arg))
2539                         continue;
2540
2541                 /*
2542                  * if this callback is not executing right now,
2543                  * then remove it.
2544                  */
2545                 if (cb->active == 0) {
2546                         TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
2547                         rte_free(cb);
2548                 } else {
2549                         ret = -EAGAIN;
2550                 }
2551         }
2552
2553         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2554         return ret;
2555 }
2556
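/*
 * Illustrative sketch (not part of the library): a minimal link-state
 * callback matching the rte_eth_dev_cb_fn signature. Callbacks run
 * from the interrupt thread, so they should stay short; the handler
 * name and printf() output are assumptions made for this example.
 */
#if 0   /* example only, not compiled with the library */
static void
example_lsc_handler(uint8_t port_id, enum rte_eth_event_type event,
                    void *cb_arg)
{
        struct rte_eth_link link;

        RTE_SET_USED(event);
        RTE_SET_USED(cb_arg);
        rte_eth_link_get_nowait(port_id, &link);
        printf("port %u: link %s\n", (unsigned)port_id,
               link.link_status ? "up" : "down");
}

/* Registered from the application's init path, for instance:
 *      rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                                    example_lsc_handler, NULL);
 */
#endif
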
2557 void
2558 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
2559         enum rte_eth_event_type event, void *cb_arg)
2560 {
2561         struct rte_eth_dev_callback *cb_lst;
2562         struct rte_eth_dev_callback dev_cb;
2563
2564         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2565         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
2566                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
2567                         continue;
2568                 dev_cb = *cb_lst;
2569                 cb_lst->active = 1;
2570                 if (cb_arg != NULL)
2571                         dev_cb.cb_arg = (void *) cb_arg;
2572
2573                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2574                 dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
2575                                                 dev_cb.cb_arg);
2576                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
2577                 cb_lst->active = 0;
2578         }
2579         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2580 }
2581
2582 int
2583 rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
2584 {
2585         uint32_t vec;
2586         struct rte_eth_dev *dev;
2587         struct rte_intr_handle *intr_handle;
2588         uint16_t qid;
2589         int rc;
2590
2591         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2592
2593         dev = &rte_eth_devices[port_id];
2594         intr_handle = &dev->pci_dev->intr_handle;
2595         if (!intr_handle->intr_vec) {
2596                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2597                 return -EPERM;
2598         }
2599
2600         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
2601                 vec = intr_handle->intr_vec[qid];
2602                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2603                 if (rc && rc != -EEXIST) {
2604                         RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2605                                         " op %d epfd %d vec %u\n",
2606                                         port_id, qid, op, epfd, vec);
2607                 }
2608         }
2609
2610         return 0;
2611 }
2612
2613 const struct rte_memzone *
2614 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
2615                          uint16_t queue_id, size_t size, unsigned align,
2616                          int socket_id)
2617 {
2618         char z_name[RTE_MEMZONE_NAMESIZE];
2619         const struct rte_memzone *mz;
2620
2621         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
2622                  dev->driver->pci_drv.driver.name, ring_name,
2623                  dev->data->port_id, queue_id);
2624
2625         mz = rte_memzone_lookup(z_name);
2626         if (mz)
2627                 return mz;
2628
2629         if (rte_xen_dom0_supported())
2630                 return rte_memzone_reserve_bounded(z_name, size, socket_id,
2631                                                    0, align, RTE_PGSIZE_2M);
2632         else
2633                 return rte_memzone_reserve_aligned(z_name, size, socket_id,
2634                                                    0, align);
2635 }
2636
2637 int
2638 rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
2639                           int epfd, int op, void *data)
2640 {
2641         uint32_t vec;
2642         struct rte_eth_dev *dev;
2643         struct rte_intr_handle *intr_handle;
2644         int rc;
2645
2646         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2647
2648         dev = &rte_eth_devices[port_id];
2649         if (queue_id >= dev->data->nb_rx_queues) {
2650                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
2651                 return -EINVAL;
2652         }
2653
2654         intr_handle = &dev->pci_dev->intr_handle;
2655         if (!intr_handle->intr_vec) {
2656                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2657                 return -EPERM;
2658         }
2659
2660         vec = intr_handle->intr_vec[queue_id];
2661         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2662         if (rc && rc != -EEXIST) {
2663                 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2664                                 " op %d epfd %d vec %u\n",
2665                                 port_id, queue_id, op, epfd, vec);
2666                 return rc;
2667         }
2668
2669         return 0;
2670 }
2671
2672 int
2673 rte_eth_dev_rx_intr_enable(uint8_t port_id,
2674                            uint16_t queue_id)
2675 {
2676         struct rte_eth_dev *dev;
2677
2678         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2679
2680         dev = &rte_eth_devices[port_id];
2681
2682         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
2683         return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
2684 }
2685
2686 int
2687 rte_eth_dev_rx_intr_disable(uint8_t port_id,
2688                             uint16_t queue_id)
2689 {
2690         struct rte_eth_dev *dev;
2691
2692         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2693
2694         dev = &rte_eth_devices[port_id];
2695
2696         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
2697         return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
2698 }
2699
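/*
 * Illustrative sketch (not part of the library): event-driven
 * applications (e.g. l3fwd-power) arm a queue's interrupt on the
 * Linux-only per-thread epoll instance and sleep until traffic
 * arrives. Error handling is elided; the helper name is an
 * assumption for this example.
 */
#if 0   /* example only, not compiled with the library */
static void
example_wait_for_rx(uint8_t port_id, uint16_t queue_id)
{
        struct rte_epoll_event event;

        /* Map the queue's interrupt vector into this thread's epoll set. */
        rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
                                  RTE_INTR_EVENT_ADD, NULL);
        rte_eth_dev_rx_intr_enable(port_id, queue_id);

        /* Block for up to 10 ms waiting for the queue's interrupt. */
        rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, 10);
        rte_eth_dev_rx_intr_disable(port_id, queue_id);
}
#endif
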
2700 #ifdef RTE_NIC_BYPASS
2701 int rte_eth_dev_bypass_init(uint8_t port_id)
2702 {
2703         struct rte_eth_dev *dev;
2704
2705         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2706
2707         dev = &rte_eth_devices[port_id];
2708         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
2709         (*dev->dev_ops->bypass_init)(dev);
2710         return 0;
2711 }
2712
2713 int
2714 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
2715 {
2716         struct rte_eth_dev *dev;
2717
2718         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2719
2720         dev = &rte_eth_devices[port_id];
2721         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2722         (*dev->dev_ops->bypass_state_show)(dev, state);
2723         return 0;
2724 }
2725
2726 int
2727 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
2728 {
2729         struct rte_eth_dev *dev;
2730
2731         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2732
2733         dev = &rte_eth_devices[port_id];
2734         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
2735         (*dev->dev_ops->bypass_state_set)(dev, new_state);
2736         return 0;
2737 }
2738
2739 int
2740 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
2741 {
2742         struct rte_eth_dev *dev;
2743
2744         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2745
2746         dev = &rte_eth_devices[port_id];
2747         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_show, -ENOTSUP);
2748         (*dev->dev_ops->bypass_event_show)(dev, event, state);
2749         return 0;
2750 }
2751
2752 int
2753 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
2754 {
2755         struct rte_eth_dev *dev;
2756
2757         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2758
2759         dev = &rte_eth_devices[port_id];
2760
2761         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
2762         (*dev->dev_ops->bypass_event_set)(dev, event, state);
2763         return 0;
2764 }
2765
2766 int
2767 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
2768 {
2769         struct rte_eth_dev *dev;
2770
2771         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2772
2773         dev = &rte_eth_devices[port_id];
2774
2775         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
2776         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
2777         return 0;
2778 }
2779
2780 int
2781 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
2782 {
2783         struct rte_eth_dev *dev;
2784
2785         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2786
2787         dev = &rte_eth_devices[port_id];
2788
2789         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
2790         (*dev->dev_ops->bypass_ver_show)(dev, ver);
2791         return 0;
2792 }
2793
2794 int
2795 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
2796 {
2797         struct rte_eth_dev *dev;
2798
2799         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2800
2801         dev = &rte_eth_devices[port_id];
2802
2803         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
2804         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
2805         return 0;
2806 }
2807
2808 int
2809 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
2810 {
2811         struct rte_eth_dev *dev;
2812
2813         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2814
2815         dev = &rte_eth_devices[port_id];
2816
2817         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
2818         (*dev->dev_ops->bypass_wd_reset)(dev);
2819         return 0;
2820 }
2821 #endif
2822
2823 int
2824 rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
2825 {
2826         struct rte_eth_dev *dev;
2827
2828         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2829
2830         dev = &rte_eth_devices[port_id];
2831         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2832         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
2833                                 RTE_ETH_FILTER_NOP, NULL);
2834 }
2835
2836 int
2837 rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
2838                        enum rte_filter_op filter_op, void *arg)
2839 {
2840         struct rte_eth_dev *dev;
2841
2842         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2843
2844         dev = &rte_eth_devices[port_id];
2845         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2846         return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
2847 }
2848
2849 void *
2850 rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
2851                 rte_rx_callback_fn fn, void *user_param)
2852 {
2853 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2854         rte_errno = ENOTSUP;
2855         return NULL;
2856 #endif
2857         /* check input parameters */
2858         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2859                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2860                 rte_errno = EINVAL;
2861                 return NULL;
2862         }
2863         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2864
2865         if (cb == NULL) {
2866                 rte_errno = ENOMEM;
2867                 return NULL;
2868         }
2869
2870         cb->fn.rx = fn;
2871         cb->param = user_param;
2872
2873         rte_spinlock_lock(&rte_eth_rx_cb_lock);
2874         /* Add the callbacks in FIFO order. */
2875         struct rte_eth_rxtx_callback *tail =
2876                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
2877
2878         if (!tail) {
2879                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
2880
2881         } else {
2882                 while (tail->next)
2883                         tail = tail->next;
2884                 tail->next = cb;
2885         }
2886         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
2887
2888         return cb;
2889 }
2890
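/*
 * Illustrative sketch (not part of the library): an RX callback has
 * the rte_rx_callback_fn signature and runs inside rte_eth_rx_burst()
 * after packets are received; returning nb_pkts unchanged keeps every
 * packet. The counter and helper names are assumptions made for this
 * example.
 */
#if 0   /* example only, not compiled with the library */
static uint64_t example_rx_count;

static uint16_t
example_count_cb(uint8_t port_id, uint16_t queue_id,
                 struct rte_mbuf *pkts[], uint16_t nb_pkts,
                 uint16_t max_pkts, void *user_param)
{
        RTE_SET_USED(port_id);
        RTE_SET_USED(queue_id);
        RTE_SET_USED(pkts);
        RTE_SET_USED(max_pkts);
        RTE_SET_USED(user_param);

        example_rx_count += nb_pkts;
        return nb_pkts; /* keep every received packet */
}

/* Installed per RX queue, for instance:
 *      rte_eth_add_rx_callback(port_id, queue_id, example_count_cb, NULL);
 */
#endif
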
2891 void *
2892 rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id,
2893                 rte_rx_callback_fn fn, void *user_param)
2894 {
2895 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2896         rte_errno = ENOTSUP;
2897         return NULL;
2898 #endif
2899         /* check input parameters */
2900         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2901                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2902                 rte_errno = EINVAL;
2903                 return NULL;
2904         }
2905
2906         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2907
2908         if (cb == NULL) {
2909                 rte_errno = ENOMEM;
2910                 return NULL;
2911         }
2912
2913         cb->fn.rx = fn;
2914         cb->param = user_param;
2915
2916         rte_spinlock_lock(&rte_eth_rx_cb_lock);
2917         /* Add the callback at the first position. */
2918         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
2919         rte_smp_wmb();
2920         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
2921         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
2922
2923         return cb;
2924 }
2925
2926 void *
2927 rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
2928                 rte_tx_callback_fn fn, void *user_param)
2929 {
2930 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2931         rte_errno = ENOTSUP;
2932         return NULL;
2933 #endif
2934         /* check input parameters */
2935         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2936                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
2937                 rte_errno = EINVAL;
2938                 return NULL;
2939         }
2940
2941         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2942
2943         if (cb == NULL) {
2944                 rte_errno = ENOMEM;
2945                 return NULL;
2946         }
2947
2948         cb->fn.tx = fn;
2949         cb->param = user_param;
2950
2951         rte_spinlock_lock(&rte_eth_tx_cb_lock);
2952         /* Add the callbacks in FIFO order. */
2953         struct rte_eth_rxtx_callback *tail =
2954                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
2955
2956         if (!tail) {
2957                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
2958
2959         } else {
2960                 while (tail->next)
2961                         tail = tail->next;
2962                 tail->next = cb;
2963         }
2964         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
2965
2966         return cb;
2967 }
2968
2969 int
2970 rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
2971                 struct rte_eth_rxtx_callback *user_cb)
2972 {
2973 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2974         return -ENOTSUP;
2975 #endif
2976         /* Check input parameters. */
2977         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2978         if (user_cb == NULL ||
2979                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
2980                 return -EINVAL;
2981
2982         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2983         struct rte_eth_rxtx_callback *cb;
2984         struct rte_eth_rxtx_callback **prev_cb;
2985         int ret = -EINVAL;
2986
2987         rte_spinlock_lock(&rte_eth_rx_cb_lock);
2988         prev_cb = &dev->post_rx_burst_cbs[queue_id];
2989         for (; *prev_cb != NULL; prev_cb = &cb->next) {
2990                 cb = *prev_cb;
2991                 if (cb == user_cb) {
2992                         /* Remove the user cb from the callback list. */
2993                         *prev_cb = cb->next;
2994                         ret = 0;
2995                         break;
2996                 }
2997         }
2998         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
2999
3000         return ret;
3001 }
3002
3003 int
3004 rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
3005                 struct rte_eth_rxtx_callback *user_cb)
3006 {
3007 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3008         return -ENOTSUP;
3009 #endif
3010         /* Check input parameters. */
3011         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3012         if (user_cb == NULL ||
3013                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
3014                 return -EINVAL;
3015
3016         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3017         int ret = -EINVAL;
3018         struct rte_eth_rxtx_callback *cb;
3019         struct rte_eth_rxtx_callback **prev_cb;
3020
3021         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3022         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
3023         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3024                 cb = *prev_cb;
3025                 if (cb == user_cb) {
3026                         /* Remove the user cb from the callback list. */
3027                         *prev_cb = cb->next;
3028                         ret = 0;
3029                         break;
3030                 }
3031         }
3032         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3033
3034         return ret;
3035 }
3036
3037 int
3038 rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3039         struct rte_eth_rxq_info *qinfo)
3040 {
3041         struct rte_eth_dev *dev;
3042
3043         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3044
3045         if (qinfo == NULL)
3046                 return -EINVAL;
3047
3048         dev = &rte_eth_devices[port_id];
3049         if (queue_id >= dev->data->nb_rx_queues) {
3050                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3051                 return -EINVAL;
3052         }
3053
3054         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3055
3056         memset(qinfo, 0, sizeof(*qinfo));
3057         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3058         return 0;
3059 }
3060
3061 int
3062 rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3063         struct rte_eth_txq_info *qinfo)
3064 {
3065         struct rte_eth_dev *dev;
3066
3067         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3068
3069         if (qinfo == NULL)
3070                 return -EINVAL;
3071
3072         dev = &rte_eth_devices[port_id];
3073         if (queue_id >= dev->data->nb_tx_queues) {
3074                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3075                 return -EINVAL;
3076         }
3077
3078         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3079
3080         memset(qinfo, 0, sizeof(*qinfo));
3081         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3082         return 0;
3083 }
3084
3085 int
3086 rte_eth_dev_set_mc_addr_list(uint8_t port_id,
3087                              struct ether_addr *mc_addr_set,
3088                              uint32_t nb_mc_addr)
3089 {
3090         struct rte_eth_dev *dev;
3091
3092         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3093
3094         dev = &rte_eth_devices[port_id];
3095         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3096         return dev->dev_ops->set_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
3097 }

int
rte_eth_timesync_enable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
        return (*dev->dev_ops->timesync_enable)(dev);
}

int
rte_eth_timesync_disable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
        return (*dev->dev_ops->timesync_disable)(dev);
}

int
rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp,
                                   uint32_t flags)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
        return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
}

int
rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
        return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
}
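
/*
 * Editor's usage sketch (hypothetical): polling for an IEEE 1588 RX
 * timestamp after enabling timesync. The "flags" argument is
 * PMD-specific (some drivers treat it as a timestamp register index);
 * 0 is assumed adequate here purely for illustration.
 */
static int
example_read_ptp_rx_timestamp(uint8_t port_id)
{
        struct timespec ts;
        int ret;

        ret = rte_eth_timesync_enable(port_id);
        if (ret != 0)
                return ret;

        /* ... receive a PTP frame with rte_eth_rx_burst() here ... */

        ret = rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0);
        if (ret == 0)
                printf("RX timestamp: %jd.%09ld\n",
                       (intmax_t)ts.tv_sec, ts.tv_nsec);
        return ret;
}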

int
rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
        return (*dev->dev_ops->timesync_adjust_time)(dev, delta);
}

int
rte_eth_timesync_read_time(uint8_t port_id, struct timespec *timestamp)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
        return (*dev->dev_ops->timesync_read_time)(dev, timestamp);
}

int
rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *timestamp)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
        return (*dev->dev_ops->timesync_write_time)(dev, timestamp);
}
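
/*
 * Editor's usage sketch (hypothetical): the three clock operations
 * above as a PTP slave might use them. Small offsets are corrected
 * with adjust_time; large ones step the clock via write_time (the
 * nanosecond remainder is ignored here for brevity). A real servo
 * would compute measured_offset_ns from SYNC/FOLLOW_UP timestamps.
 */
static int
example_discipline_nic_clock(uint8_t port_id, int64_t measured_offset_ns)
{
        struct timespec now;
        int ret;

        ret = rte_eth_timesync_read_time(port_id, &now);
        if (ret != 0)
                return ret;

        /* offset below one second: slew the clock */
        if (measured_offset_ns > -1000000000LL &&
            measured_offset_ns < 1000000000LL)
                return rte_eth_timesync_adjust_time(port_id,
                                                    measured_offset_ns);

        /* otherwise step the clock by whole seconds */
        now.tv_sec += measured_offset_ns / 1000000000LL;
        return rte_eth_timesync_write_time(port_id, &now);
}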

int
rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
        return (*dev->dev_ops->get_reg)(dev, info);
}

int
rte_eth_dev_get_eeprom_length(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
        return (*dev->dev_ops->get_eeprom_length)(dev);
}

int
rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
        return (*dev->dev_ops->get_eeprom)(dev, info);
}

int
rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
        return (*dev->dev_ops->set_eeprom)(dev, info);
}
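
/*
 * Editor's usage sketch (hypothetical): dumping the device EEPROM.
 * The application queries the size first, then fills in a
 * rte_dev_eeprom_info request. Handling of the "magic" field is
 * driver-specific and omitted here.
 */
static int
example_dump_eeprom(uint8_t port_id)
{
        struct rte_dev_eeprom_info info;
        int len;
        int ret;

        len = rte_eth_dev_get_eeprom_length(port_id);
        if (len <= 0)
                return len; /* error code, or nothing to read */

        memset(&info, 0, sizeof(info));
        info.offset = 0;
        info.length = (uint32_t)len;
        info.data = malloc((size_t)len);
        if (info.data == NULL)
                return -ENOMEM;

        ret = rte_eth_dev_get_eeprom(port_id, &info);
        if (ret == 0)
                printf("read %u bytes of EEPROM from port %u\n",
                       info.length, (unsigned)port_id);
        free(info.data);
        return ret;
}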

int
rte_eth_dev_get_dcb_info(uint8_t port_id,
                         struct rte_eth_dcb_info *dcb_info)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        if (dcb_info == NULL)
                return -EINVAL;

        dev = &rte_eth_devices[port_id];
        memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
        return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
}
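
/*
 * Editor's usage sketch (hypothetical): reading back the DCB
 * configuration to see how user priorities map to traffic classes.
 */
static int
example_show_dcb_mapping(uint8_t port_id)
{
        struct rte_eth_dcb_info dcb_info;
        int ret;
        int i;

        ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
        if (ret != 0)
                return ret;

        for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
                printf("priority %d -> TC %u (of %u TCs)\n",
                       i, (unsigned)dcb_info.prio_tc[i],
                       (unsigned)dcb_info.nb_tcs);
        return 0;
}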

void
rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev,
                      struct rte_pci_device *pci_dev)
{
        if ((eth_dev == NULL) || (pci_dev == NULL)) {
                RTE_PMD_DEBUG_TRACE("NULL pointer eth_dev=%p pci_dev=%p\n",
                                eth_dev, pci_dev);
                return;
        }

        eth_dev->data->dev_flags = 0;
        if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
                eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
        if (pci_dev->driver->drv_flags & RTE_PCI_DRV_DETACHABLE)
                eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

        eth_dev->data->kdrv = pci_dev->kdrv;
        eth_dev->data->numa_node = pci_dev->device.numa_node;
        eth_dev->data->drv_name = pci_dev->driver->driver.name;
}
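
/*
 * Editor's note: rte_eth_copy_pci_info() is intended for PCI-based
 * PMDs during device initialization, before the driver fills in the
 * rest of eth_dev->data. A minimal sketch of such a call site follows;
 * the function name and its two-argument signature are hypothetical
 * (real PMD init callbacks obtain the pci_dev handle differently,
 * depending on the DPDK release).
 */
static int
example_pmd_eth_dev_init(struct rte_eth_dev *eth_dev,
                         struct rte_pci_device *pci_dev)
{
        /* derive dev_flags, kdrv, NUMA node and driver name */
        rte_eth_copy_pci_info(eth_dev, pci_dev);

        /* ... hardware-specific initialization would follow ... */
        return 0;
}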

int
rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id,
                                    struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        if (l2_tunnel == NULL) {
                RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter.\n");
                return -EINVAL;
        }

        if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
                RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
                return -EINVAL;
        }

        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
                                -ENOTSUP);
        return (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev, l2_tunnel);
}

int
rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
                                  struct rte_eth_l2_tunnel_conf *l2_tunnel,
                                  uint32_t mask,
                                  uint8_t en)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        if (l2_tunnel == NULL) {
                RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter.\n");
                return -EINVAL;
        }

        if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
                RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
                return -EINVAL;
        }

        if (mask == 0) {
                RTE_PMD_DEBUG_TRACE("Mask should have a value.\n");
                return -EINVAL;
        }

        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
                                -ENOTSUP);
        return (*dev->dev_ops->l2_tunnel_offload_set)(dev, l2_tunnel, mask, en);
}
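
/*
 * Editor's usage sketch (hypothetical): configuring E-Tag (IEEE
 * 802.1BR) handling with the two entry points above - first the outer
 * Ethernet type, then enabling stripping of the tag on receive. The
 * mask and tunnel-type constants are assumed to be the ones declared
 * in rte_ethdev.h; 0x893f is the standard E-Tag ethertype.
 */
static int
example_enable_etag_stripping(uint8_t port_id)
{
        struct rte_eth_l2_tunnel_conf conf;
        int ret;

        memset(&conf, 0, sizeof(conf));
        conf.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
        conf.ether_type = 0x893f;

        ret = rte_eth_dev_l2_tunnel_eth_type_conf(port_id, &conf);
        if (ret != 0)
                return ret;

        return rte_eth_dev_l2_tunnel_offload_set(port_id, &conf,
                                                 ETH_L2_TUNNEL_STRIPPING_MASK,
                                                 1);
}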