/* dpdk.git: lib/librte_ether/rte_ethdev.c @ commit 63320cc35be578b8cd79e26397bf7fe466ceee64 */
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_ether.h"
#include "rte_ethdev.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static struct rte_eth_dev_data *rte_eth_dev_data;
static uint8_t nb_ports;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
        {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
                rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
                sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
                sizeof(rte_txq_stats_strings[0]))

/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * a pointer to the callback's parameter, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

enum {
        DEV_DETACHED = 0,
        DEV_ATTACHED
};

static void
rte_eth_dev_data_alloc(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
                                rte_socket_id(), flags);
        } else
                mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
        if (mz == NULL)
                rte_panic("Cannot allocate memzone for ethernet port data\n");

        rte_eth_dev_data = mz->addr;
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                memset(rte_eth_dev_data, 0,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

static uint8_t
rte_eth_dev_find_free_port(void)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].attached == DEV_DETACHED)
                        return i;
        }
        return RTE_MAX_ETHPORTS;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name, enum rte_eth_dev_type type)
{
        uint8_t port_id;
        struct rte_eth_dev *eth_dev;

        port_id = rte_eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
                return NULL;
        }

        if (rte_eth_dev_data == NULL)
                rte_eth_dev_data_alloc();

        if (rte_eth_dev_allocated(name) != NULL) {
                RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
                                name);
                return NULL;
        }

        eth_dev = &rte_eth_devices[port_id];
        eth_dev->data = &rte_eth_dev_data[port_id];
        snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
        eth_dev->data->port_id = port_id;
        eth_dev->attached = DEV_ATTACHED;
        eth_dev->dev_type = type;
        nb_ports++;
        return eth_dev;
}
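
/*
 * Illustrative sketch (assumption, not part of this file): how a virtual
 * PMD's init path would typically obtain a port with rte_eth_dev_allocate().
 * The "dummy" names and the ops table are hypothetical.
 *
 *     static int
 *     dummy_dev_init(const char *name)
 *     {
 *             struct rte_eth_dev *eth_dev;
 *
 *             eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
 *             if (eth_dev == NULL)
 *                     return -ENOMEM;
 *             eth_dev->dev_ops = &dummy_eth_dev_ops;  // hypothetical ops table
 *             return 0;
 *     }
 */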

static int
rte_eth_dev_create_unique_device_name(char *name, size_t size,
                struct rte_pci_device *pci_dev)
{
        int ret;

        ret = snprintf(name, size, "%d:%d.%d",
                        pci_dev->addr.bus, pci_dev->addr.devid,
                        pci_dev->addr.function);
        if (ret < 0)
                return ret;
        return 0;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        eth_dev->attached = DEV_DETACHED;
        nb_ports--;
        return 0;
}

static int
rte_eth_dev_init(struct rte_pci_driver *pci_drv,
                 struct rte_pci_device *pci_dev)
{
        struct eth_driver *eth_drv;
        struct rte_eth_dev *eth_dev;
        char ethdev_name[RTE_ETH_NAME_MAX_LEN];

        int diag;

        eth_drv = (struct eth_driver *)pci_drv;

        /* Create unique Ethernet device name using PCI address */
        rte_eth_dev_create_unique_device_name(ethdev_name,
                        sizeof(ethdev_name), pci_dev);

        eth_dev = rte_eth_dev_allocate(ethdev_name, RTE_ETH_DEV_PCI);
        if (eth_dev == NULL)
                return -ENOMEM;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
                                  eth_drv->dev_private_size,
                                  RTE_CACHE_LINE_SIZE);
                if (eth_dev->data->dev_private == NULL)
                        rte_panic("Cannot allocate memzone for private port data\n");
        }
        eth_dev->pci_dev = pci_dev;
        eth_dev->driver = eth_drv;
        eth_dev->data->rx_mbuf_alloc_failed = 0;

        /* init user callbacks */
        TAILQ_INIT(&(eth_dev->link_intr_cbs));

        /*
         * Set the default MTU.
         */
        eth_dev->data->mtu = ETHER_MTU;

        /* Invoke PMD device initialization function */
        diag = (*eth_drv->eth_dev_init)(eth_dev);
        if (diag == 0)
                return 0;

        RTE_PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%x device_id=0x%x) failed\n",
                        pci_drv->name,
                        (unsigned) pci_dev->id.vendor_id,
                        (unsigned) pci_dev->id.device_id);
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);
        rte_eth_dev_release_port(eth_dev);
        return diag;
}

static int
rte_eth_dev_uninit(struct rte_pci_device *pci_dev)
{
        const struct eth_driver *eth_drv;
        struct rte_eth_dev *eth_dev;
        char ethdev_name[RTE_ETH_NAME_MAX_LEN];
        int ret;

        if (pci_dev == NULL)
                return -EINVAL;

        /* Create unique Ethernet device name using PCI address */
        rte_eth_dev_create_unique_device_name(ethdev_name,
                        sizeof(ethdev_name), pci_dev);

        eth_dev = rte_eth_dev_allocated(ethdev_name);
        if (eth_dev == NULL)
                return -ENODEV;

        eth_drv = (const struct eth_driver *)pci_dev->driver;

        /* Invoke PMD device uninit function */
        if (*eth_drv->eth_dev_uninit) {
                ret = (*eth_drv->eth_dev_uninit)(eth_dev);
                if (ret)
                        return ret;
        }

        /* free ether device */
        rte_eth_dev_release_port(eth_dev);

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);

        eth_dev->pci_dev = NULL;
        eth_dev->driver = NULL;
        eth_dev->data = NULL;

        return 0;
}

/**
 * Register an Ethernet [Poll Mode] driver.
 *
 * Function invoked by the initialization function of an Ethernet driver
 * to simultaneously register itself as a PCI driver and as an Ethernet
 * Poll Mode Driver.
 * Invokes the rte_eal_pci_register() function to register the *pci_drv*
 * structure embedded in the *eth_drv* structure, after having stored the
 * address of the rte_eth_dev_init() function in the *devinit* field of
 * the *pci_drv* structure.
 * During the PCI probing phase, the rte_eth_dev_init() function is
 * invoked for each PCI [Ethernet device] matching the embedded PCI
 * identifiers provided by the driver.
 */
void
rte_eth_driver_register(struct eth_driver *eth_drv)
{
        eth_drv->pci_drv.devinit = rte_eth_dev_init;
        eth_drv->pci_drv.devuninit = rte_eth_dev_uninit;
        rte_eal_pci_register(&eth_drv->pci_drv);
}
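
/*
 * Illustrative sketch (assumption, not part of this file): how a PCI PMD
 * built on this API would register itself.  The "dummy" names and the
 * pci_id_dummy_map table are hypothetical.
 *
 *     static struct eth_driver rte_dummy_pmd = {
 *             .pci_drv = {
 *                     .name = "rte_dummy_pmd",
 *                     .id_table = pci_id_dummy_map,   // hypothetical id table
 *                     .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
 *             },
 *             .eth_dev_init = dummy_dev_init,         // hypothetical init
 *             .eth_dev_uninit = dummy_dev_uninit,     // hypothetical uninit
 *             .dev_private_size = sizeof(struct dummy_adapter),
 *     };
 *
 *     rte_eth_driver_register(&rte_dummy_pmd);
 */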

int
rte_eth_dev_is_valid_port(uint8_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            rte_eth_devices[port_id].attached != DEV_ATTACHED)
                return 0;
        else
                return 1;
}

int
rte_eth_dev_socket_id(uint8_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
        return rte_eth_devices[port_id].data->numa_node;
}

uint8_t
rte_eth_dev_count(void)
{
        return nb_ports;
}

static enum rte_eth_dev_type
rte_eth_dev_get_device_type(uint8_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, RTE_ETH_DEV_UNKNOWN);
        return rte_eth_devices[port_id].dev_type;
}

static int
rte_eth_dev_get_addr_by_port(uint8_t port_id, struct rte_pci_addr *addr)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (addr == NULL) {
                RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        *addr = rte_eth_devices[port_id].pci_dev->addr;
        return 0;
}

static int
rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
{
        char *tmp;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (name == NULL) {
                RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        /* don't check 'rte_eth_devices[port_id].data' here,
         * because it might be overwritten by a VDEV PMD */
        tmp = rte_eth_dev_data[port_id].name;
        strcpy(name, tmp);
        return 0;
}

static int
rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
{
        int i;

        if (name == NULL) {
                RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        *port_id = RTE_MAX_ETHPORTS;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (!strncmp(name,
                        rte_eth_dev_data[i].name, strlen(name))) {
                        *port_id = i;
                        return 0;
                }
        }
        return -ENODEV;
}

static int
rte_eth_dev_get_port_by_addr(const struct rte_pci_addr *addr, uint8_t *port_id)
{
        int i;
        struct rte_pci_device *pci_dev = NULL;

        if (addr == NULL) {
                RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        *port_id = RTE_MAX_ETHPORTS;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                pci_dev = rte_eth_devices[i].pci_dev;

                if (pci_dev &&
                        !rte_eal_compare_pci_addr(&pci_dev->addr, addr)) {
                        *port_id = i;
                        return 0;
                }
        }
        return -ENODEV;
}

static int
rte_eth_dev_is_detachable(uint8_t port_id)
{
        uint32_t dev_flags;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        switch (rte_eth_devices[port_id].data->kdrv) {
        case RTE_KDRV_IGB_UIO:
        case RTE_KDRV_UIO_GENERIC:
        case RTE_KDRV_NIC_UIO:
        case RTE_KDRV_NONE:
                break;
        case RTE_KDRV_VFIO:
        default:
                return -ENOTSUP;
        }
        dev_flags = rte_eth_devices[port_id].data->dev_flags;
        if ((dev_flags & RTE_ETH_DEV_DETACHABLE) &&
                (!(dev_flags & RTE_ETH_DEV_BONDED_SLAVE)))
                return 0;
        else
                return 1;
}

/* attach the new physical device, then store port_id of the device */
static int
rte_eth_dev_attach_pdev(struct rte_pci_addr *addr, uint8_t *port_id)
{
        /* re-construct pci_device_list */
        if (rte_eal_pci_scan())
                goto err;
        /* invoke the probe function of the driver that can handle
         * the new device */
        if (rte_eal_pci_probe_one(addr))
                goto err;

        if (rte_eth_dev_get_port_by_addr(addr, port_id))
                goto err;

        return 0;
err:
        return -1;
}

/* detach the physical device, then store the pci_addr of the device */
static int
rte_eth_dev_detach_pdev(uint8_t port_id, struct rte_pci_addr *addr)
{
        struct rte_pci_addr freed_addr;
        struct rte_pci_addr vp;

        /* get pci address by port id */
        if (rte_eth_dev_get_addr_by_port(port_id, &freed_addr))
                goto err;

        /* a zeroed pci addr means the port comes from a virtual device */
        vp.domain = vp.bus = vp.devid = vp.function = 0;
        if (rte_eal_compare_pci_addr(&vp, &freed_addr) == 0)
                goto err;

        /* invoke the devuninit function of the pci driver,
         * and also remove the device from pci_device_list */
        if (rte_eal_pci_detach(&freed_addr))
                goto err;

        *addr = freed_addr;
        return 0;
err:
        return -1;
}

/* attach the new virtual device, then store port_id of the device */
static int
rte_eth_dev_attach_vdev(const char *vdevargs, uint8_t *port_id)
{
        char *name = NULL, *args = NULL;
        int ret = -1;

        /* parse vdevargs, then retrieve the device name and args */
        if (rte_eal_parse_devargs_str(vdevargs, &name, &args))
                goto end;

        /* walk through dev_driver_list to find the driver of the device,
         * then invoke the probe function of the driver.
         * rte_eal_vdev_init() updates port_id allocated after
         * initialization.
         */
        if (rte_eal_vdev_init(name, args))
                goto end;

        if (rte_eth_dev_get_port_by_name(name, port_id))
                goto end;

        ret = 0;
end:
        free(name);
        free(args);

        return ret;
}

/* detach the virtual device, then store the name of the device */
static int
rte_eth_dev_detach_vdev(uint8_t port_id, char *vdevname)
{
        char name[RTE_ETH_NAME_MAX_LEN];

        /* get the device name by port id */
        if (rte_eth_dev_get_name_by_port(port_id, name))
                goto err;
        /* walk through dev_driver_list to find the driver of the device,
         * then invoke the uninit function of the driver */
        if (rte_eal_vdev_uninit(name))
                goto err;

        strncpy(vdevname, name, sizeof(name));
        return 0;
err:
        return -1;
}

/* attach the new device, then store port_id of the device */
int
rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
{
        struct rte_pci_addr addr;
        int ret = -1;

        if ((devargs == NULL) || (port_id == NULL)) {
                ret = -EINVAL;
                goto err;
        }

        if (eal_parse_pci_DomBDF(devargs, &addr) == 0) {
                ret = rte_eth_dev_attach_pdev(&addr, port_id);
                if (ret < 0)
                        goto err;
        } else {
                ret = rte_eth_dev_attach_vdev(devargs, port_id);
                if (ret < 0)
                        goto err;
        }

        return 0;
err:
        RTE_LOG(ERR, EAL, "Driver cannot attach the device\n");
        return ret;
}

/* detach the device, then store the name of the device */
int
rte_eth_dev_detach(uint8_t port_id, char *name)
{
        struct rte_pci_addr addr;
        int ret = -1;

        if (name == NULL) {
                ret = -EINVAL;
                goto err;
        }

        /* check whether the driver supports the detach feature */
        if (rte_eth_dev_is_detachable(port_id))
                goto err;

        if (rte_eth_dev_get_device_type(port_id) == RTE_ETH_DEV_PCI) {
                ret = rte_eth_dev_get_addr_by_port(port_id, &addr);
                if (ret < 0)
                        goto err;

                ret = rte_eth_dev_detach_pdev(port_id, &addr);
                if (ret < 0)
                        goto err;

                snprintf(name, RTE_ETH_NAME_MAX_LEN,
                        "%04x:%02x:%02x.%d",
                        addr.domain, addr.bus,
                        addr.devid, addr.function);
        } else {
                ret = rte_eth_dev_detach_vdev(port_id, name);
                if (ret < 0)
                        goto err;
        }

        return 0;

err:
        RTE_LOG(ERR, EAL, "Driver cannot detach the device\n");
        return ret;
}
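
/*
 * Illustrative usage sketch (assumption, not part of this file): hot-plug
 * a vdev port, use it, then detach it.  Error handling is elided and the
 * "eth_pcap0" devargs string is only an example.
 *
 *     uint8_t port_id;
 *     char detached_name[RTE_ETH_NAME_MAX_LEN];
 *
 *     if (rte_eth_dev_attach("eth_pcap0,iface=eth0", &port_id) == 0) {
 *             // ... configure, start and use the port ...
 *             rte_eth_dev_stop(port_id);
 *             rte_eth_dev_close(port_id);
 *             rte_eth_dev_detach(port_id, detached_name);
 *     }
 */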

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -ENOMEM;
                }
        } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;

        } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

int
rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu8
                        " already started\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return (*dev->dev_ops->rx_queue_start)(dev, rx_queue_id);
}

int
rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu8
                        " already stopped\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return (*dev->dev_ops->rx_queue_stop)(dev, rx_queue_id);
}

int
rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu8
                        " already started\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return (*dev->dev_ops->tx_queue_start)(dev, tx_queue_id);
}

int
rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu8
                        " already stopped\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return (*dev->dev_ops->tx_queue_stop)(dev, tx_queue_id);
}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -ENOMEM;
                }
        } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;

        } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
        switch (speed) {
        case ETH_SPEED_NUM_10M:
                return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
        case ETH_SPEED_NUM_100M:
                return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
        case ETH_SPEED_NUM_1G:
                return ETH_LINK_SPEED_1G;
        case ETH_SPEED_NUM_2_5G:
                return ETH_LINK_SPEED_2_5G;
        case ETH_SPEED_NUM_5G:
                return ETH_LINK_SPEED_5G;
        case ETH_SPEED_NUM_10G:
                return ETH_LINK_SPEED_10G;
        case ETH_SPEED_NUM_20G:
                return ETH_LINK_SPEED_20G;
        case ETH_SPEED_NUM_25G:
                return ETH_LINK_SPEED_25G;
        case ETH_SPEED_NUM_40G:
                return ETH_LINK_SPEED_40G;
        case ETH_SPEED_NUM_50G:
                return ETH_LINK_SPEED_50G;
        case ETH_SPEED_NUM_56G:
                return ETH_LINK_SPEED_56G;
        case ETH_SPEED_NUM_100G:
                return ETH_LINK_SPEED_100G;
        default:
                return 0;
        }
}
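
/*
 * Illustrative sketch (assumption, not part of this file): building a
 * link_speeds mask for rte_eth_conf from numeric speeds, e.g. to fix a
 * port at 10G/1G full duplex instead of autonegotiation.
 *
 *     struct rte_eth_conf conf = { 0 };
 *
 *     conf.link_speeds = ETH_LINK_SPEED_FIXED |
 *         rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX) |
 *         rte_eth_speed_bitflag(ETH_SPEED_NUM_1G, ETH_LINK_FULL_DUPLEX);
 */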

int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        int diag;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_PMD_DEBUG_TRACE(
                        "Number of RX queues requested (%u) is greater than max supported (%d)\n",
                        nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_PMD_DEBUG_TRACE(
                        "Number of TX queues requested (%u) is greater than max supported (%d)\n",
                        nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                RTE_PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        /* Copy the dev_conf parameter into the dev structure */
        memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        (*dev->dev_ops->dev_infos_get)(dev, &dev_info);

        if (nb_rx_q == 0 && nb_tx_q == 0) {
                RTE_PMD_DEBUG_TRACE("ethdev port_id=%d: the numbers of RX and TX queues cannot both be 0\n", port_id);
                return -EINVAL;
        }

        if (nb_rx_q > dev_info.max_rx_queues) {
                RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
                                port_id, nb_rx_q, dev_info.max_rx_queues);
                return -EINVAL;
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
                                port_id, nb_tx_q, dev_info.max_tx_queues);
                return -EINVAL;
        }

        /*
         * If link state interrupt is enabled, check that the
         * device supports it.
         */
        if ((dev_conf->intr_conf.lsc == 1) &&
                (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
                RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
                                dev->data->drv_name);
                return -EINVAL;
        }

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (dev_conf->rxmode.jumbo_frame == 1) {
                if (dev_conf->rxmode.max_rx_pkt_len >
                    dev_info.max_rx_pktlen) {
                        RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " > max valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)dev_info.max_rx_pktlen);
                        return -EINVAL;
                } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
                        RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " < min valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)ETHER_MIN_LEN);
                        return -EINVAL;
                }
        } else {
                if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
                        dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
                        /* Use default value */
                        dev->data->dev_conf.rxmode.max_rx_pkt_len =
                                                        ETHER_MAX_LEN;
        }

        /*
         * Setup new number of RX/TX queues and reconfigure device.
         */
        diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
        if (diag != 0) {
                RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
                                port_id, diag);
                return diag;
        }

        diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
        if (diag != 0) {
                RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                return diag;
        }

        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                return diag;
        }

        return 0;
}
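
/*
 * Illustrative sketch (assumption, not part of this file): a minimal
 * single-queue configuration call as an application would issue it.
 *
 *     static const struct rte_eth_conf port_conf = {
 *             .rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN, },
 *     };
 *
 *     if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) < 0)
 *             rte_exit(EXIT_FAILURE, "port configure failed\n");
 */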

static void
rte_eth_dev_config_restore(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct ether_addr addr;
        uint16_t i;
        uint32_t pool = 0;

        dev = &rte_eth_devices[port_id];

        rte_eth_dev_info_get(port_id, &dev_info);

        if (RTE_ETH_DEV_SRIOV(dev).active)
                pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

        /* replay MAC address configuration */
        for (i = 0; i < dev_info.max_mac_addrs; i++) {
                addr = dev->data->mac_addrs[i];

                /* skip zero address */
                if (is_zero_ether_addr(&addr))
                        continue;

                /* add address to the hardware */
                if (*dev->dev_ops->mac_addr_add &&
                        (dev->data->mac_pool_sel[i] & (1ULL << pool)))
                        (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
                else {
                        RTE_PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
                                        port_id);
                        /* exit the loop but do not return an error */
                        break;
                }
        }

        /* replay promiscuous configuration */
        if (rte_eth_promiscuous_get(port_id) == 1)
                rte_eth_promiscuous_enable(port_id);
        else if (rte_eth_promiscuous_get(port_id) == 0)
                rte_eth_promiscuous_disable(port_id);

        /* replay all multicast configuration */
        if (rte_eth_allmulticast_get(port_id) == 1)
                rte_eth_allmulticast_enable(port_id);
        else if (rte_eth_allmulticast_get(port_id) == 0)
                rte_eth_allmulticast_disable(port_id);
}

int
rte_eth_dev_start(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        int diag;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

        if (dev->data->dev_started != 0) {
                RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
                        " already started\n",
                        port_id);
                return 0;
        }

        diag = (*dev->dev_ops->dev_start)(dev);
        if (diag == 0)
                dev->data->dev_started = 1;
        else
                return diag;

        rte_eth_dev_config_restore(port_id);

        if (dev->data->dev_conf.intr_conf.lsc == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
                (*dev->dev_ops->link_update)(dev, 0);
        }
        return 0;
}

void
rte_eth_dev_stop(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

        if (dev->data->dev_started == 0) {
                RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
                        " already stopped\n",
                        port_id);
                return;
        }

        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_stop)(dev);
}

int
rte_eth_dev_set_link_up(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
        return (*dev->dev_ops->dev_set_link_up)(dev);
}

int
rte_eth_dev_set_link_down(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
        return (*dev->dev_ops->dev_set_link_down)(dev);
}

void
rte_eth_dev_close(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_close)(dev);

        rte_free(dev->data->rx_queues);
        dev->data->rx_queues = NULL;
        rte_free(dev->data->tx_queues);
        dev->data->tx_queues = NULL;
}

int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
                       uint16_t nb_rx_desc, unsigned int socket_id,
                       const struct rte_eth_rxconf *rx_conf,
                       struct rte_mempool *mp)
{
        int ret;
        uint32_t mbp_buf_size;
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        if (dev->data->dev_started) {
                RTE_PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

        /*
         * Check the size of the mbuf data buffer.
         * This value must be provided in the private data of the memory pool.
         * First check that the memory pool has valid private data.
         */
        rte_eth_dev_info_get(port_id, &dev_info);
        if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
                RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
                                mp->name, (int) mp->private_data_size,
                                (int) sizeof(struct rte_pktmbuf_pool_private));
                return -ENOSPC;
        }
        mbp_buf_size = rte_pktmbuf_data_room_size(mp);

        if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
                RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
                                "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
                                "=%d)\n",
                                mp->name,
                                (int)mbp_buf_size,
                                (int)(RTE_PKTMBUF_HEADROOM +
                                      dev_info.min_rx_bufsize),
                                (int)RTE_PKTMBUF_HEADROOM,
                                (int)dev_info.min_rx_bufsize);
                return -EINVAL;
        }

        if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
                        nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
                        nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
                RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
                        "should be: <= %hu, >= %hu, and a multiple of %hu\n",
                        nb_rx_desc,
                        dev_info.rx_desc_lim.nb_max,
                        dev_info.rx_desc_lim.nb_min,
                        dev_info.rx_desc_lim.nb_align);
                return -EINVAL;
        }

        if (rx_conf == NULL)
                rx_conf = &dev_info.default_rxconf;

        ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
                                              socket_id, rx_conf, mp);
        if (!ret) {
                if (!dev->data->min_rx_buf_size ||
                    dev->data->min_rx_buf_size > mbp_buf_size)
                        dev->data->min_rx_buf_size = mbp_buf_size;
        }

        return ret;
}

int
rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
                       uint16_t nb_tx_desc, unsigned int socket_id,
                       const struct rte_eth_txconf *tx_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        if (dev->data->dev_started) {
                RTE_PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);

        rte_eth_dev_info_get(port_id, &dev_info);

        if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
            nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
            nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
                RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
                                "should be: <= %hu, >= %hu, and a multiple of %hu\n",
                                nb_tx_desc,
                                dev_info.tx_desc_lim.nb_max,
                                dev_info.tx_desc_lim.nb_min,
                                dev_info.tx_desc_lim.nb_align);
                return -EINVAL;
        }

        if (tx_conf == NULL)
                tx_conf = &dev_info.default_txconf;

        return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
                                               socket_id, tx_conf);
}
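
/*
 * Illustrative sketch (assumption, not part of this file): the canonical
 * port bring-up sequence built from the functions above, using the
 * port_conf sketched earlier.  "mbuf_pool" and the descriptor counts are
 * placeholders.
 *
 *     if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) < 0)
 *             return -1;
 *     if (rte_eth_rx_queue_setup(port_id, 0, 128,
 *                     rte_eth_dev_socket_id(port_id),
 *                     NULL, mbuf_pool) < 0)       // NULL: default rxconf
 *             return -1;
 *     if (rte_eth_tx_queue_setup(port_id, 0, 512,
 *                     rte_eth_dev_socket_id(port_id),
 *                     NULL) < 0)                  // NULL: default txconf
 *             return -1;
 *     if (rte_eth_dev_start(port_id) < 0)
 *             return -1;
 */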

void
rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
                void *userdata __rte_unused)
{
        unsigned i;

        for (i = 0; i < unsent; i++)
                rte_pktmbuf_free(pkts[i]);
}

void
rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
                void *userdata)
{
        uint64_t *count = userdata;
        unsigned i;

        for (i = 0; i < unsent; i++)
                rte_pktmbuf_free(pkts[i]);

        *count += unsent;
}

int
rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
                buffer_tx_error_fn cbfn, void *userdata)
{
        buffer->error_callback = cbfn;
        buffer->error_userdata = userdata;
        return 0;
}

int
rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
{
        int ret = 0;

        if (buffer == NULL)
                return -EINVAL;

        buffer->size = size;
        if (buffer->error_callback == NULL) {
                ret = rte_eth_tx_buffer_set_err_callback(
                        buffer, rte_eth_tx_buffer_drop_callback, NULL);
        }

        return ret;
}
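
/*
 * Illustrative usage sketch (assumption, not part of this file): buffering
 * TX packets with a counter for packets that could not be sent.
 *
 *     static uint64_t drop_count;
 *     struct rte_eth_dev_tx_buffer *buf;
 *
 *     buf = rte_zmalloc_socket("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(32), 0,
 *                              rte_eth_dev_socket_id(port_id));
 *     rte_eth_tx_buffer_init(buf, 32);
 *     rte_eth_tx_buffer_set_err_callback(buf,
 *                     rte_eth_tx_buffer_count_callback, &drop_count);
 *
 *     // in the datapath:
 *     rte_eth_tx_buffer(port_id, 0, buf, mbuf);  // queue a packet
 *     rte_eth_tx_buffer_flush(port_id, 0, buf);  // flush any remainder
 */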

void
rte_eth_promiscuous_enable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
        (*dev->dev_ops->promiscuous_enable)(dev);
        dev->data->promiscuous = 1;
}

void
rte_eth_promiscuous_disable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
        dev->data->promiscuous = 0;
        (*dev->dev_ops->promiscuous_disable)(dev);
}

int
rte_eth_promiscuous_get(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        return dev->data->promiscuous;
}

void
rte_eth_allmulticast_enable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
        (*dev->dev_ops->allmulticast_enable)(dev);
        dev->data->all_multicast = 1;
}

void
rte_eth_allmulticast_disable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
        dev->data->all_multicast = 0;
        (*dev->dev_ops->allmulticast_disable)(dev);
}

int
rte_eth_allmulticast_get(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        return dev->data->all_multicast;
}
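
/*
 * Illustrative usage sketch (assumption, not part of this file):
 *
 *     rte_eth_promiscuous_enable(port_id);
 *     if (rte_eth_promiscuous_get(port_id) != 1)
 *             RTE_LOG(WARNING, EAL, "promiscuous mode not confirmed\n");
 *     rte_eth_allmulticast_enable(port_id);
 */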

static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
{
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &(dev->data->dev_link);

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

void
rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_conf.intr_conf.lsc != 0)
                rte_eth_dev_atomic_read_link_status(dev, eth_link);
        else {
                RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
                (*dev->dev_ops->link_update)(dev, 1);
                *eth_link = dev->data->dev_link;
        }
}

void
rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_conf.intr_conf.lsc != 0)
                rte_eth_dev_atomic_read_link_status(dev, eth_link);
        else {
                RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
                (*dev->dev_ops->link_update)(dev, 0);
                *eth_link = dev->data->dev_link;
        }
}
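
/*
 * Illustrative usage sketch (assumption, not part of this file): polling
 * the link without blocking, e.g. from a main loop.
 *
 *     struct rte_eth_link link;
 *
 *     rte_eth_link_get_nowait(port_id, &link);
 *     if (link.link_status == ETH_LINK_UP)
 *             printf("port %u up at %u Mbps\n", port_id, link.link_speed);
 */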

int
rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        memset(stats, 0, sizeof(*stats));

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
        (*dev->dev_ops->stats_get)(dev, stats);
        stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
        return 0;
}
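
/*
 * Illustrative usage sketch (assumption, not part of this file):
 *
 *     struct rte_eth_stats stats;
 *
 *     if (rte_eth_stats_get(port_id, &stats) == 0)
 *             printf("rx=%" PRIu64 " tx=%" PRIu64 " rx_nombuf=%" PRIu64 "\n",
 *                    stats.ipackets, stats.opackets, stats.rx_nombuf);
 */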
1491
1492 void
1493 rte_eth_stats_reset(uint8_t port_id)
1494 {
1495         struct rte_eth_dev *dev;
1496
1497         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1498         dev = &rte_eth_devices[port_id];
1499
1500         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1501         (*dev->dev_ops->stats_reset)(dev);
1502         dev->data->rx_mbuf_alloc_failed = 0;
1503 }
1504
1505 static int
1506 get_xstats_count(uint8_t port_id)
1507 {
1508         struct rte_eth_dev *dev;
1509         int count;
1510
1511         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1512         dev = &rte_eth_devices[port_id];
1513         if (dev->dev_ops->xstats_get_names != NULL) {
1514                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
1515                 if (count < 0)
1516                         return count;
1517         } else
1518                 count = 0;
1519         count += RTE_NB_STATS;
1520         count += dev->data->nb_rx_queues * RTE_NB_RXQ_STATS;
1521         count += dev->data->nb_tx_queues * RTE_NB_TXQ_STATS;
1522         return count;
1523 }
1524
1525 int
1526 rte_eth_xstats_get_names(uint8_t port_id,
1527         struct rte_eth_xstat_name *xstats_names,
1528         unsigned size)
1529 {
1530         struct rte_eth_dev *dev;
1531         int cnt_used_entries;
1532         int cnt_expected_entries;
1533         uint32_t idx, id_queue;
1534
1535         cnt_expected_entries = get_xstats_count(port_id);
1536         if (xstats_names == NULL || cnt_expected_entries < 0 ||
1537                         (int)size < cnt_expected_entries)
1538                 return cnt_expected_entries;
1539
1540         /* port_id checked in get_xstats_count() */
1541         dev = &rte_eth_devices[port_id];
1542         if (dev->dev_ops->xstats_get_names != NULL) {
1543                 cnt_used_entries = (*dev->dev_ops->xstats_get_names)(
1544                         dev, xstats_names, size);
1545                 if (cnt_used_entries < 0)
1546                         return cnt_used_entries;
1547         } else
1548                 /* Driver itself does not support extended stats, but
1549                  * still have basic stats.
1550                  */
1551                 cnt_used_entries = 0;
1552
1553         for (idx = 0; idx < RTE_NB_STATS; idx++) {
1554                 xstats_names[cnt_used_entries].id = cnt_used_entries;
1555                 snprintf(xstats_names[cnt_used_entries].name,
1556                         sizeof(xstats_names[0].name),
1557                         "%s", rte_stats_strings[idx].name);
1558                 cnt_used_entries++;
1559         }
1560         for (id_queue = 0; id_queue < dev->data->nb_rx_queues; id_queue++) {
1561                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
1562                         xstats_names[cnt_used_entries].id = cnt_used_entries;
1563                         snprintf(xstats_names[cnt_used_entries].name,
1564                                 sizeof(xstats_names[0].name),
1565                                 "rx_q%u%s",
1566                                 id_queue, rte_rxq_stats_strings[idx].name);
1567                         cnt_used_entries++;
1568                 }
1569
1570         }
1571         for (id_queue = 0; id_queue < dev->data->nb_tx_queues; id_queue++) {
1572                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
1573                         xstats_names[cnt_used_entries].id = cnt_used_entries;
1574                         snprintf(xstats_names[cnt_used_entries].name,
1575                                 sizeof(xstats_names[0].name),
1576                                 "tx_q%u%s",
1577                                 id_queue, rte_txq_stats_strings[idx].name);
1578                         cnt_used_entries++;
1579                 }
1580         }
1581         return cnt_used_entries;
1582 }
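
/*
 * Usage sketch (illustrative only): the two-call sizing pattern.
 * Passing a NULL array (or a too-small size) returns the number of
 * entries required, so a caller can allocate exactly enough space.
 *
 *	int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *	struct rte_eth_xstat_name *names;
 *
 *	if (n > 0 && (names = malloc(sizeof(*names) * n)) != NULL) {
 *		if (rte_eth_xstats_get_names(port_id, names, n) == n)
 *			... names[0..n-1] now hold the statistic names ...
 *		free(names);
 *	}
 */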
1583
1584 /* retrieve ethdev extended statistics */
1585 int
1586 rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
1587         unsigned n)
1588 {
1589         struct rte_eth_stats eth_stats;
1590         struct rte_eth_dev *dev;
1591         unsigned count = 0, i, q;
1592         signed xcount = 0;
1593         uint64_t val, *stats_ptr;
1594
1595         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1596
1597         dev = &rte_eth_devices[port_id];
1598
1599         /* Return generic statistics */
1600         count = RTE_NB_STATS + (dev->data->nb_rx_queues * RTE_NB_RXQ_STATS) +
1601                 (dev->data->nb_tx_queues * RTE_NB_TXQ_STATS);
1602
1603         /* implemented by the driver */
1604         if (dev->dev_ops->xstats_get != NULL) {
1605                 /* Retrieve the xstats from the driver at the end of the
1606                  * xstats struct.
1607                  */
1608                 xcount = (*dev->dev_ops->xstats_get)(dev,
1609                                      xstats ? xstats + count : NULL,
1610                                      (n > count) ? n - count : 0);
1611
1612                 if (xcount < 0)
1613                         return xcount;
1614         }
1615
1616         if (n < count + xcount || xstats == NULL)
1617                 return count + xcount;
1618
1619         /* now fill the xstats structure */
1620         count = 0;
1621         rte_eth_stats_get(port_id, &eth_stats);
1622
1623         /* global stats */
1624         for (i = 0; i < RTE_NB_STATS; i++) {
1625                 stats_ptr = RTE_PTR_ADD(&eth_stats,
1626                                         rte_stats_strings[i].offset);
1627                 val = *stats_ptr;
1628                 xstats[count].id = count + xcount;
1629                 xstats[count++].value = val;
1630         }
1631
1632         /* per-rxq stats */
1633         for (q = 0; q < dev->data->nb_rx_queues; q++) {
1634                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
1635                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1636                                         rte_rxq_stats_strings[i].offset +
1637                                         q * sizeof(uint64_t));
1638                         val = *stats_ptr;
1639                         xstats[count].id = count + xcount;
1640                         xstats[count++].value = val;
1641                 }
1642         }
1643
1644         /* per-txq stats */
1645         for (q = 0; q < dev->data->nb_tx_queues; q++) {
1646                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
1647                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1648                                         rte_txq_stats_strings[i].offset +
1649                                         q * sizeof(uint64_t));
1650                         val = *stats_ptr;
1651                         xstats[count].id = count + xcount;
1652                         xstats[count++].value = val;
1653                 }
1654         }
1655
1656         return count + xcount;
1657 }
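
/*
 * Usage sketch (illustrative only): fetch the values with the same
 * sizing pattern as for the names; each returned entry carries an id
 * that indexes the name array filled by rte_eth_xstats_get_names().
 *
 *	int n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xs;
 *
 *	if (n > 0 && (xs = malloc(sizeof(*xs) * n)) != NULL) {
 *		if (rte_eth_xstats_get(port_id, xs, n) == n)
 *			... xs[i].value pairs with names[xs[i].id] ...
 *		free(xs);
 *	}
 */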
1658
1659 /* reset ethdev extended statistics */
1660 void
1661 rte_eth_xstats_reset(uint8_t port_id)
1662 {
1663         struct rte_eth_dev *dev;
1664
1665         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1666         dev = &rte_eth_devices[port_id];
1667
1668         /* implemented by the driver */
1669         if (dev->dev_ops->xstats_reset != NULL) {
1670                 (*dev->dev_ops->xstats_reset)(dev);
1671                 return;
1672         }
1673
1674         /* fallback to default */
1675         rte_eth_stats_reset(port_id);
1676 }
1677
1678 static int
1679 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1680                 uint8_t is_rx)
1681 {
1682         struct rte_eth_dev *dev;
1683
1684         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1685
1686         dev = &rte_eth_devices[port_id];
1687
1688         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1689         return (*dev->dev_ops->queue_stats_mapping_set)
1690                         (dev, queue_id, stat_idx, is_rx);
1691 }
1692
1693
1694 int
1695 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1696                 uint8_t stat_idx)
1697 {
1698         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1699                         STAT_QMAP_TX);
1700 }
1701
1702
1703 int
1704 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1705                 uint8_t stat_idx)
1706 {
1707         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1708                         STAT_QMAP_RX);
1709 }
1710
1711
1712 void
1713 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1714 {
1715         struct rte_eth_dev *dev;
1716         const struct rte_eth_desc_lim lim = {
1717                 .nb_max = UINT16_MAX,
1718                 .nb_min = 0,
1719                 .nb_align = 1,
1720         };
1721
1722         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1723         dev = &rte_eth_devices[port_id];
1724
1725         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
1726         dev_info->rx_desc_lim = lim;
1727         dev_info->tx_desc_lim = lim;
1728
1729         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1730         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1731         dev_info->pci_dev = dev->pci_dev;
1732         dev_info->driver_name = dev->data->drv_name;
1733 }
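
/*
 * Usage sketch (illustrative only): query the device limits before
 * configuring it, clamping the application's requested queue count
 * (the hypothetical wanted_rxq below) to what the PMD reports.
 *
 *	uint16_t nb_rxq;
 *	struct rte_eth_dev_info info;
 *
 *	rte_eth_dev_info_get(port_id, &info);
 *	nb_rxq = RTE_MIN(wanted_rxq, info.max_rx_queues);
 */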
1734
1735 int
1736 rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask,
1737                                  uint32_t *ptypes, int num)
1738 {
1739         int i, j;
1740         struct rte_eth_dev *dev;
1741         const uint32_t *all_ptypes;
1742
1743         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1744         dev = &rte_eth_devices[port_id];
1745         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
1746         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
1747
1748         if (!all_ptypes)
1749                 return 0;
1750
1751         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
1752                 if (all_ptypes[i] & ptype_mask) {
1753                         if (j < num)
1754                                 ptypes[j] = all_ptypes[i];
1755                         j++;
1756                 }
1757
1758         return j;
1759 }
1760
1761 void
1762 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1763 {
1764         struct rte_eth_dev *dev;
1765
1766         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1767         dev = &rte_eth_devices[port_id];
1768         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1769 }
1770
1771
1772 int
1773 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1774 {
1775         struct rte_eth_dev *dev;
1776
1777         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1778
1779         dev = &rte_eth_devices[port_id];
1780         *mtu = dev->data->mtu;
1781         return 0;
1782 }
1783
1784 int
1785 rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
1786 {
1787         int ret;
1788         struct rte_eth_dev *dev;
1789
1790         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1791         dev = &rte_eth_devices[port_id];
1792         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
1793
1794         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
1795         if (!ret)
1796                 dev->data->mtu = mtu;
1797
1798         return ret;
1799 }
1800
1801 int
1802 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1803 {
1804         struct rte_eth_dev *dev;
1805
1806         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1807         dev = &rte_eth_devices[port_id];
1808         if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1809                 RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1810                 return -ENOSYS;
1811         }
1812
1813         if (vlan_id > 4095) {
1814                 RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1815                                 port_id, (unsigned) vlan_id);
1816                 return -EINVAL;
1817         }
1818         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1819
1820         return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1821 }
1822
1823 int
1824 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1825 {
1826         struct rte_eth_dev *dev;
1827
1828         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1829         dev = &rte_eth_devices[port_id];
1830         if (rx_queue_id >= dev->data->nb_rx_queues) {
1831                 RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%u\n", rx_queue_id);
1832                 return -EINVAL;
1833         }
1834
1835         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1836         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1837
1838         return 0;
1839 }
1840
1841 int
1842 rte_eth_dev_set_vlan_ether_type(uint8_t port_id,
1843                                 enum rte_vlan_type vlan_type,
1844                                 uint16_t tpid)
1845 {
1846         struct rte_eth_dev *dev;
1847
1848         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1849         dev = &rte_eth_devices[port_id];
1850         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1851
1852         return (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, tpid);
1853 }
1854
1855 int
1856 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
1857 {
1858         struct rte_eth_dev *dev;
1859         int ret = 0;
1860         int mask = 0;
1861         int cur, org = 0;
1862
1863         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1864         dev = &rte_eth_devices[port_id];
1865
1866         /*check which option changed by application*/
1867         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
1868         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
1869         if (cur != org) {
1870                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
1871                 mask |= ETH_VLAN_STRIP_MASK;
1872         }
1873
1874         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
1875         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
1876         if (cur != org) {
1877                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
1878                 mask |= ETH_VLAN_FILTER_MASK;
1879         }
1880
1881         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
1882         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
1883         if (cur != org) {
1884                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
1885                 mask |= ETH_VLAN_EXTEND_MASK;
1886         }
1887
1888         /*no change*/
1889         if (mask == 0)
1890                 return ret;
1891
1892         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
1893         (*dev->dev_ops->vlan_offload_set)(dev, mask);
1894
1895         return ret;
1896 }
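
/*
 * Usage sketch (illustrative only): the offload mask describes the
 * full desired state, so bits left clear are switched off. Here VLAN
 * stripping and filtering are enabled and extended mode is disabled.
 *
 *	int mask = ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD;
 *
 *	if (rte_eth_dev_set_vlan_offload(port_id, mask) != 0)
 *		... the PMD does not implement VLAN offload control ...
 */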
1897
1898 int
1899 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1900 {
1901         struct rte_eth_dev *dev;
1902         int ret = 0;
1903
1904         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1905         dev = &rte_eth_devices[port_id];
1906
1907         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1908                 ret |= ETH_VLAN_STRIP_OFFLOAD;
1909
1910         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1911                 ret |= ETH_VLAN_FILTER_OFFLOAD;
1912
1913         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1914                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
1915
1916         return ret;
1917 }
1918
1919 int
1920 rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
1921 {
1922         struct rte_eth_dev *dev;
1923
1924         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1925         dev = &rte_eth_devices[port_id];
1926         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
1927         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
1928
1929         return 0;
1930 }
1931
1932 int
1933 rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1934 {
1935         struct rte_eth_dev *dev;
1936
1937         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1938         dev = &rte_eth_devices[port_id];
1939         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
1940         memset(fc_conf, 0, sizeof(*fc_conf));
1941         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
1942 }
1943
1944 int
1945 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1946 {
1947         struct rte_eth_dev *dev;
1948
1949         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1950         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
1951                 RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
1952                 return -EINVAL;
1953         }
1954
1955         dev = &rte_eth_devices[port_id];
1956         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
1957         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
1958 }
1959
1960 int
1961 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
1962 {
1963         struct rte_eth_dev *dev;
1964
1965         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1966         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
1967                 RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
1968                 return -EINVAL;
1969         }
1970
1971         dev = &rte_eth_devices[port_id];
1972         /* High water, low water validation are device specific */
1973         if (*dev->dev_ops->priority_flow_ctrl_set)
1974                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
1975         return -ENOTSUP;
1976 }
1977
1978 static int
1979 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
1980                         uint16_t reta_size)
1981 {
1982         uint16_t i, num;
1983
1984         if (!reta_conf)
1985                 return -EINVAL;
1986
1987         if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
1988                 RTE_PMD_DEBUG_TRACE("Invalid reta size, should be %u aligned\n",
1989                                                         RTE_RETA_GROUP_SIZE);
1990                 return -EINVAL;
1991         }
1992
1993         num = reta_size / RTE_RETA_GROUP_SIZE;
1994         for (i = 0; i < num; i++) {
1995                 if (reta_conf[i].mask)
1996                         return 0;
1997         }
1998
1999         return -EINVAL;
2000 }
2001
2002 static int
2003 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2004                          uint16_t reta_size,
2005                          uint16_t max_rxq)
2006 {
2007         uint16_t i, idx, shift;
2008
2009         if (!reta_conf)
2010                 return -EINVAL;
2011
2012         if (max_rxq == 0) {
2013                 RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
2014                 return -EINVAL;
2015         }
2016
2017         for (i = 0; i < reta_size; i++) {
2018                 idx = i / RTE_RETA_GROUP_SIZE;
2019                 shift = i % RTE_RETA_GROUP_SIZE;
2020                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2021                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2022                         RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
2023                                 "the maximum rxq index: %u\n", idx, shift,
2024                                 reta_conf[idx].reta[shift], max_rxq);
2025                         return -EINVAL;
2026                 }
2027         }
2028
2029         return 0;
2030 }
2031
2032 int
2033 rte_eth_dev_rss_reta_update(uint8_t port_id,
2034                             struct rte_eth_rss_reta_entry64 *reta_conf,
2035                             uint16_t reta_size)
2036 {
2037         struct rte_eth_dev *dev;
2038         int ret;
2039
2040         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2041         /* Check mask bits */
2042         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2043         if (ret < 0)
2044                 return ret;
2045
2046         dev = &rte_eth_devices[port_id];
2047
2048         /* Check entry value */
2049         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2050                                 dev->data->nb_rx_queues);
2051         if (ret < 0)
2052                 return ret;
2053
2054         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2055         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
2056 }
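
/*
 * Usage sketch (illustrative only): build a redirection table that
 * spreads traffic round-robin over nb_q RX queues. Each 64-entry
 * group carries its own validity mask, as checked above; reta_size
 * is assumed to be a multiple of RTE_RETA_GROUP_SIZE.
 *
 *	struct rte_eth_rss_reta_entry64 reta[reta_size / RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(reta, 0, sizeof(reta));
 *	for (i = 0; i < reta_size; i++) {
 *		reta[i / RTE_RETA_GROUP_SIZE].mask |=
 *			1ULL << (i % RTE_RETA_GROUP_SIZE);
 *		reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *			i % nb_q;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
 */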
2057
2058 int
2059 rte_eth_dev_rss_reta_query(uint8_t port_id,
2060                            struct rte_eth_rss_reta_entry64 *reta_conf,
2061                            uint16_t reta_size)
2062 {
2063         struct rte_eth_dev *dev;
2064         int ret;
2065
2066         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2067
2068         /* Check mask bits */
2069         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2070         if (ret < 0)
2071                 return ret;
2072
2073         dev = &rte_eth_devices[port_id];
2074         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2075         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
2076 }
2077
2078 int
2079 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
2080 {
2081         struct rte_eth_dev *dev;
2082         uint64_t rss_hash_protos;
2083
2084         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2085         rss_hash_protos = rss_conf->rss_hf;
2086         if ((rss_hash_protos != 0) &&
2087             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
2088                 RTE_PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%" PRIx64 "\n",
2089                                 rss_hash_protos);
2090                 return -EINVAL;
2091         }
2092         dev = &rte_eth_devices[port_id];
2093         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2094         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
2095 }
2096
2097 int
2098 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
2099                               struct rte_eth_rss_conf *rss_conf)
2100 {
2101         struct rte_eth_dev *dev;
2102
2103         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2104         dev = &rte_eth_devices[port_id];
2105         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2106         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
2107 }
2108
2109 int
2110 rte_eth_dev_udp_tunnel_port_add(uint8_t port_id,
2111                                 struct rte_eth_udp_tunnel *udp_tunnel)
2112 {
2113         struct rte_eth_dev *dev;
2114
2115         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2116         if (udp_tunnel == NULL) {
2117                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2118                 return -EINVAL;
2119         }
2120
2121         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2122                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2123                 return -EINVAL;
2124         }
2125
2126         dev = &rte_eth_devices[port_id];
2127         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
2128         return (*dev->dev_ops->udp_tunnel_port_add)(dev, udp_tunnel);
2129 }
2130
2131 int
2132 rte_eth_dev_udp_tunnel_port_delete(uint8_t port_id,
2133                                    struct rte_eth_udp_tunnel *udp_tunnel)
2134 {
2135         struct rte_eth_dev *dev;
2136
2137         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2138         dev = &rte_eth_devices[port_id];
2139
2140         if (udp_tunnel == NULL) {
2141                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2142                 return -EINVAL;
2143         }
2144
2145         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2146                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2147                 return -EINVAL;
2148         }
2149
2150         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
2151         return (*dev->dev_ops->udp_tunnel_port_del)(dev, udp_tunnel);
2152 }
2153
2154 int
2155 rte_eth_led_on(uint8_t port_id)
2156 {
2157         struct rte_eth_dev *dev;
2158
2159         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2160         dev = &rte_eth_devices[port_id];
2161         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2162         return (*dev->dev_ops->dev_led_on)(dev);
2163 }
2164
2165 int
2166 rte_eth_led_off(uint8_t port_id)
2167 {
2168         struct rte_eth_dev *dev;
2169
2170         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2171         dev = &rte_eth_devices[port_id];
2172         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2173         return (*dev->dev_ops->dev_led_off)(dev);
2174 }
2175
2176 /*
2177  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2178  * an empty spot.
2179  */
2180 static int
2181 get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2182 {
2183         struct rte_eth_dev_info dev_info;
2184         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2185         unsigned i;
2186
2187         rte_eth_dev_info_get(port_id, &dev_info);
2188
2189         for (i = 0; i < dev_info.max_mac_addrs; i++)
2190                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2191                         return i;
2192
2193         return -1;
2194 }
2195
2196 static const struct ether_addr null_mac_addr;
2197
2198 int
2199 rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
2200                         uint32_t pool)
2201 {
2202         struct rte_eth_dev *dev;
2203         int index;
2204         uint64_t pool_mask;
2205
2206         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2207         dev = &rte_eth_devices[port_id];
2208         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2209
2210         if (is_zero_ether_addr(addr)) {
2211                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2212                         port_id);
2213                 return -EINVAL;
2214         }
2215         if (pool >= ETH_64_POOLS) {
2216                 RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2217                 return -EINVAL;
2218         }
2219
2220         index = get_mac_addr_index(port_id, addr);
2221         if (index < 0) {
2222                 index = get_mac_addr_index(port_id, &null_mac_addr);
2223                 if (index < 0) {
2224                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2225                                 port_id);
2226                         return -ENOSPC;
2227                 }
2228         } else {
2229                 pool_mask = dev->data->mac_pool_sel[index];
2230
2231                 /* If both the MAC address and pool are already there, do nothing */
2232                 if (pool_mask & (1ULL << pool))
2233                         return 0;
2234         }
2235
2236         /* Update NIC */
2237         (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2238
2239         /* Update address in NIC data structure */
2240         ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2241
2242         /* Update pool bitmap in NIC data structure */
2243         dev->data->mac_pool_sel[index] |= (1ULL << pool);
2244
2245         return 0;
2246 }
2247
2248 int
2249 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
2250 {
2251         struct rte_eth_dev *dev;
2252         int index;
2253
2254         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2255         dev = &rte_eth_devices[port_id];
2256         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2257
2258         index = get_mac_addr_index(port_id, addr);
2259         if (index == 0) {
2260                 RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2261                 return -EADDRINUSE;
2262         } else if (index < 0)
2263                 return 0;  /* Do nothing if address wasn't found */
2264
2265         /* Update NIC */
2266         (*dev->dev_ops->mac_addr_remove)(dev, index);
2267
2268         /* Update address in NIC data structure */
2269         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2270
2271         /* reset pool bitmap */
2272         dev->data->mac_pool_sel[index] = 0;
2273
2274         return 0;
2275 }
2276
2277 int
2278 rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr)
2279 {
2280         struct rte_eth_dev *dev;
2281
2282         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2283
2284         if (!is_valid_assigned_ether_addr(addr))
2285                 return -EINVAL;
2286
2287         dev = &rte_eth_devices[port_id];
2288         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2289
2290         /* Update default address in NIC data structure */
2291         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
2292
2293         (*dev->dev_ops->mac_addr_set)(dev, addr);
2294
2295         return 0;
2296 }
2297
2298 int
2299 rte_eth_dev_set_vf_rxmode(uint8_t port_id, uint16_t vf,
2300                                 uint16_t rx_mode, uint8_t on)
2301 {
2302         uint16_t num_vfs;
2303         struct rte_eth_dev *dev;
2304         struct rte_eth_dev_info dev_info;
2305
2306         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2307
2308         dev = &rte_eth_devices[port_id];
2309         rte_eth_dev_info_get(port_id, &dev_info);
2310
2311         num_vfs = dev_info.max_vfs;
2312         if (vf >= num_vfs) {
2313                 RTE_PMD_DEBUG_TRACE("set VF RX mode: invalid VF id %d\n", vf);
2314                 return -EINVAL;
2315         }
2316
2317         if (rx_mode == 0) {
2318                 RTE_PMD_DEBUG_TRACE("set VF RX mode: mode mask cannot be zero\n");
2319                 return -EINVAL;
2320         }
2321         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
2322         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
2323 }
2324
2325 /*
2326  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2327  * an empty spot.
2328  */
2329 static int
2330 get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2331 {
2332         struct rte_eth_dev_info dev_info;
2333         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2334         unsigned i;
2335
2336         rte_eth_dev_info_get(port_id, &dev_info);
2337         if (!dev->data->hash_mac_addrs)
2338                 return -1;
2339
2340         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2341                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2342                         ETHER_ADDR_LEN) == 0)
2343                         return i;
2344
2345         return -1;
2346 }
2347
2348 int
2349 rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
2350                                 uint8_t on)
2351 {
2352         int index;
2353         int ret;
2354         struct rte_eth_dev *dev;
2355
2356         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2357
2358         dev = &rte_eth_devices[port_id];
2359         if (is_zero_ether_addr(addr)) {
2360                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2361                         port_id);
2362                 return -EINVAL;
2363         }
2364
2365         index = get_hash_mac_addr_index(port_id, addr);
2366         /* Check if it's already there, and do nothing */
2367         if ((index >= 0) && (on))
2368                 return 0;
2369
2370         if (index < 0) {
2371                 if (!on) {
2372                         RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
2373                                 "set in UTA\n", port_id);
2374                         return -EINVAL;
2375                 }
2376
2377                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2378                 if (index < 0) {
2379                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2380                                         port_id);
2381                         return -ENOSPC;
2382                 }
2383         }
2384
2385         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2386         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2387         if (ret == 0) {
2388                 /* Update address in NIC data structure */
2389                 if (on)
2390                         ether_addr_copy(addr,
2391                                         &dev->data->hash_mac_addrs[index]);
2392                 else
2393                         ether_addr_copy(&null_mac_addr,
2394                                         &dev->data->hash_mac_addrs[index]);
2395         }
2396
2397         return ret;
2398 }
2399
2400 int
2401 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2402 {
2403         struct rte_eth_dev *dev;
2404
2405         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2406
2407         dev = &rte_eth_devices[port_id];
2408
2409         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2410         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2411 }
2412
2413 int
2414 rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on)
2415 {
2416         uint16_t num_vfs;
2417         struct rte_eth_dev *dev;
2418         struct rte_eth_dev_info dev_info;
2419
2420         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2421
2422         dev = &rte_eth_devices[port_id];
2423         rte_eth_dev_info_get(port_id, &dev_info);
2424
2425         num_vfs = dev_info.max_vfs;
2426         if (vf >= num_vfs) {
2427                 RTE_PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2428                 return -EINVAL;
2429         }
2430
2431         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2432         return (*dev->dev_ops->set_vf_rx)(dev, vf, on);
2433 }
2434
2435 int
2436 rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on)
2437 {
2438         uint16_t num_vfs;
2439         struct rte_eth_dev *dev;
2440         struct rte_eth_dev_info dev_info;
2441
2442         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2443
2444         dev = &rte_eth_devices[port_id];
2445         rte_eth_dev_info_get(port_id, &dev_info);
2446
2447         num_vfs = dev_info.max_vfs;
2448         if (vf >= num_vfs) {
2449                 RTE_PMD_DEBUG_TRACE("set VF TX: invalid VF id=%d\n", vf);
2450                 return -EINVAL;
2451         }
2452
2453         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2454         return (*dev->dev_ops->set_vf_tx)(dev, vf, on);
2455 }
2456
2457 int
2458 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2459                                uint64_t vf_mask, uint8_t vlan_on)
2460 {
2461         struct rte_eth_dev *dev;
2462
2463         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2464
2465         dev = &rte_eth_devices[port_id];
2466
2467         if (vlan_id > ETHER_MAX_VLAN_ID) {
2468                 RTE_PMD_DEBUG_TRACE("VF VLAN filter: invalid VLAN id=%d\n",
2469                         vlan_id);
2470                 return -EINVAL;
2471         }
2472
2473         if (vf_mask == 0) {
2474                 RTE_PMD_DEBUG_TRACE("VF VLAN filter: vf_mask cannot be 0\n");
2475                 return -EINVAL;
2476         }
2477
2478         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2479         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2480                                                    vf_mask, vlan_on);
2481 }
2482
2483 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
2484                                         uint16_t tx_rate)
2485 {
2486         struct rte_eth_dev *dev;
2487         struct rte_eth_dev_info dev_info;
2488         struct rte_eth_link link;
2489
2490         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2491
2492         dev = &rte_eth_devices[port_id];
2493         rte_eth_dev_info_get(port_id, &dev_info);
2494         link = dev->data->dev_link;
2495
2496         if (queue_idx >= dev_info.max_tx_queues) {
2497                 RTE_PMD_DEBUG_TRACE("set queue rate limit: port %d: "
2498                                 "invalid queue id=%d\n", port_id, queue_idx);
2499                 return -EINVAL;
2500         }
2501
2502         if (tx_rate > link.link_speed) {
2503                 RTE_PMD_DEBUG_TRACE("set queue rate limit: invalid tx_rate=%d, "
2504                                 "greater than link speed=%d\n",
2505                                 tx_rate, link.link_speed);
2506                 return -EINVAL;
2507         }
2508
2509         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2510         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2511 }
2512
2513 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
2514                                 uint64_t q_msk)
2515 {
2516         struct rte_eth_dev *dev;
2517         struct rte_eth_dev_info dev_info;
2518         struct rte_eth_link link;
2519
2520         if (q_msk == 0)
2521                 return 0;
2522
2523         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2524
2525         dev = &rte_eth_devices[port_id];
2526         rte_eth_dev_info_get(port_id, &dev_info);
2527         link = dev->data->dev_link;
2528
2529         if (vf >= dev_info.max_vfs) {
2530                 RTE_PMD_DEBUG_TRACE("set VF rate limit: port %d: "
2531                                 "invalid vf id=%d\n", port_id, vf);
2532                 return -EINVAL;
2533         }
2534
2535         if (tx_rate > link.link_speed) {
2536                 RTE_PMD_DEBUG_TRACE("set VF rate limit: invalid tx_rate=%d, "
2537                                 "greater than link speed=%d\n",
2538                                 tx_rate, link.link_speed);
2539                 return -EINVAL;
2540         }
2541
2542         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
2543         return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
2544 }
2545
2546 int
2547 rte_eth_mirror_rule_set(uint8_t port_id,
2548                         struct rte_eth_mirror_conf *mirror_conf,
2549                         uint8_t rule_id, uint8_t on)
2550 {
2551         struct rte_eth_dev *dev;
2552
2553         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2554         if (mirror_conf->rule_type == 0) {
2555                 RTE_PMD_DEBUG_TRACE("mirror rule type cannot be 0.\n");
2556                 return -EINVAL;
2557         }
2558
2559         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2560                 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
2561                                 ETH_64_POOLS - 1);
2562                 return -EINVAL;
2563         }
2564
2565         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
2566              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
2567             (mirror_conf->pool_mask == 0)) {
2568                 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask cannot be 0.\n");
2569                 return -EINVAL;
2570         }
2571
2572         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
2573             mirror_conf->vlan.vlan_mask == 0) {
2574                 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask cannot be 0.\n");
2575                 return -EINVAL;
2576         }
2577
2578         dev = &rte_eth_devices[port_id];
2579         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2580
2581         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2582 }
2583
2584 int
2585 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2586 {
2587         struct rte_eth_dev *dev;
2588
2589         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2590
2591         dev = &rte_eth_devices[port_id];
2592         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2593
2594         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2595 }
2596
2597 int
2598 rte_eth_dev_callback_register(uint8_t port_id,
2599                         enum rte_eth_event_type event,
2600                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2601 {
2602         struct rte_eth_dev *dev;
2603         struct rte_eth_dev_callback *user_cb;
2604
2605         if (!cb_fn)
2606                 return -EINVAL;
2607
2608         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2609
2610         dev = &rte_eth_devices[port_id];
2611         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2612
2613         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
2614                 if (user_cb->cb_fn == cb_fn &&
2615                         user_cb->cb_arg == cb_arg &&
2616                         user_cb->event == event) {
2617                         break;
2618                 }
2619         }
2620
2621         /* create a new callback. */
2622         if (user_cb == NULL)
2623                 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
2624                                         sizeof(struct rte_eth_dev_callback), 0);
2625         if (user_cb != NULL) {
2626                 user_cb->cb_fn = cb_fn;
2627                 user_cb->cb_arg = cb_arg;
2628                 user_cb->event = event;
2629                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
2630         }
2631
2632         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2633         return (user_cb == NULL) ? -ENOMEM : 0;
2634 }
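
/*
 * Usage sketch (illustrative only): react to link state changes. The
 * hypothetical on_link_change() handler matches rte_eth_dev_cb_fn and
 * runs in the interrupt thread, so it should not block.
 *
 *	static void
 *	on_link_change(uint8_t pid, enum rte_eth_event_type ev, void *arg)
 *	{
 *		...
 *	}
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      on_link_change, NULL);
 */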
2635
2636 int
2637 rte_eth_dev_callback_unregister(uint8_t port_id,
2638                         enum rte_eth_event_type event,
2639                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2640 {
2641         int ret;
2642         struct rte_eth_dev *dev;
2643         struct rte_eth_dev_callback *cb, *next;
2644
2645         if (!cb_fn)
2646                 return -EINVAL;
2647
2648         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2649
2650         dev = &rte_eth_devices[port_id];
2651         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2652
2653         ret = 0;
2654         for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
2655
2656                 next = TAILQ_NEXT(cb, next);
2657
2658                 if (cb->cb_fn != cb_fn || cb->event != event ||
2659                                 (cb->cb_arg != (void *)-1 &&
2660                                 cb->cb_arg != cb_arg))
2661                         continue;
2662
2663                 /*
2664                  * if this callback is not executing right now,
2665                  * then remove it.
2666                  */
2667                 if (cb->active == 0) {
2668                         TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
2669                         rte_free(cb);
2670                 } else {
2671                         ret = -EAGAIN;
2672                 }
2673         }
2674
2675         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2676         return ret;
2677 }
2678
2679 void
2680 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
2681         enum rte_eth_event_type event)
2682 {
2683         struct rte_eth_dev_callback *cb_lst;
2684         struct rte_eth_dev_callback dev_cb;
2685
2686         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2687         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
2688                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
2689                         continue;
2690                 dev_cb = *cb_lst;
2691                 cb_lst->active = 1;
2692                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2693                 dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
2694                                                 dev_cb.cb_arg);
2695                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
2696                 cb_lst->active = 0;
2697         }
2698         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2699 }
2700
2701 int
2702 rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
2703 {
2704         uint32_t vec;
2705         struct rte_eth_dev *dev;
2706         struct rte_intr_handle *intr_handle;
2707         uint16_t qid;
2708         int rc;
2709
2710         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2711
2712         dev = &rte_eth_devices[port_id];
2713         intr_handle = &dev->pci_dev->intr_handle;
2714         if (!intr_handle->intr_vec) {
2715                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2716                 return -EPERM;
2717         }
2718
2719         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
2720                 vec = intr_handle->intr_vec[qid];
2721                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2722                 if (rc && rc != -EEXIST) {
2723                         RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2724                                         " op %d epfd %d vec %u\n",
2725                                         port_id, qid, op, epfd, vec);
2726                 }
2727         }
2728
2729         return 0;
2730 }
2731
2732 const struct rte_memzone *
2733 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
2734                          uint16_t queue_id, size_t size, unsigned align,
2735                          int socket_id)
2736 {
2737         char z_name[RTE_MEMZONE_NAMESIZE];
2738         const struct rte_memzone *mz;
2739
2740         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
2741                  dev->driver->pci_drv.name, ring_name,
2742                  dev->data->port_id, queue_id);
2743
2744         mz = rte_memzone_lookup(z_name);
2745         if (mz)
2746                 return mz;
2747
2748         if (rte_xen_dom0_supported())
2749                 return rte_memzone_reserve_bounded(z_name, size, socket_id,
2750                                                    0, align, RTE_PGSIZE_2M);
2751         else
2752                 return rte_memzone_reserve_aligned(z_name, size, socket_id,
2753                                                    0, align);
2754 }
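
/*
 * Typical PMD use (illustrative only): reserve the DMA memory that
 * backs one queue's descriptor ring; the helper re-uses an existing
 * memzone of the same name on restart.
 *
 *	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
 *				      ring_size, RTE_CACHE_LINE_SIZE,
 *				      socket_id);
 */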
2755
2756 int
2757 rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
2758                           int epfd, int op, void *data)
2759 {
2760         uint32_t vec;
2761         struct rte_eth_dev *dev;
2762         struct rte_intr_handle *intr_handle;
2763         int rc;
2764
2765         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2766
2767         dev = &rte_eth_devices[port_id];
2768         if (queue_id >= dev->data->nb_rx_queues) {
2769                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
2770                 return -EINVAL;
2771         }
2772
2773         intr_handle = &dev->pci_dev->intr_handle;
2774         if (!intr_handle->intr_vec) {
2775                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2776                 return -EPERM;
2777         }
2778
2779         vec = intr_handle->intr_vec[queue_id];
2780         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2781         if (rc && rc != -EEXIST) {
2782                 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2783                                 " op %d epfd %d vec %u\n",
2784                                 port_id, queue_id, op, epfd, vec);
2785                 return rc;
2786         }
2787
2788         return 0;
2789 }
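
/*
 * Usage sketch (illustrative only): register one RX queue's interrupt
 * with the calling thread's epoll instance, then sleep until traffic
 * arrives. Assumes RX interrupts were enabled in the port config.
 *
 *	struct rte_epoll_event ev;
 *
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1 /* no timeout */);
 */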
2790
2791 int
2792 rte_eth_dev_rx_intr_enable(uint8_t port_id,
2793                            uint16_t queue_id)
2794 {
2795         struct rte_eth_dev *dev;
2796
2797         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2798
2799         dev = &rte_eth_devices[port_id];
2800
2801         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
2802         return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
2803 }
2804
2805 int
2806 rte_eth_dev_rx_intr_disable(uint8_t port_id,
2807                             uint16_t queue_id)
2808 {
2809         struct rte_eth_dev *dev;
2810
2811         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2812
2813         dev = &rte_eth_devices[port_id];
2814
2815         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
2816         return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
2817 }
2818
2819 #ifdef RTE_NIC_BYPASS
2820 int rte_eth_dev_bypass_init(uint8_t port_id)
2821 {
2822         struct rte_eth_dev *dev;
2823
2824         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2825
2826         dev = &rte_eth_devices[port_id];
2827         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
2828         (*dev->dev_ops->bypass_init)(dev);
2829         return 0;
2830 }
2831
2832 int
2833 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
2834 {
2835         struct rte_eth_dev *dev;
2836
2837         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2838
2839         dev = &rte_eth_devices[port_id];
2840         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2841         (*dev->dev_ops->bypass_state_show)(dev, state);
2842         return 0;
2843 }
2844
2845 int
2846 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
2847 {
2848         struct rte_eth_dev *dev;
2849
2850         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2851
2852         dev = &rte_eth_devices[port_id];
2853         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
2854         (*dev->dev_ops->bypass_state_set)(dev, new_state);
2855         return 0;
2856 }
2857
2858 int
2859 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
2860 {
2861         struct rte_eth_dev *dev;
2862
2863         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2864
2865         dev = &rte_eth_devices[port_id];
2866         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_show, -ENOTSUP);
2867         (*dev->dev_ops->bypass_event_show)(dev, event, state);
2868         return 0;
2869 }
2870
2871 int
2872 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
2873 {
2874         struct rte_eth_dev *dev;
2875
2876         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2877
2878         dev = &rte_eth_devices[port_id];
2879
2880         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
2881         (*dev->dev_ops->bypass_event_set)(dev, event, state);
2882         return 0;
2883 }
2884
2885 int
2886 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
2887 {
2888         struct rte_eth_dev *dev;
2889
2890         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2891
2892         dev = &rte_eth_devices[port_id];
2893
2894         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
2895         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
2896         return 0;
2897 }
2898
2899 int
2900 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
2901 {
2902         struct rte_eth_dev *dev;
2903
2904         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2905
2906         dev = &rte_eth_devices[port_id];
2907
2908         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
2909         (*dev->dev_ops->bypass_ver_show)(dev, ver);
2910         return 0;
2911 }
2912
2913 int
2914 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
2915 {
2916         struct rte_eth_dev *dev;
2917
2918         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2919
2920         dev = &rte_eth_devices[port_id];
2921
2922         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
2923         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
2924         return 0;
2925 }
2926
2927 int
2928 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
2929 {
2930         struct rte_eth_dev *dev;
2931
2932         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2933
2934         dev = &rte_eth_devices[port_id];
2935
2936         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
2937         (*dev->dev_ops->bypass_wd_reset)(dev);
2938         return 0;
2939 }
2940 #endif
2941
2942 int
2943 rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
2944 {
2945         struct rte_eth_dev *dev;
2946
2947         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2948
2949         dev = &rte_eth_devices[port_id];
2950         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2951         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
2952                                 RTE_ETH_FILTER_NOP, NULL);
2953 }
2954
2955 int
2956 rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
2957                        enum rte_filter_op filter_op, void *arg)
2958 {
2959         struct rte_eth_dev *dev;
2960
2961         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2962
2963         dev = &rte_eth_devices[port_id];
2964         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2965         return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
2966 }
2967
2968 void *
2969 rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
2970                 rte_rx_callback_fn fn, void *user_param)
2971 {
2972 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2973         rte_errno = ENOTSUP;
2974         return NULL;
2975 #endif
2976         /* check input parameters */
2977         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2978                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2979                 rte_errno = EINVAL;
2980                 return NULL;
2981         }
2982
2983         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2984
2985         if (cb == NULL) {
2986                 rte_errno = ENOMEM;
2987                 return NULL;
2988         }
2989
2990         cb->fn.rx = fn;
2991         cb->param = user_param;
2992
2993         /* Add the callbacks in fifo order. */
2994         struct rte_eth_rxtx_callback *tail =
2995                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
2996
2997         if (!tail) {
2998                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
2999
3000         } else {
3001                 while (tail->next)
3002                         tail = tail->next;
3003                 tail->next = cb;
3004         }
3005
3006         return cb;
3007 }
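
/*
 * Usage sketch (illustrative only): count the packets delivered by
 * rte_eth_rx_burst() on queue 0. The hypothetical count_cb() matches
 * rte_rx_callback_fn; the returned handle is later passed to
 * rte_eth_remove_rx_callback().
 *
 *	static uint16_t
 *	count_cb(uint8_t pid, uint16_t qid, struct rte_mbuf *pkts[],
 *		 uint16_t nb_pkts, uint16_t max_pkts, void *arg)
 *	{
 *		*(uint64_t *)arg += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	static uint64_t total;
 *	void *cb = rte_eth_add_rx_callback(port_id, 0, count_cb, &total);
 */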
3008
3009 void *
3010 rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
3011                 rte_tx_callback_fn fn, void *user_param)
3012 {
3013 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3014         rte_errno = ENOTSUP;
3015         return NULL;
3016 #endif
3017         /* check input parameters */
3018         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3019                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3020                 rte_errno = EINVAL;
3021                 return NULL;
3022         }
3023
3024         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3025
3026         if (cb == NULL) {
3027                 rte_errno = ENOMEM;
3028                 return NULL;
3029         }
3030
3031         cb->fn.tx = fn;
3032         cb->param = user_param;
3033
3034         /* Add the callbacks in fifo order. */
3035         struct rte_eth_rxtx_callback *tail =
3036                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3037
3038         if (!tail) {
3039                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3040
3041         } else {
3042                 while (tail->next)
3043                         tail = tail->next;
3044                 tail->next = cb;
3045         }
3046
3047         return cb;
3048 }
3049
3050 int
3051 rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
3052                 struct rte_eth_rxtx_callback *user_cb)
3053 {
3054 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3055         return -ENOTSUP;
3056 #endif
3057         /* Check input parameters. */
3058         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3059         if (user_cb == NULL ||
3060                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
3061                 return -EINVAL;
3062
3063         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3064         struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
3065         struct rte_eth_rxtx_callback *prev_cb;
3066
3067         /* Reset head pointer and remove user cb if first in the list. */
3068         if (cb == user_cb) {
3069                 dev->post_rx_burst_cbs[queue_id] = user_cb->next;
3070                 return 0;
3071         }
3072
3073         /* Remove the user cb from the callback list. */
3074         do {
3075                 prev_cb = cb;
3076                 cb = cb->next;
3077
3078                 if (cb == user_cb) {
3079                         prev_cb->next = user_cb->next;
3080                         return 0;
3081                 }
3082
3083         } while (cb != NULL);
3084
3085         /* Callback wasn't found. */
3086         return -EINVAL;
3087 }
3088
3089 int
3090 rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
3091                 struct rte_eth_rxtx_callback *user_cb)
3092 {
3093 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3094         return -ENOTSUP;
3095 #endif
3096         /* Check input parameters. */
3097         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3098         if (user_cb == NULL ||
3099                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
3100                 return -EINVAL;
3101
3102         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3103         struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
3104         struct rte_eth_rxtx_callback *prev_cb;
3105
3106         /* Reset head pointer and remove user cb if first in the list. */
3107         if (cb == user_cb) {
3108                 dev->pre_tx_burst_cbs[queue_id] = user_cb->next;
3109                 return 0;
3110         }
3111
3112         /* Remove the user cb from the callback list. */
3113         do {
3114                 prev_cb = cb;
3115                 cb = cb->next;
3116
3117                 if (cb == user_cb) {
3118                         prev_cb->next = user_cb->next;
3119                         return 0;
3120                 }
3121
3122         } while (cb != NULL);
3123
3124         /* Callback wasn't found. */
3125         return -EINVAL;
3126 }
3127
3128 int
3129 rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3130         struct rte_eth_rxq_info *qinfo)
3131 {
3132         struct rte_eth_dev *dev;
3133
3134         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3135
3136         if (qinfo == NULL)
3137                 return -EINVAL;
3138
3139         dev = &rte_eth_devices[port_id];
3140         if (queue_id >= dev->data->nb_rx_queues) {
3141                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3142                 return -EINVAL;
3143         }
3144
3145         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3146
3147         memset(qinfo, 0, sizeof(*qinfo));
3148         (*dev->dev_ops->rxq_info_get)(dev, queue_id, qinfo);
3149         return 0;
3150 }
3151
3152 int
3153 rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3154         struct rte_eth_txq_info *qinfo)
3155 {
3156         struct rte_eth_dev *dev;
3157
3158         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3159
3160         if (qinfo == NULL)
3161                 return -EINVAL;
3162
3163         dev = &rte_eth_devices[port_id];
3164         if (queue_id >= dev->data->nb_tx_queues) {
3165                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3166                 return -EINVAL;
3167         }
3168
3169         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3170
3171         memset(qinfo, 0, sizeof(*qinfo));
3172         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3173         return 0;
3174 }
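
/*
 * Usage sketch (illustrative only): querying the ring size of queue 0 in
 * each direction.  Both calls return -ENOTSUP when the PMD does not
 * implement the corresponding dev_ops hook, so callers should tolerate
 * that value.
 *
 *	struct rte_eth_rxq_info rx_qinfo;
 *	struct rte_eth_txq_info tx_qinfo;
 *
 *	if (rte_eth_rx_queue_info_get(port_id, 0, &rx_qinfo) == 0)
 *		printf("rxq0: %u descriptors\n", rx_qinfo.nb_desc);
 *	if (rte_eth_tx_queue_info_get(port_id, 0, &tx_qinfo) == 0)
 *		printf("txq0: %u descriptors\n", tx_qinfo.nb_desc);
 */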

int
rte_eth_dev_set_mc_addr_list(uint8_t port_id,
			     struct ether_addr *mc_addr_set,
			     uint32_t nb_mc_addr)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
	return dev->dev_ops->set_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
}
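
/*
 * Usage sketch (illustrative only): installing a two-entry multicast
 * filter list in a single call; the addresses are examples.  Per the API
 * contract in rte_ethdev.h, passing a NULL set with nb_mc_addr == 0
 * flushes the list.
 *
 *	struct ether_addr mc[] = {
 *		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
 *		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x02 } },
 *	};
 *
 *	ret = rte_eth_dev_set_mc_addr_list(port_id, mc, 2);
 */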

int
rte_eth_timesync_enable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
	return (*dev->dev_ops->timesync_enable)(dev);
}

int
rte_eth_timesync_disable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
	return (*dev->dev_ops->timesync_disable)(dev);
}

int
rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp,
				   uint32_t flags)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
	return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
}

int
rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
	return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
}

int
rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
	return (*dev->dev_ops->timesync_adjust_time)(dev, delta);
}

int
rte_eth_timesync_read_time(uint8_t port_id, struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
	return (*dev->dev_ops->timesync_read_time)(dev, timestamp);
}

int
rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
	return (*dev->dev_ops->timesync_write_time)(dev, timestamp);
}
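
/*
 * Usage sketch (illustrative only): a minimal IEEE 1588 sequence using the
 * wrappers above.  Every wrapper returns -ENOTSUP on PMDs without timesync
 * support; the flags argument of the RX timestamp read is PMD-specific.
 * "handle_ptp_sync" and "delta_ns" are hypothetical application-side names.
 *
 *	struct timespec ts;
 *
 *	rte_eth_timesync_enable(port_id);
 *	if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
 *		handle_ptp_sync(&ts);
 *	rte_eth_timesync_adjust_time(port_id, delta_ns);
 *	rte_eth_timesync_disable(port_id);
 */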

int
rte_eth_dev_get_reg_length(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg_length, -ENOTSUP);
	return (*dev->dev_ops->get_reg_length)(dev);
}

int
rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
	return (*dev->dev_ops->get_reg)(dev, info);
}

int
rte_eth_dev_get_eeprom_length(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
	return (*dev->dev_ops->get_eeprom_length)(dev);
}

int
rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
	return (*dev->dev_ops->get_eeprom)(dev, info);
}

int
rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
	return (*dev->dev_ops->set_eeprom)(dev, info);
}
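
/*
 * Usage sketch (illustrative only): dumping device registers, assuming the
 * convention used by the examples/ethtool application, where the length
 * query reports a count of 32-bit registers and the data buffer is sized
 * accordingly.  Error handling and the free are elided.
 *
 *	struct rte_dev_reg_info reg;
 *	int count = rte_eth_dev_get_reg_length(port_id);
 *
 *	memset(&reg, 0, sizeof(reg));
 *	if (count > 0) {
 *		reg.data = malloc(count * sizeof(uint32_t));
 *		reg.length = count;
 *		rte_eth_dev_get_reg_info(port_id, &reg);
 *	}
 *
 * rte_eth_dev_get_eeprom()/rte_eth_dev_set_eeprom() follow the same
 * pattern with rte_dev_eeprom_info and rte_eth_dev_get_eeprom_length().
 */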

int
rte_eth_dev_get_dcb_info(uint8_t port_id,
			     struct rte_eth_dcb_info *dcb_info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	/* Validate the output pointer before the memset, as the queue-info
	 * getters above do. */
	if (dcb_info == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
	return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
}
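
/*
 * Usage sketch (illustrative only): reading back the DCB configuration of
 * a port that was configured in a DCB mode.
 *
 *	struct rte_eth_dcb_info dcb_info;
 *
 *	if (rte_eth_dev_get_dcb_info(port_id, &dcb_info) == 0)
 *		printf("%u traffic classes\n", dcb_info.nb_tcs);
 */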

void
rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev, struct rte_pci_device *pci_dev)
{
	if ((eth_dev == NULL) || (pci_dev == NULL)) {
		RTE_PMD_DEBUG_TRACE("NULL pointer eth_dev=%p pci_dev=%p\n",
				eth_dev, pci_dev);
		return;
	}

	eth_dev->data->dev_flags = 0;
	if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
	if (pci_dev->driver->drv_flags & RTE_PCI_DRV_DETACHABLE)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

	eth_dev->data->kdrv = pci_dev->kdrv;
	eth_dev->data->numa_node = pci_dev->numa_node;
	eth_dev->data->drv_name = pci_dev->driver->name;
}
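
/*
 * Usage sketch (illustrative only): a PCI PMD normally calls this helper
 * from its eth_dev init path, right after allocating the port, so the
 * generic layer inherits the driver flags, kernel driver and NUMA node of
 * the underlying PCI device.  Assuming the current allocation API:
 *
 *	eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_PCI);
 *	rte_eth_copy_pci_info(eth_dev, pci_dev);
 */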

int
rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id,
				    struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (l2_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
		return -EINVAL;
	}

	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
				-ENOTSUP);
	return (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev, l2_tunnel);
}

int
rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
				  struct rte_eth_l2_tunnel_conf *l2_tunnel,
				  uint32_t mask,
				  uint8_t en)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (l2_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
		return -EINVAL;
	}

	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
		return -EINVAL;
	}

	if (mask == 0) {
		RTE_PMD_DEBUG_TRACE("Mask should have a value.\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
				-ENOTSUP);
	return (*dev->dev_ops->l2_tunnel_offload_set)(dev, l2_tunnel, mask, en);
}
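
/*
 * Usage sketch (illustrative only): configuring the outer EtherType used
 * for E-Tag (802.1BR) tunnels and then turning on insertion offload for
 * that tunnel type.  The ETH_L2_TUNNEL_*_MASK bits select which offloads
 * the "en" flag applies to.
 *
 *	struct rte_eth_l2_tunnel_conf conf = {
 *		.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG,
 *		.ether_type = 0x893f,
 *	};
 *
 *	rte_eth_dev_l2_tunnel_eth_type_conf(port_id, &conf);
 *	rte_eth_dev_l2_tunnel_offload_set(port_id, &conf,
 *			ETH_L2_TUNNEL_INSERTION_MASK, 1);
 */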