lib/librte_ether/rte_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_ether.h"
#include "rte_ethdev.h"

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
#define PMD_DEBUG_TRACE(fmt, args...) do {                        \
                RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
        } while (0)
#else
#define PMD_DEBUG_TRACE(fmt, args...)
#endif

/* Macros to restrict functions to the primary process only */
#define PROC_PRIMARY_OR_ERR_RET(retval) do { \
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
                PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
                return (retval); \
        } \
} while (0)

#define PROC_PRIMARY_OR_RET() do { \
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
                PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
                return; \
        } \
} while (0)

/* Macros to check for invalid function pointers in dev_ops structure */
#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
        if ((func) == NULL) { \
                PMD_DEBUG_TRACE("Function not supported\n"); \
                return (retval); \
        } \
} while (0)

#define FUNC_PTR_OR_RET(func) do { \
        if ((func) == NULL) { \
                PMD_DEBUG_TRACE("Function not supported\n"); \
                return; \
        } \
} while (0)

/* Macros to check for valid port */
#define VALID_PORTID_OR_ERR_RET(port_id, retval) do {           \
        if (!rte_eth_dev_is_valid_port(port_id)) {              \
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
                return -EINVAL;                                 \
        }                                                       \
} while (0)

#define VALID_PORTID_OR_RET(port_id) do {                       \
        if (!rte_eth_dev_is_valid_port(port_id)) {              \
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
                return;                                         \
        }                                                       \
} while (0)

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static struct rte_eth_dev_data *rte_eth_dev_data;
static uint8_t nb_ports;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
        {"rx_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
        {"rx_crc_errors", offsetof(struct rte_eth_stats, ibadcrc)},
        {"rx_bad_length_errors", offsetof(struct rte_eth_stats, ibadlen)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"alloc_rx_buff_failed", offsetof(struct rte_eth_stats, rx_nombuf)},
        {"fdir_match", offsetof(struct rte_eth_stats, fdirmatch)},
        {"fdir_miss", offsetof(struct rte_eth_stats, fdirmiss)},
        {"tx_flow_control_xon", offsetof(struct rte_eth_stats, tx_pause_xon)},
        {"rx_flow_control_xon", offsetof(struct rte_eth_stats, rx_pause_xon)},
        {"tx_flow_control_xoff", offsetof(struct rte_eth_stats, tx_pause_xoff)},
        {"rx_flow_control_xoff", offsetof(struct rte_eth_stats, rx_pause_xoff)},
};
#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
        {"rx_packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"rx_bytes", offsetof(struct rte_eth_stats, q_ibytes)},
};
#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
                sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
        {"tx_packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"tx_bytes", offsetof(struct rte_eth_stats, q_obytes)},
        {"tx_errors", offsetof(struct rte_eth_stats, q_errors)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
                sizeof(rte_txq_stats_strings[0]))
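
/*
 * These name/offset tables let generic code read any basic statistic
 * out of struct rte_eth_stats without per-field accessors. A minimal
 * sketch of the lookup pattern (variable names are illustrative only):
 *
 *      struct rte_eth_stats stats;
 *      unsigned i;
 *
 *      rte_eth_stats_get(port_id, &stats);
 *      for (i = 0; i < RTE_NB_STATS; i++) {
 *              uint64_t val = *(uint64_t *)(((char *)&stats) +
 *                              rte_stats_strings[i].offset);
 *              printf("%s: %" PRIu64 "\n", rte_stats_strings[i].name, val);
 *      }
 */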

/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user
 * application, a pointer to the parameters for the callback, and the
 * event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};
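
/*
 * A user application normally populates this structure indirectly,
 * through rte_eth_dev_callback_register(). An illustrative sketch
 * (the handler name is hypothetical):
 *
 *      static void
 *      lsc_handler(uint8_t port_id, enum rte_eth_event_type event, void *arg)
 *      {
 *              RTE_LOG(INFO, PMD, "link status change on port %u\n", port_id);
 *      }
 *
 *      rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                                    lsc_handler, NULL);
 */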

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

enum {
        DEV_DETACHED = 0,
        DEV_ATTACHED
};

static void
rte_eth_dev_data_alloc(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
                                rte_socket_id(), flags);
        } else
                mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
        if (mz == NULL)
                rte_panic("Cannot allocate memzone for ethernet port data\n");

        rte_eth_dev_data = mz->addr;
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                memset(rte_eth_dev_data, 0,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

static uint8_t
rte_eth_dev_find_free_port(void)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].attached == DEV_DETACHED)
                        return i;
        }
        return RTE_MAX_ETHPORTS;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name, enum rte_eth_dev_type type)
{
        uint8_t port_id;
        struct rte_eth_dev *eth_dev;

        port_id = rte_eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
                return NULL;
        }

        if (rte_eth_dev_data == NULL)
                rte_eth_dev_data_alloc();

        if (rte_eth_dev_allocated(name) != NULL) {
                PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
                                name);
                return NULL;
        }

        eth_dev = &rte_eth_devices[port_id];
        eth_dev->data = &rte_eth_dev_data[port_id];
        snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
        eth_dev->data->port_id = port_id;
        eth_dev->attached = DEV_ATTACHED;
        eth_dev->dev_type = type;
        nb_ports++;
        return eth_dev;
}

static int
rte_eth_dev_create_unique_device_name(char *name, size_t size,
                struct rte_pci_device *pci_dev)
{
        int ret;

        if ((name == NULL) || (pci_dev == NULL))
                return -EINVAL;

        ret = snprintf(name, size, "%d:%d.%d",
                        pci_dev->addr.bus, pci_dev->addr.devid,
                        pci_dev->addr.function);
        if (ret < 0)
                return ret;
        return 0;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        eth_dev->attached = DEV_DETACHED;
        nb_ports--;
        return 0;
}

static int
rte_eth_dev_init(struct rte_pci_driver *pci_drv,
                 struct rte_pci_device *pci_dev)
{
        struct eth_driver    *eth_drv;
        struct rte_eth_dev *eth_dev;
        char ethdev_name[RTE_ETH_NAME_MAX_LEN];

        int diag;

        eth_drv = (struct eth_driver *)pci_drv;

        /* Create unique Ethernet device name using PCI address */
        rte_eth_dev_create_unique_device_name(ethdev_name,
                        sizeof(ethdev_name), pci_dev);

        eth_dev = rte_eth_dev_allocate(ethdev_name, RTE_ETH_DEV_PCI);
        if (eth_dev == NULL)
                return -ENOMEM;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
                                  eth_drv->dev_private_size,
                                  RTE_CACHE_LINE_SIZE);
                if (eth_dev->data->dev_private == NULL)
                        rte_panic("Cannot allocate memzone for private port data\n");
        }
        eth_dev->pci_dev = pci_dev;
        eth_dev->driver = eth_drv;
        eth_dev->data->rx_mbuf_alloc_failed = 0;

        /* init user callbacks */
        TAILQ_INIT(&(eth_dev->link_intr_cbs));

        /*
         * Set the default MTU.
         */
        eth_dev->data->mtu = ETHER_MTU;

        /* Invoke PMD device initialization function */
        diag = (*eth_drv->eth_dev_init)(eth_dev);
        if (diag == 0)
                return 0;

        PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%x device_id=0x%x) failed\n",
                        pci_drv->name,
                        (unsigned) pci_dev->id.vendor_id,
                        (unsigned) pci_dev->id.device_id);
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);
        eth_dev->attached = DEV_DETACHED;
        nb_ports--;
        return diag;
}

static int
rte_eth_dev_uninit(struct rte_pci_device *pci_dev)
{
        const struct eth_driver *eth_drv;
        struct rte_eth_dev *eth_dev;
        char ethdev_name[RTE_ETH_NAME_MAX_LEN];
        int ret;

        if (pci_dev == NULL)
                return -EINVAL;

        /* Create unique Ethernet device name using PCI address */
        rte_eth_dev_create_unique_device_name(ethdev_name,
                        sizeof(ethdev_name), pci_dev);

        eth_dev = rte_eth_dev_allocated(ethdev_name);
        if (eth_dev == NULL)
                return -ENODEV;

        eth_drv = (const struct eth_driver *)pci_dev->driver;

        /* Invoke PMD device uninit function */
        if (eth_drv->eth_dev_uninit) {
                ret = (*eth_drv->eth_dev_uninit)(eth_dev);
                if (ret)
                        return ret;
        }

        /* free ether device */
        rte_eth_dev_release_port(eth_dev);

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);

        eth_dev->pci_dev = NULL;
        eth_dev->driver = NULL;
        eth_dev->data = NULL;

        return 0;
}

/**
 * Register an Ethernet [Poll Mode] driver.
 *
 * Function invoked by the initialization function of an Ethernet driver
 * to simultaneously register itself as a PCI driver and as an Ethernet
 * Poll Mode Driver.
 * Invokes the rte_eal_pci_register() function to register the *pci_drv*
 * structure embedded in the *eth_drv* structure, after having stored the
 * address of the rte_eth_dev_init() function in the *devinit* field of
 * the *pci_drv* structure.
 * During the PCI probing phase, the rte_eth_dev_init() function is
 * invoked for each PCI [Ethernet device] matching the embedded PCI
 * identifiers provided by the driver.
 */
void
rte_eth_driver_register(struct eth_driver *eth_drv)
{
        eth_drv->pci_drv.devinit = rte_eth_dev_init;
        eth_drv->pci_drv.devuninit = rte_eth_dev_uninit;
        rte_eal_pci_register(&eth_drv->pci_drv);
}
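
/*
 * Illustrative sketch of how a PMD uses rte_eth_driver_register();
 * the driver and structure names below are hypothetical:
 *
 *      static struct eth_driver rte_foo_pmd = {
 *              .pci_drv = {
 *                      .name = "rte_foo_pmd",
 *                      .id_table = pci_id_foo_map,
 *                      .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
 *              },
 *              .eth_dev_init = eth_foo_dev_init,
 *              .dev_private_size = sizeof(struct foo_adapter),
 *      };
 *
 *      rte_eth_driver_register(&rte_foo_pmd);
 */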

static int
rte_eth_dev_is_valid_port(uint8_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            rte_eth_devices[port_id].attached != DEV_ATTACHED)
                return 0;
        else
                return 1;
}

int
rte_eth_dev_socket_id(uint8_t port_id)
{
        if (!rte_eth_dev_is_valid_port(port_id))
                return -1;
        return rte_eth_devices[port_id].pci_dev->numa_node;
}

uint8_t
rte_eth_dev_count(void)
{
        return nb_ports;
}

/* So far, the DPDK hotplug function only supports Linux */
#ifdef RTE_LIBRTE_EAL_HOTPLUG

static enum rte_eth_dev_type
rte_eth_dev_get_device_type(uint8_t port_id)
{
        if (!rte_eth_dev_is_valid_port(port_id))
                return RTE_ETH_DEV_UNKNOWN;
        return rte_eth_devices[port_id].dev_type;
}

static int
rte_eth_dev_save(struct rte_eth_dev *devs, size_t size)
{
        if ((devs == NULL) ||
            (size != sizeof(struct rte_eth_dev) * RTE_MAX_ETHPORTS))
                return -EINVAL;

        /* save current rte_eth_devices */
        memcpy(devs, rte_eth_devices, size);
        return 0;
}

static int
rte_eth_dev_get_changed_port(struct rte_eth_dev *devs, uint8_t *port_id)
{
        if ((devs == NULL) || (port_id == NULL))
                return -EINVAL;

        /* check which port was attached or detached */
        for (*port_id = 0; *port_id < RTE_MAX_ETHPORTS; (*port_id)++, devs++) {
                if (rte_eth_devices[*port_id].attached ^ devs->attached)
                        return 0;
        }
        return -ENODEV;
}

static int
rte_eth_dev_get_addr_by_port(uint8_t port_id, struct rte_pci_addr *addr)
{
        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (addr == NULL) {
                PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        *addr = rte_eth_devices[port_id].pci_dev->addr;
        return 0;
}

static int
rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
{
        char *tmp;

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (name == NULL) {
                PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        /* shouldn't check 'rte_eth_devices[port_id].data' here,
         * because it might be overwritten by a VDEV PMD */
        tmp = rte_eth_dev_data[port_id].name;
        strcpy(name, tmp);
        return 0;
}

static int
rte_eth_dev_is_detachable(uint8_t port_id)
{
        uint32_t drv_flags;

        if (port_id >= RTE_MAX_ETHPORTS) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return -EINVAL;
        }

        if (rte_eth_devices[port_id].dev_type == RTE_ETH_DEV_PCI) {
                switch (rte_eth_devices[port_id].pci_dev->kdrv) {
                case RTE_KDRV_IGB_UIO:
                case RTE_KDRV_UIO_GENERIC:
                case RTE_KDRV_NIC_UIO:
                        break;
                case RTE_KDRV_VFIO:
                default:
                        return -ENOTSUP;
                }
        }

        drv_flags = rte_eth_devices[port_id].driver->pci_drv.drv_flags;
        return !(drv_flags & RTE_PCI_DRV_DETACHABLE);
}

/* attach the new physical device, then store the port_id of the device */
static int
rte_eth_dev_attach_pdev(struct rte_pci_addr *addr, uint8_t *port_id)
{
        uint8_t new_port_id;
        struct rte_eth_dev devs[RTE_MAX_ETHPORTS];

        if ((addr == NULL) || (port_id == NULL))
                goto err;

        /* save current port status */
        if (rte_eth_dev_save(devs, sizeof(devs)))
                goto err;
        /* re-construct pci_device_list */
        if (rte_eal_pci_scan())
                goto err;
        /* invoke the probe function of the driver that can handle the
         * new device.
         * TODO:
         * rte_eal_pci_probe_one() should return port_id.
         * And rte_eth_dev_save() and rte_eth_dev_get_changed_port()
         * should be removed. */
        if (rte_eal_pci_probe_one(addr))
                goto err;
        /* get port_id enabled by above procedures */
        if (rte_eth_dev_get_changed_port(devs, &new_port_id))
                goto err;

        *port_id = new_port_id;
        return 0;
err:
        RTE_LOG(ERR, EAL, "Driver, cannot attach the device\n");
        return -1;
}

/* detach the physical device, then store the pci_addr of the device */
static int
rte_eth_dev_detach_pdev(uint8_t port_id, struct rte_pci_addr *addr)
{
        struct rte_pci_addr freed_addr;
        struct rte_pci_addr vp;

        if (addr == NULL)
                goto err;

        /* check whether the driver supports detach feature, or not */
        if (rte_eth_dev_is_detachable(port_id))
                goto err;

        /* get pci address by port id */
        if (rte_eth_dev_get_addr_by_port(port_id, &freed_addr))
                goto err;

        /* a zeroed pci addr means the port comes from a virtual device */
        vp.domain = vp.bus = vp.devid = vp.function = 0;
        if (rte_eal_compare_pci_addr(&vp, &freed_addr) == 0)
                goto err;

        /* invoke the close function of the driver,
         * also remove the device from pci_device_list */
        if (rte_eal_pci_close_one(&freed_addr))
                goto err;

        *addr = freed_addr;
        return 0;
err:
        RTE_LOG(ERR, EAL, "Driver, cannot detach the device\n");
        return -1;
}

/* attach the new virtual device, then store the port_id of the device */
static int
rte_eth_dev_attach_vdev(const char *vdevargs, uint8_t *port_id)
{
        char *name = NULL, *args = NULL;
        uint8_t new_port_id;
        struct rte_eth_dev devs[RTE_MAX_ETHPORTS];
        int ret = -1;

        if ((vdevargs == NULL) || (port_id == NULL))
                goto end;

        /* parse vdevargs, then retrieve device name and args */
        if (rte_eal_parse_devargs_str(vdevargs, &name, &args))
                goto end;

        /* save current port status */
        if (rte_eth_dev_save(devs, sizeof(devs)))
                goto end;
        /* walk through dev_driver_list to find the driver of the device,
         * then invoke the probe function of the driver.
         * TODO:
         * rte_eal_vdev_init() should return port_id,
         * And rte_eth_dev_save() and rte_eth_dev_get_changed_port()
         * should be removed. */
        if (rte_eal_vdev_init(name, args))
                goto end;
        /* get port_id enabled by above procedures */
        if (rte_eth_dev_get_changed_port(devs, &new_port_id))
                goto end;
        ret = 0;
        *port_id = new_port_id;
end:
        if (name)
                free(name);
        if (args)
                free(args);

        if (ret < 0)
                RTE_LOG(ERR, EAL, "Driver, cannot attach the device\n");
        return ret;
}

/* detach the virtual device, then store the name of the device */
static int
rte_eth_dev_detach_vdev(uint8_t port_id, char *vdevname)
{
        char name[RTE_ETH_NAME_MAX_LEN];

        if (vdevname == NULL)
                goto err;

        /* check whether the driver supports detach feature, or not */
        if (rte_eth_dev_is_detachable(port_id))
                goto err;

        /* get device name by port id */
        if (rte_eth_dev_get_name_by_port(port_id, name))
                goto err;
        /* walk through dev_driver_list to find the driver of the device,
         * then invoke the close function of the driver */
        if (rte_eal_vdev_uninit(name))
                goto err;

        strncpy(vdevname, name, sizeof(name));
        return 0;
err:
        RTE_LOG(ERR, EAL, "Driver, cannot detach the device\n");
        return -1;
}

/* attach the new device, then store the port_id of the device */
int
rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
{
        struct rte_pci_addr addr;

        if ((devargs == NULL) || (port_id == NULL))
                return -EINVAL;

        if (eal_parse_pci_DomBDF(devargs, &addr) == 0)
                return rte_eth_dev_attach_pdev(&addr, port_id);
        else
                return rte_eth_dev_attach_vdev(devargs, port_id);
}

/* detach the device, then store the name of the device */
int
rte_eth_dev_detach(uint8_t port_id, char *name)
{
        struct rte_pci_addr addr;
        int ret;

        if (name == NULL)
                return -EINVAL;

        if (rte_eth_dev_get_device_type(port_id) == RTE_ETH_DEV_PCI) {
                ret = rte_eth_dev_get_addr_by_port(port_id, &addr);
                if (ret < 0)
                        return ret;

                ret = rte_eth_dev_detach_pdev(port_id, &addr);
                if (ret == 0)
                        snprintf(name, RTE_ETH_NAME_MAX_LEN,
                                "%04x:%02x:%02x.%d",
                                addr.domain, addr.bus,
                                addr.devid, addr.function);

                return ret;
        } else
                return rte_eth_dev_detach_vdev(port_id, name);
}
#else /* RTE_LIBRTE_EAL_HOTPLUG */
int
rte_eth_dev_attach(const char *devargs __rte_unused,
                        uint8_t *port_id __rte_unused)
{
        RTE_LOG(ERR, EAL, "Hotplug support isn't enabled\n");
        return -1;
}

/* detach the device, then store the name of the device */
int
rte_eth_dev_detach(uint8_t port_id __rte_unused,
                        char *name __rte_unused)
{
        RTE_LOG(ERR, EAL, "Hotplug support isn't enabled\n");
        return -1;
}
#endif /* RTE_LIBRTE_EAL_HOTPLUG */
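
/*
 * Illustrative use of the hotplug API above (the device strings are
 * examples only): rte_eth_dev_attach() accepts either a PCI address
 * or a virtual device specification, and rte_eth_dev_detach() returns
 * the name needed to re-attach the port later.
 *
 *      uint8_t port_id;
 *      char name[RTE_ETH_NAME_MAX_LEN];
 *
 *      if (rte_eth_dev_attach("0000:02:00.0", &port_id) == 0) {
 *              ... use the port ...
 *              rte_eth_dev_detach(port_id, name);
 *      }
 */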

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -ENOMEM;
                }
        } else { /* re-configure */
                FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

int
rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
}

int
rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
}

int
rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
}

int
rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -ENOMEM;
                }
        } else { /* re-configure */
                FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

static int
rte_eth_dev_check_vf_rss_rxq_num(uint8_t port_id, uint16_t nb_rx_q)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

        switch (nb_rx_q) {
        case 1:
        case 2:
                RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
                break;
        case 4:
                RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
                break;
        default:
                return -EINVAL;
        }

        RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
        RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
                dev->pci_dev->max_vfs * nb_rx_q;

        return 0;
}

static int
rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                          const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

        if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
                /* check multi-queue mode */
                if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
                    (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) ||
                    (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
                        /* SRIOV only works in VMDq enable mode */
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "wrong VMDQ mq_mode rx %u tx %u\n",
                                        port_id,
                                        dev_conf->rxmode.mq_mode,
                                        dev_conf->txmode.mq_mode);
                        return -EINVAL;
                }

                switch (dev_conf->rxmode.mq_mode) {
                case ETH_MQ_RX_VMDQ_DCB:
                case ETH_MQ_RX_VMDQ_DCB_RSS:
                        /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "unsupported VMDQ mq_mode rx %u\n",
                                        port_id, dev_conf->rxmode.mq_mode);
                        return -EINVAL;
                case ETH_MQ_RX_RSS:
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "Rx mq mode is changed from "
                                        "mq_mode %u into VMDQ mq_mode %u\n",
                                        port_id,
                                        dev_conf->rxmode.mq_mode,
                                        dev->data->dev_conf.rxmode.mq_mode);
                        /* fallthrough: plain RSS is remapped onto VMDQ+RSS */
                case ETH_MQ_RX_VMDQ_RSS:
                        dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
                        if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
                                if (rte_eth_dev_check_vf_rss_rxq_num(port_id, nb_rx_q) != 0) {
                                        PMD_DEBUG_TRACE("ethdev port_id=%d"
                                                        " SRIOV active, invalid queue"
                                                        " number for VMDQ RSS, allowed"
                                                        " values are 1, 2 or 4\n",
                                                        port_id);
                                        return -EINVAL;
                                }
                        break;
                default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
                        /* if no mq mode was configured, use the default scheme */
                        dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
                        if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
                                RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
                        break;
                }

                switch (dev_conf->txmode.mq_mode) {
                case ETH_MQ_TX_VMDQ_DCB:
                        /* DCB VMDQ in SRIOV mode, not implemented yet */
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "unsupported VMDQ mq_mode tx %u\n",
                                        port_id, dev_conf->txmode.mq_mode);
                        return -EINVAL;
                default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
                        /* if no mq mode was configured, use the default scheme */
                        dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
                        break;
                }

                /* check valid queue number */
                if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
                    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
                                        "queue number must be less than or equal to %d\n",
                                        port_id,
                                        RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
                        return -EINVAL;
                }
        } else {
                /* For VMDQ+DCB mode check our configuration before we go further */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_conf *conf;

                        if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
                                                "!= %d\n",
                                                port_id, ETH_VMDQ_DCB_NUM_QUEUES);
                                return -EINVAL;
                        }
                        conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
                        if (!(conf->nb_queue_pools == ETH_16_POOLS ||
                              conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
                                                "nb_queue_pools must be %d or %d\n",
                                                port_id, ETH_16_POOLS, ETH_32_POOLS);
                                return -EINVAL;
                        }
                }
                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_tx_conf *conf;

                        if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q "
                                                "!= %d\n",
                                                port_id, ETH_VMDQ_DCB_NUM_QUEUES);
                                return -EINVAL;
                        }
                        conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
                        if (!(conf->nb_queue_pools == ETH_16_POOLS ||
                              conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
                                                "nb_queue_pools must be %d or %d\n",
                                                port_id, ETH_16_POOLS, ETH_32_POOLS);
                                return -EINVAL;
                        }
                }

                /* For DCB mode check our configuration before we go further */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
                        const struct rte_eth_dcb_rx_conf *conf;

                        if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
                                                "!= %d\n",
                                                port_id, ETH_DCB_NUM_QUEUES);
                                return -EINVAL;
                        }
                        conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
                        if (!(conf->nb_tcs == ETH_4_TCS ||
                              conf->nb_tcs == ETH_8_TCS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
                                                "nb_tcs must be %d or %d\n",
                                                port_id, ETH_4_TCS, ETH_8_TCS);
                                return -EINVAL;
                        }
                }

                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
                        const struct rte_eth_dcb_tx_conf *conf;

                        if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
                                                "!= %d\n",
                                                port_id, ETH_DCB_NUM_QUEUES);
                                return -EINVAL;
                        }
                        conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
                        if (!(conf->nb_tcs == ETH_4_TCS ||
                              conf->nb_tcs == ETH_8_TCS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
                                                "nb_tcs must be %d or %d\n",
                                                port_id, ETH_4_TCS, ETH_8_TCS);
                                return -EINVAL;
                        }
                }
        }
        return 0;
}

int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        int diag;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
                PMD_DEBUG_TRACE(
                        "Number of RX queues requested (%u) is greater than max supported(%d)\n",
                        nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
                PMD_DEBUG_TRACE(
                        "Number of TX queues requested (%u) is greater than max supported(%d)\n",
                        nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
        if (nb_rx_q > dev_info.max_rx_queues) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
                                port_id, nb_rx_q, dev_info.max_rx_queues);
                return -EINVAL;
        }
        if (nb_rx_q == 0) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
                return -EINVAL;
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
                                port_id, nb_tx_q, dev_info.max_tx_queues);
                return -EINVAL;
        }
        if (nb_tx_q == 0) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
                return -EINVAL;
        }

        /* Copy the dev_conf parameter into the dev structure */
        memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

        /*
         * If link state interrupt is enabled, check that the
         * device supports it.
         */
        if (dev_conf->intr_conf.lsc == 1) {
                const struct rte_pci_driver *pci_drv = &dev->driver->pci_drv;

                if (!(pci_drv->drv_flags & RTE_PCI_DRV_INTR_LSC)) {
                        PMD_DEBUG_TRACE("driver %s does not support lsc\n",
                                        pci_drv->name);
                        return -EINVAL;
                }
        }

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (dev_conf->rxmode.jumbo_frame == 1) {
                if (dev_conf->rxmode.max_rx_pkt_len >
                    dev_info.max_rx_pktlen) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " > max valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)dev_info.max_rx_pktlen);
                        return -EINVAL;
                } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " < min valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)ETHER_MIN_LEN);
                        return -EINVAL;
                }
        } else {
                if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
                        dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
                        /* Use default value */
                        dev->data->dev_conf.rxmode.max_rx_pkt_len =
                                                        ETHER_MAX_LEN;
        }

        /* multiple queue mode checking */
        diag = rte_eth_dev_check_mq_mode(port_id, nb_rx_q, nb_tx_q, dev_conf);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_check_mq_mode = %d\n",
                                port_id, diag);
                return diag;
        }

        /*
         * Setup new number of RX/TX queues and reconfigure device.
         */
        diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
                                port_id, diag);
                return diag;
        }

        diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                return diag;
        }

        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                return diag;
        }

        return 0;
}
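
/*
 * Typical initialization order built on rte_eth_dev_configure() (a
 * hedged sketch; error handling elided, mbuf_pool assumed to exist):
 *
 *      struct rte_eth_conf conf = {
 *              .rxmode = { .mq_mode = ETH_MQ_RX_NONE },
 *      };
 *
 *      rte_eth_dev_configure(port_id, 1, 1, &conf);
 *      rte_eth_rx_queue_setup(port_id, 0, 128,
 *                      rte_eth_dev_socket_id(port_id), NULL, mbuf_pool);
 *      rte_eth_tx_queue_setup(port_id, 0, 512,
 *                      rte_eth_dev_socket_id(port_id), NULL);
 *      rte_eth_dev_start(port_id);
 */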

static void
rte_eth_dev_config_restore(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct ether_addr addr;
        uint16_t i;
        uint32_t pool = 0;

        dev = &rte_eth_devices[port_id];

        rte_eth_dev_info_get(port_id, &dev_info);

        if (RTE_ETH_DEV_SRIOV(dev).active)
                pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

        /* replay MAC address configuration */
        for (i = 0; i < dev_info.max_mac_addrs; i++) {
                addr = dev->data->mac_addrs[i];

                /* skip zero address */
                if (is_zero_ether_addr(&addr))
                        continue;

                /* add address to the hardware */
                if (*dev->dev_ops->mac_addr_add &&
                        (dev->data->mac_pool_sel[i] & (1ULL << pool)))
                        (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
                else {
                        PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
                                        port_id);
                        /* exit the loop but do not return an error */
                        break;
                }
        }

        /* replay promiscuous configuration */
        if (rte_eth_promiscuous_get(port_id) == 1)
                rte_eth_promiscuous_enable(port_id);
        else if (rte_eth_promiscuous_get(port_id) == 0)
                rte_eth_promiscuous_disable(port_id);

        /* replay all multicast configuration */
        if (rte_eth_allmulticast_get(port_id) == 1)
                rte_eth_allmulticast_enable(port_id);
        else if (rte_eth_allmulticast_get(port_id) == 0)
                rte_eth_allmulticast_disable(port_id);
}

int
rte_eth_dev_start(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        int diag;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

        if (dev->data->dev_started != 0) {
                PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
                        " already started\n",
                        port_id);
                return 0;
        }

        diag = (*dev->dev_ops->dev_start)(dev);
        if (diag == 0)
                dev->data->dev_started = 1;
        else
                return diag;

        rte_eth_dev_config_restore(port_id);

        if (dev->data->dev_conf.intr_conf.lsc != 0) {
                FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
                (*dev->dev_ops->link_update)(dev, 0);
        }
        return 0;
}

void
rte_eth_dev_stop(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_RET();

        VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

        if (dev->data->dev_started == 0) {
                PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
                        " already stopped\n",
                        port_id);
                return;
        }

        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_stop)(dev);
}

int
rte_eth_dev_set_link_up(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
        return (*dev->dev_ops->dev_set_link_up)(dev);
}

int
rte_eth_dev_set_link_down(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
        return (*dev->dev_ops->dev_set_link_down)(dev);
}

void
rte_eth_dev_close(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_RET();

        VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_close)(dev);
}
1416
1417 int
1418 rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
1419                        uint16_t nb_rx_desc, unsigned int socket_id,
1420                        const struct rte_eth_rxconf *rx_conf,
1421                        struct rte_mempool *mp)
1422 {
1423         int ret;
1424         uint32_t mbp_buf_size;
1425         struct rte_eth_dev *dev;
1426         struct rte_eth_dev_info dev_info;
1427
1428         /* This function is only safe when called from the primary process
1429          * in a multi-process setup*/
1430         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1431
1432         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1433
1434         dev = &rte_eth_devices[port_id];
1435         if (rx_queue_id >= dev->data->nb_rx_queues) {
1436                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
1437                 return -EINVAL;
1438         }
1439
1440         if (dev->data->dev_started) {
1441                 PMD_DEBUG_TRACE(
1442                     "port %d must be stopped to allow configuration\n", port_id);
1443                 return -EBUSY;
1444         }
1445
1446         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1447         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1448
1449         /*
1450          * Check the size of the mbuf data buffer.
1451          * This value must be provided in the private data of the memory pool.
1452          * First check that the memory pool has valid private data.
1453          */
1454         rte_eth_dev_info_get(port_id, &dev_info);
1455         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1456                 PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
1457                                 mp->name, (int) mp->private_data_size,
1458                                 (int) sizeof(struct rte_pktmbuf_pool_private));
1459                 return -ENOSPC;
1460         }
1461         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1462
1463         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1464                 PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
1465                                 "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
1466                                 "=%d)\n",
1467                                 mp->name,
1468                                 (int)mbp_buf_size,
1469                                 (int)(RTE_PKTMBUF_HEADROOM +
1470                                       dev_info.min_rx_bufsize),
1471                                 (int)RTE_PKTMBUF_HEADROOM,
1472                                 (int)dev_info.min_rx_bufsize);
1473                 return -EINVAL;
1474         }
1475
1476         if (rx_conf == NULL)
1477                 rx_conf = &dev_info.default_rxconf;
1478
1479         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1480                                               socket_id, rx_conf, mp);
1481         if (!ret) {
1482                 if (!dev->data->min_rx_buf_size ||
1483                     dev->data->min_rx_buf_size > mbp_buf_size)
1484                         dev->data->min_rx_buf_size = mbp_buf_size;
1485         }
1486
1487         return ret;
1488 }
1489
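/*
 * Illustrative caller-side sketch (not part of the library): creating an
 * mbuf pool and setting up one RX queue on an already-configured port.
 * The pool name, element count, cache size and descriptor count are
 * arbitrary example values; passing a NULL rx_conf selects the driver
 * defaults reported by rte_eth_dev_info_get().
 *
 *	struct rte_mempool *pool;
 *
 *	pool = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *			2048 + RTE_PKTMBUF_HEADROOM, rte_socket_id());
 *	if (pool == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create mbuf pool\n");
 *	if (rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(),
 *			NULL, pool) != 0)
 *		rte_exit(EXIT_FAILURE, "RX queue setup failed\n");
 */
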
1490 int
1491 rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
1492                        uint16_t nb_tx_desc, unsigned int socket_id,
1493                        const struct rte_eth_txconf *tx_conf)
1494 {
1495         struct rte_eth_dev *dev;
1496         struct rte_eth_dev_info dev_info;
1497
1498         /* This function is only safe when called from the primary process
1499          * in a multi-process setup */
1500         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1501
1502         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1503
1504         dev = &rte_eth_devices[port_id];
1505         if (tx_queue_id >= dev->data->nb_tx_queues) {
1506                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1507                 return -EINVAL;
1508         }
1509
1510         if (dev->data->dev_started) {
1511                 PMD_DEBUG_TRACE(
1512                     "port %d must be stopped to allow configuration\n", port_id);
1513                 return -EBUSY;
1514         }
1515
1516         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1517         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1518
1519         rte_eth_dev_info_get(port_id, &dev_info);
1520
1521         if (tx_conf == NULL)
1522                 tx_conf = &dev_info.default_txconf;
1523
1524         return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
1525                                                socket_id, tx_conf);
1526 }
1527
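/*
 * Illustrative caller-side sketch (not part of the library): unlike RX,
 * a TX queue needs no mempool, and a NULL tx_conf likewise falls back to
 * the driver defaults. The descriptor count of 512 is an arbitrary
 * example value.
 *
 *	if (rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *			NULL) != 0)
 *		rte_exit(EXIT_FAILURE, "TX queue setup failed\n");
 */
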
1528 void
1529 rte_eth_promiscuous_enable(uint8_t port_id)
1530 {
1531         struct rte_eth_dev *dev;
1532
1533         VALID_PORTID_OR_RET(port_id);
1534         dev = &rte_eth_devices[port_id];
1535
1536         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1537         (*dev->dev_ops->promiscuous_enable)(dev);
1538         dev->data->promiscuous = 1;
1539 }
1540
1541 void
1542 rte_eth_promiscuous_disable(uint8_t port_id)
1543 {
1544         struct rte_eth_dev *dev;
1545
1546         VALID_PORTID_OR_RET(port_id);
1547         dev = &rte_eth_devices[port_id];
1548
1549         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1550         dev->data->promiscuous = 0;
1551         (*dev->dev_ops->promiscuous_disable)(dev);
1552 }
1553
1554 int
1555 rte_eth_promiscuous_get(uint8_t port_id)
1556 {
1557         struct rte_eth_dev *dev;
1558
1559         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1560
1561         dev = &rte_eth_devices[port_id];
1562         return dev->data->promiscuous;
1563 }
1564
1565 void
1566 rte_eth_allmulticast_enable(uint8_t port_id)
1567 {
1568         struct rte_eth_dev *dev;
1569
1570         VALID_PORTID_OR_RET(port_id);
1571         dev = &rte_eth_devices[port_id];
1572
1573         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1574         (*dev->dev_ops->allmulticast_enable)(dev);
1575         dev->data->all_multicast = 1;
1576 }
1577
1578 void
1579 rte_eth_allmulticast_disable(uint8_t port_id)
1580 {
1581         struct rte_eth_dev *dev;
1582
1583         VALID_PORTID_OR_RET(port_id);
1584         dev = &rte_eth_devices[port_id];
1585
1586         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1587         dev->data->all_multicast = 0;
1588         (*dev->dev_ops->allmulticast_disable)(dev);
1589 }
1590
1591 int
1592 rte_eth_allmulticast_get(uint8_t port_id)
1593 {
1594         struct rte_eth_dev *dev;
1595
1596         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1597
1598         dev = &rte_eth_devices[port_id];
1599         return dev->data->all_multicast;
1600 }
1601
1602 static inline int
1603 rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
1604                                 struct rte_eth_link *link)
1605 {
1606         struct rte_eth_link *dst = link;
1607         struct rte_eth_link *src = &(dev->data->dev_link);
1608
1609         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1610                                         *(uint64_t *)src) == 0)
1611                 return -1;
1612
1613         return 0;
1614 }
1615
1616 void
1617 rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
1618 {
1619         struct rte_eth_dev *dev;
1620
1621         VALID_PORTID_OR_RET(port_id);
1622         dev = &rte_eth_devices[port_id];
1623
1624         if (dev->data->dev_conf.intr_conf.lsc != 0)
1625                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1626         else {
1627                 FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1628                 (*dev->dev_ops->link_update)(dev, 1);
1629                 *eth_link = dev->data->dev_link;
1630         }
1631 }
1632
1633 void
1634 rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
1635 {
1636         struct rte_eth_dev *dev;
1637
1638         VALID_PORTID_OR_RET(port_id);
1639         dev = &rte_eth_devices[port_id];
1640
1641         if (dev->data->dev_conf.intr_conf.lsc != 0)
1642                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1643         else {
1644                 FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1645                 (*dev->dev_ops->link_update)(dev, 0);
1646                 *eth_link = dev->data->dev_link;
1647         }
1648 }
1649
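/*
 * Illustrative caller-side sketch (not part of the library): polling the
 * link state without waiting and printing the result, using the field
 * names of struct rte_eth_link.
 *
 *	struct rte_eth_link link;
 *
 *	rte_eth_link_get_nowait(port_id, &link);
 *	if (link.link_status)
 *		printf("port %u: up, %u Mbps, %s-duplex\n", port_id,
 *			link.link_speed,
 *			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
 *				"full" : "half");
 *	else
 *		printf("port %u: down\n", port_id);
 */
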
1650 int
1651 rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
1652 {
1653         struct rte_eth_dev *dev;
1654
1655         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1656
1657         dev = &rte_eth_devices[port_id];
1658         memset(stats, 0, sizeof(*stats));
1659
1660         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1661         (*dev->dev_ops->stats_get)(dev, stats);
1662         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1663         return 0;
1664 }
1665
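/*
 * Illustrative caller-side sketch (not part of the library): dumping the
 * basic counters. rx_nombuf reports mbuf allocation failures and is
 * filled in from dev->data above rather than by the PMD.
 *
 *	struct rte_eth_stats stats;
 *
 *	if (rte_eth_stats_get(port_id, &stats) == 0)
 *		printf("rx=%" PRIu64 " tx=%" PRIu64 " rx_nombuf=%" PRIu64 "\n",
 *			stats.ipackets, stats.opackets, stats.rx_nombuf);
 */
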
1666 void
1667 rte_eth_stats_reset(uint8_t port_id)
1668 {
1669         struct rte_eth_dev *dev;
1670
1671         VALID_PORTID_OR_RET(port_id);
1672         dev = &rte_eth_devices[port_id];
1673
1674         FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1675         (*dev->dev_ops->stats_reset)(dev);
1676 }
1677
1678 /* retrieve ethdev extended statistics */
1679 int
1680 rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
1681         unsigned n)
1682 {
1683         struct rte_eth_stats eth_stats;
1684         struct rte_eth_dev *dev;
1685         unsigned count, i, q;
1686         uint64_t val, *stats_ptr;
1687
1688         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1689
1690         dev = &rte_eth_devices[port_id];
1691
1692         /* implemented by the driver */
1693         if (dev->dev_ops->xstats_get != NULL)
1694                 return (*dev->dev_ops->xstats_get)(dev, xstats, n);
1695
1696         /* else, return generic statistics */
1697         count = RTE_NB_STATS;
1698         count += dev->data->nb_rx_queues * RTE_NB_RXQ_STATS;
1699         count += dev->data->nb_tx_queues * RTE_NB_TXQ_STATS;
1700         if (n < count)
1701                 return count;
1702
1703         /* now fill the xstats structure */
1704
1705         count = 0;
1706         rte_eth_stats_get(port_id, &eth_stats);
1707
1708         /* global stats */
1709         for (i = 0; i < RTE_NB_STATS; i++) {
1710                 stats_ptr = RTE_PTR_ADD(&eth_stats,
1711                                         rte_stats_strings[i].offset);
1712                 val = *stats_ptr;
1713                 snprintf(xstats[count].name, sizeof(xstats[count].name),
1714                         "%s", rte_stats_strings[i].name);
1715                 xstats[count++].value = val;
1716         }
1717
1718         /* per-rxq stats */
1719         for (q = 0; q < dev->data->nb_rx_queues; q++) {
1720                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
1721                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1722                                         rte_rxq_stats_strings[i].offset +
1723                                         q * sizeof(uint64_t));
1724                         val = *stats_ptr;
1725                         snprintf(xstats[count].name, sizeof(xstats[count].name),
1726                                 "rx_queue_%u_%s", q,
1727                                 rte_rxq_stats_strings[i].name);
1728                         xstats[count++].value = val;
1729                 }
1730         }
1731
1732         /* per-txq stats */
1733         for (q = 0; q < dev->data->nb_tx_queues; q++) {
1734                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
1735                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1736                                         rte_txq_stats_strings[i].offset +
1737                                         q * sizeof(uint64_t));
1738                         val = *stats_ptr;
1739                         snprintf(xstats[count].name, sizeof(xstats[count].name),
1740                                 "tx_queue_%u_%s", q,
1741                                 rte_txq_stats_strings[i].name);
1742                         xstats[count++].value = val;
1743                 }
1744         }
1745
1746         return count;
1747 }
1748
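/*
 * Illustrative caller-side sketch (not part of the library): since the
 * function returns the required count when the supplied array is too
 * small, callers usually size the array with a first call and fill it
 * with a second one. The NULL/0 first call relies on the convention,
 * honoured by the generic path above, that the array is not touched when
 * n is less than the required count.
 *
 *	struct rte_eth_xstats *xs;
 *	int nb, i;
 *
 *	nb = rte_eth_xstats_get(port_id, NULL, 0);
 *	if (nb <= 0)
 *		return;
 *	xs = malloc(sizeof(*xs) * nb);
 *	if (xs == NULL)
 *		return;
 *	nb = rte_eth_xstats_get(port_id, xs, nb);
 *	for (i = 0; i < nb; i++)
 *		printf("%s: %" PRIu64 "\n", xs[i].name, xs[i].value);
 *	free(xs);
 */
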
1749 /* reset ethdev extended statistics */
1750 void
1751 rte_eth_xstats_reset(uint8_t port_id)
1752 {
1753         struct rte_eth_dev *dev;
1754
1755         VALID_PORTID_OR_RET(port_id);
1756         dev = &rte_eth_devices[port_id];
1757
1758         /* implemented by the driver */
1759         if (dev->dev_ops->xstats_reset != NULL) {
1760                 (*dev->dev_ops->xstats_reset)(dev);
1761                 return;
1762         }
1763
1764         /* fallback to default */
1765         rte_eth_stats_reset(port_id);
1766 }
1767
1768 static int
1769 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1770                 uint8_t is_rx)
1771 {
1772         struct rte_eth_dev *dev;
1773
1774         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1775
1776         dev = &rte_eth_devices[port_id];
1777
1778         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1779         return (*dev->dev_ops->queue_stats_mapping_set)
1780                         (dev, queue_id, stat_idx, is_rx);
1781 }
1782
1783
1784 int
1785 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1786                 uint8_t stat_idx)
1787 {
1788         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1789                         STAT_QMAP_TX);
1790 }
1791
1792
1793 int
1794 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1795                 uint8_t stat_idx)
1796 {
1797         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1798                         STAT_QMAP_RX);
1799 }
1800
1801
1802 void
1803 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1804 {
1805         struct rte_eth_dev *dev;
1806
1807         VALID_PORTID_OR_RET(port_id);
1808         dev = &rte_eth_devices[port_id];
1809
1810         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
1811
1812         FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1813         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1814         dev_info->pci_dev = dev->pci_dev;
1815         if (dev->driver)
1816                 dev_info->driver_name = dev->driver->pci_drv.name;
1817 }
1818
1819 void
1820 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1821 {
1822         struct rte_eth_dev *dev;
1823
1824         VALID_PORTID_OR_RET(port_id);
1825         dev = &rte_eth_devices[port_id];
1826         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1827 }
1828
1829
1830 int
1831 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1832 {
1833         struct rte_eth_dev *dev;
1834
1835         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1836
1837         dev = &rte_eth_devices[port_id];
1838         *mtu = dev->data->mtu;
1839         return 0;
1840 }
1841
1842 int
1843 rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
1844 {
1845         int ret;
1846         struct rte_eth_dev *dev;
1847
1848         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1849         dev = &rte_eth_devices[port_id];
1850         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
1851
1852         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
1853         if (!ret)
1854                 dev->data->mtu = mtu;
1855
1856         return ret;
1857 }
1858
1859 int
1860 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1861 {
1862         struct rte_eth_dev *dev;
1863
1864         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1865         dev = &rte_eth_devices[port_id];
1866         if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1867                 PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1868                 return -ENOSYS;
1869         }
1870
1871         if (vlan_id > 4095) {
1872                 PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1873                                 port_id, (unsigned) vlan_id);
1874                 return -EINVAL;
1875         }
1876         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1877
1878         return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1879 }
1880
1881 int
1882 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1883 {
1884         struct rte_eth_dev *dev;
1885
1886         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1887         dev = &rte_eth_devices[port_id];
1888         if (rx_queue_id >= dev->data->nb_rx_queues) {
1889                 PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
1890                 return -EINVAL;
1891         }
1892
1893         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1894         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1895
1896         return 0;
1897 }
1898
1899 int
1900 rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
1901 {
1902         struct rte_eth_dev *dev;
1903
1904         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1905         dev = &rte_eth_devices[port_id];
1906         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1907         (*dev->dev_ops->vlan_tpid_set)(dev, tpid);
1908
1909         return 0;
1910 }
1911
1912 int
1913 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
1914 {
1915         struct rte_eth_dev *dev;
1916         int ret = 0;
1917         int mask = 0;
1918         int cur, org = 0;
1919
1920         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1921         dev = &rte_eth_devices[port_id];
1922
1923         /* check which options were changed by the application */
1924         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
1925         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
1926         if (cur != org) {
1927                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
1928                 mask |= ETH_VLAN_STRIP_MASK;
1929         }
1930
1931         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
1932         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
1933         if (cur != org) {
1934                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
1935                 mask |= ETH_VLAN_FILTER_MASK;
1936         }
1937
1938         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
1939         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
1940         if (cur != org) {
1941                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
1942                 mask |= ETH_VLAN_EXTEND_MASK;
1943         }
1944
1945         /* no change */
1946         if (mask == 0)
1947                 return ret;
1948
1949         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
1950         (*dev->dev_ops->vlan_offload_set)(dev, mask);
1951
1952         return ret;
1953 }
1954
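/*
 * Illustrative caller-side sketch (not part of the library): offload_mask
 * describes the complete desired VLAN offload state, so the usual way to
 * toggle a single flag is read-modify-write via
 * rte_eth_dev_get_vlan_offload() below.
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	mask |= ETH_VLAN_STRIP_OFFLOAD;
 *	mask &= ~ETH_VLAN_FILTER_OFFLOAD;
 *	rte_eth_dev_set_vlan_offload(port_id, mask);
 */
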
1955 int
1956 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1957 {
1958         struct rte_eth_dev *dev;
1959         int ret = 0;
1960
1961         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1962         dev = &rte_eth_devices[port_id];
1963
1964         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1965                 ret |= ETH_VLAN_STRIP_OFFLOAD;
1966
1967         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1968                 ret |= ETH_VLAN_FILTER_OFFLOAD;
1969
1970         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1971                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
1972
1973         return ret;
1974 }
1975
1976 int
1977 rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
1978 {
1979         struct rte_eth_dev *dev;
1980
1981         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1982         dev = &rte_eth_devices[port_id];
1983         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
1984         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
1985
1986         return 0;
1987 }
1988
1989 int
1990 rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
1991                                       struct rte_fdir_filter *fdir_filter,
1992                                       uint8_t queue)
1993 {
1994         struct rte_eth_dev *dev;
1995
1996         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1997         dev = &rte_eth_devices[port_id];
1998
1999         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
2000                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
2001                                 port_id, dev->data->dev_conf.fdir_conf.mode);
2002                 return -ENOSYS;
2003         }
2004
2005         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
2006              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
2007             && (fdir_filter->port_src || fdir_filter->port_dst)) {
2008                 PMD_DEBUG_TRACE("Ports are meaningless for SCTP and "
2009                                 "NONE l4type; source and destination "
2010                                 "ports must be zero\n");
2011                 return -EINVAL;
2012         }
2013
2014         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_signature_filter, -ENOTSUP);
2015         return (*dev->dev_ops->fdir_add_signature_filter)(dev, fdir_filter,
2016                                                                 queue);
2017 }
2018
2019 int
2020 rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
2021                                          struct rte_fdir_filter *fdir_filter,
2022                                          uint8_t queue)
2023 {
2024         struct rte_eth_dev *dev;
2025
2026         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2027         dev = &rte_eth_devices[port_id];
2028
2029         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
2030                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
2031                                 port_id, dev->data->dev_conf.fdir_conf.mode);
2032                 return -ENOSYS;
2033         }
2034
2035         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
2036              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
2037             && (fdir_filter->port_src || fdir_filter->port_dst)) {
2038                 PMD_DEBUG_TRACE("Ports are meaningless for SCTP and "
2039                                 "NONE l4type; source and destination "
2040                                 "ports must be zero\n");
2041                 return -EINVAL;
2042         }
2043
2044         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_signature_filter, -ENOTSUP);
2045         return (*dev->dev_ops->fdir_update_signature_filter)(dev, fdir_filter,
2046                                                                 queue);
2047
2048 }
2049
2050 int
2051 rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
2052                                          struct rte_fdir_filter *fdir_filter)
2053 {
2054         struct rte_eth_dev *dev;
2055
2056         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2057         dev = &rte_eth_devices[port_id];
2058
2059         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
2060                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
2061                                 port_id, dev->data->dev_conf.fdir_conf.mode);
2062                 return -ENOSYS;
2063         }
2064
2065         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
2066              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
2067             && (fdir_filter->port_src || fdir_filter->port_dst)) {
2068                 PMD_DEBUG_TRACE("Ports are meaningless for SCTP and "
2069                                 "NONE l4type; source and destination "
2070                                 "ports must be zero\n");
2071                 return -EINVAL;
2072         }
2073
2074         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_signature_filter, -ENOTSUP);
2075         return (*dev->dev_ops->fdir_remove_signature_filter)(dev, fdir_filter);
2076 }
2077
2078 int
2079 rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir)
2080 {
2081         struct rte_eth_dev *dev;
2082
2083         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2084         dev = &rte_eth_devices[port_id];
2085         if (!(dev->data->dev_conf.fdir_conf.mode)) {
2086                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
2087                 return -ENOSYS;
2088         }
2089
2090         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_infos_get, -ENOTSUP);
2091
2092         (*dev->dev_ops->fdir_infos_get)(dev, fdir);
2093         return 0;
2094 }
2095
2096 int
2097 rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
2098                                     struct rte_fdir_filter *fdir_filter,
2099                                     uint16_t soft_id, uint8_t queue,
2100                                     uint8_t drop)
2101 {
2102         struct rte_eth_dev *dev;
2103
2104         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2105         dev = &rte_eth_devices[port_id];
2106
2107         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
2108                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
2109                                 port_id, dev->data->dev_conf.fdir_conf.mode);
2110                 return -ENOSYS;
2111         }
2112
2113         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
2114              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
2115             && (fdir_filter->port_src || fdir_filter->port_dst)) {
2116                 PMD_DEBUG_TRACE("Ports are meaningless for SCTP and "
2117                                 "NONE l4type; source and destination "
2118                                 "ports must be zero\n");
2119                 return -EINVAL;
2120         }
2121
2122         /* For now IPv6 is not supported with perfect filter */
2123         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
2124                 return -ENOTSUP;
2125
2126         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_perfect_filter, -ENOTSUP);
2127         return (*dev->dev_ops->fdir_add_perfect_filter)(dev, fdir_filter,
2128                                                                 soft_id, queue,
2129                                                                 drop);
2130 }
2131
2132 int
2133 rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
2134                                        struct rte_fdir_filter *fdir_filter,
2135                                        uint16_t soft_id, uint8_t queue,
2136                                        uint8_t drop)
2137 {
2138         struct rte_eth_dev *dev;
2139
2140         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2141         dev = &rte_eth_devices[port_id];
2142
2143         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
2144                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
2145                                 port_id, dev->data->dev_conf.fdir_conf.mode);
2146                 return -ENOSYS;
2147         }
2148
2149         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
2150              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
2151             && (fdir_filter->port_src || fdir_filter->port_dst)) {
2152                 PMD_DEBUG_TRACE("Ports are meaningless for SCTP and "
2153                                 "NONE l4type; source and destination "
2154                                 "ports must be zero\n");
2155                 return -EINVAL;
2156         }
2157
2158         /* For now IPv6 is not supported with perfect filter */
2159         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
2160                 return -ENOTSUP;
2161
2162         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_perfect_filter, -ENOTSUP);
2163         return (*dev->dev_ops->fdir_update_perfect_filter)(dev, fdir_filter,
2164                                                         soft_id, queue, drop);
2165 }
2166
2167 int
2168 rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
2169                                        struct rte_fdir_filter *fdir_filter,
2170                                        uint16_t soft_id)
2171 {
2172         struct rte_eth_dev *dev;
2173
2174         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2175         dev = &rte_eth_devices[port_id];
2176
2177         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
2178                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
2179                                 port_id, dev->data->dev_conf.fdir_conf.mode);
2180                 return -ENOSYS;
2181         }
2182
2183         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
2184              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
2185             && (fdir_filter->port_src || fdir_filter->port_dst)) {
2186                 PMD_DEBUG_TRACE("Ports are meaningless for SCTP and "
2187                                 "NONE l4type; source and destination "
2188                                 "ports must be zero\n");
2189                 return -EINVAL;
2190         }
2191
2192         /* For now IPv6 is not supported with perfect filter */
2193         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
2194                 return -ENOTSUP;
2195
2196         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_perfect_filter, -ENOTSUP);
2197         return (*dev->dev_ops->fdir_remove_perfect_filter)(dev, fdir_filter,
2198                                                                 soft_id);
2199 }
2200
2201 int
2202 rte_eth_dev_fdir_set_masks(uint8_t port_id, struct rte_fdir_masks *fdir_mask)
2203 {
2204         struct rte_eth_dev *dev;
2205
2206         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2207         dev = &rte_eth_devices[port_id];
2208         if (!(dev->data->dev_conf.fdir_conf.mode)) {
2209                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
2210                 return -ENOSYS;
2211         }
2212
2213         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_set_masks, -ENOTSUP);
2214         return (*dev->dev_ops->fdir_set_masks)(dev, fdir_mask);
2215 }
2216
2217 int
2218 rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
2219 {
2220         struct rte_eth_dev *dev;
2221
2222         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2223         dev = &rte_eth_devices[port_id];
2224         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2225         memset(fc_conf, 0, sizeof(*fc_conf));
2226         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
2227 }
2228
2229 int
2230 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
2231 {
2232         struct rte_eth_dev *dev;
2233
2234         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2235         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2236                 PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
2237                 return -EINVAL;
2238         }
2239
2240         dev = &rte_eth_devices[port_id];
2241         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2242         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
2243 }
2244
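/*
 * Illustrative caller-side sketch (not part of the library): reading the
 * current flow control configuration, switching to full (RX and TX)
 * pause, and writing it back. Whether the get side is supported depends
 * on the PMD.
 *
 *	struct rte_eth_fc_conf fc_conf;
 *
 *	if (rte_eth_dev_flow_ctrl_get(port_id, &fc_conf) == 0) {
 *		fc_conf.mode = RTE_FC_FULL;
 *		rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 *	}
 */
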
2245 int
2246 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
2247 {
2248         struct rte_eth_dev *dev;
2249
2250         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2251         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2252                 PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
2253                 return -EINVAL;
2254         }
2255
2256         dev = &rte_eth_devices[port_id];
2257         /* High water and low water validation are device-specific */
2258         if  (*dev->dev_ops->priority_flow_ctrl_set)
2259                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
2260         return -ENOTSUP;
2261 }
2262
2263 static int
2264 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2265                         uint16_t reta_size)
2266 {
2267         uint16_t i, num;
2268
2269         if (!reta_conf)
2270                 return -EINVAL;
2271
2272         if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
2273                 PMD_DEBUG_TRACE("Invalid reta size, should be %u aligned\n",
2274                                                         RTE_RETA_GROUP_SIZE);
2275                 return -EINVAL;
2276         }
2277
2278         num = reta_size / RTE_RETA_GROUP_SIZE;
2279         for (i = 0; i < num; i++) {
2280                 if (reta_conf[i].mask)
2281                         return 0;
2282         }
2283
2284         return -EINVAL;
2285 }
2286
2287 static int
2288 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2289                          uint16_t reta_size,
2290                          uint8_t max_rxq)
2291 {
2292         uint16_t i, idx, shift;
2293
2294         if (!reta_conf)
2295                 return -EINVAL;
2296
2297         if (max_rxq == 0) {
2298                 PMD_DEBUG_TRACE("No receive queue is available\n");
2299                 return -EINVAL;
2300         }
2301
2302         for (i = 0; i < reta_size; i++) {
2303                 idx = i / RTE_RETA_GROUP_SIZE;
2304                 shift = i % RTE_RETA_GROUP_SIZE;
2305                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2306                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2307                         PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
2308                                 "the number of RX queues: %u\n", idx, shift,
2309                                 reta_conf[idx].reta[shift], max_rxq);
2310                         return -EINVAL;
2311                 }
2312         }
2313
2314         return 0;
2315 }
2316
2317 int
2318 rte_eth_dev_rss_reta_update(uint8_t port_id,
2319                             struct rte_eth_rss_reta_entry64 *reta_conf,
2320                             uint16_t reta_size)
2321 {
2322         struct rte_eth_dev *dev;
2323         int ret;
2324
2325         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2326         /* Check mask bits */
2327         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2328         if (ret < 0)
2329                 return ret;
2330
2331         dev = &rte_eth_devices[port_id];
2332
2333         /* Check entry value */
2334         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2335                                 dev->data->nb_rx_queues);
2336         if (ret < 0)
2337                 return ret;
2338
2339         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2340         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
2341 }
2342
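/*
 * Illustrative caller-side sketch (not part of the library): spreading a
 * 128-entry redirection table evenly across nb_q RX queues. Real code
 * should take the table size from dev_info.reta_size instead of the
 * hard-coded 128 assumed here.
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[128 / RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	for (i = 0; i < 128; i++) {
 *		reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *			1ULL << (i % RTE_RETA_GROUP_SIZE);
 *		reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *			i % nb_q;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta_conf, 128);
 */
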
2343 int
2344 rte_eth_dev_rss_reta_query(uint8_t port_id,
2345                            struct rte_eth_rss_reta_entry64 *reta_conf,
2346                            uint16_t reta_size)
2347 {
2348         struct rte_eth_dev *dev;
2349         int ret;
2350
2351         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2355
2356         /* Check mask bits */
2357         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2358         if (ret < 0)
2359                 return ret;
2360
2361         dev = &rte_eth_devices[port_id];
2362         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2363         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
2364 }
2365
2366 int
2367 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
2368 {
2369         struct rte_eth_dev *dev;
2370         uint64_t rss_hash_protos;
2371
2372         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2373         rss_hash_protos = rss_conf->rss_hf;
2374         if ((rss_hash_protos != 0) &&
2375             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
2376                 PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%" PRIx64 "\n",
2377                                 rss_hash_protos);
2378                 return -EINVAL;
2379         }
2380         dev = &rte_eth_devices[port_id];
2381         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2382         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
2383 }
2384
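/*
 * Illustrative caller-side sketch (not part of the library): enabling RSS
 * over the IP protocol group. A NULL rss_key is commonly taken by PMDs to
 * mean "keep the current hash key", though that behaviour is
 * driver-dependent.
 *
 *	struct rte_eth_rss_conf rss_conf = {
 *		.rss_key = NULL,
 *		.rss_hf = ETH_RSS_IP,
 *	};
 *
 *	rte_eth_dev_rss_hash_update(port_id, &rss_conf);
 */
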
2385 int
2386 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
2387                               struct rte_eth_rss_conf *rss_conf)
2388 {
2389         struct rte_eth_dev *dev;
2390
2391         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2392         dev = &rte_eth_devices[port_id];
2393         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2394         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
2395 }
2396
2397 int
2398 rte_eth_dev_udp_tunnel_add(uint8_t port_id,
2399                            struct rte_eth_udp_tunnel *udp_tunnel)
2400 {
2401         struct rte_eth_dev *dev;
2402
2403         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2404         if (udp_tunnel == NULL) {
2405                 PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2406                 return -EINVAL;
2407         }
2408
2409         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2410                 PMD_DEBUG_TRACE("Invalid tunnel type\n");
2411                 return -EINVAL;
2412         }
2413
2414         dev = &rte_eth_devices[port_id];
2415         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_add, -ENOTSUP);
2416         return (*dev->dev_ops->udp_tunnel_add)(dev, udp_tunnel);
2417 }
2418
2419 int
2420 rte_eth_dev_udp_tunnel_delete(uint8_t port_id,
2421                               struct rte_eth_udp_tunnel *udp_tunnel)
2422 {
2423         struct rte_eth_dev *dev;
2424
2425         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2426         dev = &rte_eth_devices[port_id];
2427
2428         if (udp_tunnel == NULL) {
2429                 PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2430                 return -EINVAL;
2431         }
2432
2433         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2434                 PMD_DEBUG_TRACE("Invalid tunnel type\n");
2435                 return -EINVAL;
2436         }
2437
2438         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_del, -ENOTSUP);
2439         return (*dev->dev_ops->udp_tunnel_del)(dev, udp_tunnel);
2440 }
2441
2442 int
2443 rte_eth_led_on(uint8_t port_id)
2444 {
2445         struct rte_eth_dev *dev;
2446
2447         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2448         dev = &rte_eth_devices[port_id];
2449         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2450         return (*dev->dev_ops->dev_led_on)(dev);
2451 }
2452
2453 int
2454 rte_eth_led_off(uint8_t port_id)
2455 {
2456         struct rte_eth_dev *dev;
2457
2458         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2459         dev = &rte_eth_devices[port_id];
2460         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2461         return (*dev->dev_ops->dev_led_off)(dev);
2462 }
2463
2464 /*
2465  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2466  * an empty spot.
2467  */
2468 static int
2469 get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2470 {
2471         struct rte_eth_dev_info dev_info;
2472         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2473         unsigned i;
2474
2475         rte_eth_dev_info_get(port_id, &dev_info);
2476
2477         for (i = 0; i < dev_info.max_mac_addrs; i++)
2478                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2479                         return i;
2480
2481         return -1;
2482 }
2483
2484 static const struct ether_addr null_mac_addr;
2485
2486 int
2487 rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
2488                         uint32_t pool)
2489 {
2490         struct rte_eth_dev *dev;
2491         int index;
2492         uint64_t pool_mask;
2493
2494         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2495         dev = &rte_eth_devices[port_id];
2496         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2497
2498         if (is_zero_ether_addr(addr)) {
2499                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2500                         port_id);
2501                 return -EINVAL;
2502         }
2503         if (pool >= ETH_64_POOLS) {
2504                 PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2505                 return -EINVAL;
2506         }
2507
2508         index = get_mac_addr_index(port_id, addr);
2509         if (index < 0) {
2510                 index = get_mac_addr_index(port_id, &null_mac_addr);
2511                 if (index < 0) {
2512                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2513                                 port_id);
2514                         return -ENOSPC;
2515                 }
2516         } else {
2517                 pool_mask = dev->data->mac_pool_sel[index];
2518
2519                 /* If both the MAC address and the pool are already set, do nothing */
2520                 if (pool_mask & (1ULL << pool))
2521                         return 0;
2522         }
2523
2524         /* Update NIC */
2525         (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2526
2527         /* Update address in NIC data structure */
2528         ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2529
2530         /* Update pool bitmap in NIC data structure */
2531         dev->data->mac_pool_sel[index] |= (1ULL << pool);
2532
2533         return 0;
2534 }
2535
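/*
 * Illustrative caller-side sketch (not part of the library): adding a
 * locally administered unicast address to the default pool 0. The address
 * value is an arbitrary example.
 *
 *	struct ether_addr addr = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *	};
 *
 *	if (rte_eth_dev_mac_addr_add(port_id, &addr, 0) != 0)
 *		printf("could not add MAC address\n");
 */
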
2536 int
2537 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
2538 {
2539         struct rte_eth_dev *dev;
2540         int index;
2541
2542         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2543         dev = &rte_eth_devices[port_id];
2544         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2545
2546         index = get_mac_addr_index(port_id, addr);
2547         if (index == 0) {
2548                 PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2549                 return -EADDRINUSE;
2550         } else if (index < 0)
2551                 return 0;  /* Do nothing if address wasn't found */
2552
2553         /* Update NIC */
2554         (*dev->dev_ops->mac_addr_remove)(dev, index);
2555
2556         /* Update address in NIC data structure */
2557         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2558
2559         /* reset pool bitmap */
2560         dev->data->mac_pool_sel[index] = 0;
2561
2562         return 0;
2563 }
2564
2565 int
2566 rte_eth_dev_set_vf_rxmode(uint8_t port_id, uint16_t vf,
2567                                 uint16_t rx_mode, uint8_t on)
2568 {
2569         uint16_t num_vfs;
2570         struct rte_eth_dev *dev;
2571         struct rte_eth_dev_info dev_info;
2572
2573         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2574
2575         dev = &rte_eth_devices[port_id];
2576         rte_eth_dev_info_get(port_id, &dev_info);
2577
2578         num_vfs = dev_info.max_vfs;
2579         if (vf >= num_vfs) {
2580                 PMD_DEBUG_TRACE("set VF RX mode: invalid VF id %d\n", vf);
2581                 return -EINVAL;
2582         }
2583
2584         if (rx_mode == 0) {
2585                 PMD_DEBUG_TRACE("set VF RX mode: mode mask cannot be zero\n");
2586                 return -EINVAL;
2587         }
2588         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
2589         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
2590 }
2591
2592 /*
2593  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2594  * an empty spot.
2595  */
2596 static int
2597 get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2598 {
2599         struct rte_eth_dev_info dev_info;
2600         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2601         unsigned i;
2602
2603         rte_eth_dev_info_get(port_id, &dev_info);
2604         if (!dev->data->hash_mac_addrs)
2605                 return -1;
2606
2607         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2608                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2609                         ETHER_ADDR_LEN) == 0)
2610                         return i;
2611
2612         return -1;
2613 }
2614
2615 int
2616 rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
2617                                 uint8_t on)
2618 {
2619         int index;
2620         int ret;
2621         struct rte_eth_dev *dev;
2622
2623         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2624
2625         dev = &rte_eth_devices[port_id];
2626         if (is_zero_ether_addr(addr)) {
2627                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2628                         port_id);
2629                 return -EINVAL;
2630         }
2631
2632         index = get_hash_mac_addr_index(port_id, addr);
2633         /* Check if it's already there, and do nothing */
2634         if ((index >= 0) && (on))
2635                 return 0;
2636
2637         if (index < 0) {
2638                 if (!on) {
2639                         PMD_DEBUG_TRACE("port %d: the MAC address was not "
2640                                 "set in UTA\n", port_id);
2641                         return -EINVAL;
2642                 }
2643
2644                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2645                 if (index < 0) {
2646                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2647                                         port_id);
2648                         return -ENOSPC;
2649                 }
2650         }
2651
2652         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2653         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2654         if (ret == 0) {
2655                 /* Update address in NIC data structure */
2656                 if (on)
2657                         ether_addr_copy(addr,
2658                                         &dev->data->hash_mac_addrs[index]);
2659                 else
2660                         ether_addr_copy(&null_mac_addr,
2661                                         &dev->data->hash_mac_addrs[index]);
2662         }
2663
2664         return ret;
2665 }
2666
2667 int
2668 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2669 {
2670         struct rte_eth_dev *dev;
2671
2672         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2673
2674         dev = &rte_eth_devices[port_id];
2675
2676         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2677         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2678 }
2679
2680 int
2681 rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on)
2682 {
2683         uint16_t num_vfs;
2684         struct rte_eth_dev *dev;
2685         struct rte_eth_dev_info dev_info;
2686
2687         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2688
2689         dev = &rte_eth_devices[port_id];
2690         rte_eth_dev_info_get(port_id, &dev_info);
2691
2692         num_vfs = dev_info.max_vfs;
2693         if (vf >= num_vfs) {
2694                 PMD_DEBUG_TRACE("port %d: invalid VF id\n", port_id);
2695                 return -EINVAL;
2696         }
2697
2698         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2699         return (*dev->dev_ops->set_vf_rx)(dev, vf, on);
2700 }
2701
2702 int
2703 rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on)
2704 {
2705         uint16_t num_vfs;
2706         struct rte_eth_dev *dev;
2707         struct rte_eth_dev_info dev_info;
2708
2709         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2710
2711         dev = &rte_eth_devices[port_id];
2712         rte_eth_dev_info_get(port_id, &dev_info);
2713
2714         num_vfs = dev_info.max_vfs;
2715         if (vf >= num_vfs) {
2716                 PMD_DEBUG_TRACE("set VF TX: invalid VF id=%d\n", vf);
2717                 return -EINVAL;
2718         }
2719
2720         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2721         return (*dev->dev_ops->set_vf_tx)(dev, vf, on);
2722 }
2723
2724 int
2725 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2726                                uint64_t vf_mask, uint8_t vlan_on)
2727 {
2728         struct rte_eth_dev *dev;
2729
2730         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2731
2732         dev = &rte_eth_devices[port_id];
2733
2734         if (vlan_id > ETHER_MAX_VLAN_ID) {
2735                 PMD_DEBUG_TRACE("VF VLAN filter: invalid VLAN id=%d\n",
2736                         vlan_id);
2737                 return -EINVAL;
2738         }
2739
2740         if (vf_mask == 0) {
2741                 PMD_DEBUG_TRACE("VF VLAN filter: vf_mask cannot be 0\n");
2742                 return -EINVAL;
2743         }
2744
2745         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2746         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2747                                                    vf_mask, vlan_on);
2748 }
2749
2750 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
2751                                         uint16_t tx_rate)
2752 {
2753         struct rte_eth_dev *dev;
2754         struct rte_eth_dev_info dev_info;
2755         struct rte_eth_link link;
2756
2757         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2758
2759         dev = &rte_eth_devices[port_id];
2760         rte_eth_dev_info_get(port_id, &dev_info);
2761         link = dev->data->dev_link;
2762
2763         if (queue_idx >= dev_info.max_tx_queues) {
2764                 PMD_DEBUG_TRACE("set queue rate limit: port %d: "
2765                                 "invalid queue id=%d\n", port_id, queue_idx);
2766                 return -EINVAL;
2767         }
2768
2769         if (tx_rate > link.link_speed) {
2770                 PMD_DEBUG_TRACE("set queue rate limit: tx_rate=%d "
2771                                 "exceeds link speed=%d\n",
2772                                 tx_rate, link.link_speed);
2773                 return -EINVAL;
2774         }
2775
2776         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2777         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2778 }
2779
2780 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
2781                                 uint64_t q_msk)
2782 {
2783         struct rte_eth_dev *dev;
2784         struct rte_eth_dev_info dev_info;
2785         struct rte_eth_link link;
2786
2787         if (q_msk == 0)
2788                 return 0;
2789
2790         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2791
2792         dev = &rte_eth_devices[port_id];
2793         rte_eth_dev_info_get(port_id, &dev_info);
2794         link = dev->data->dev_link;
2795
2796         if (vf >= dev_info.max_vfs) {
2797                 PMD_DEBUG_TRACE("set VF rate limit: port %d: "
2798                                 "invalid VF id=%d\n", port_id, vf);
2799                 return -EINVAL;
2800         }
2801
2802         if (tx_rate > link.link_speed) {
2803                 PMD_DEBUG_TRACE("set VF rate limit: tx_rate=%d "
2804                                 "exceeds link speed=%d\n",
2805                                 tx_rate, link.link_speed);
2806                 return -EINVAL;
2807         }
2808
2809         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
2810         return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
2811 }
2812
2813 int
2814 rte_eth_mirror_rule_set(uint8_t port_id,
2815                         struct rte_eth_mirror_conf *mirror_conf,
2816                         uint8_t rule_id, uint8_t on)
2817 {
2818         struct rte_eth_dev *dev;
2819
2820         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2821         if (mirror_conf->rule_type == 0) {
2822                 PMD_DEBUG_TRACE("mirror rule type cannot be 0\n");
2823                 return -EINVAL;
2824         }
2825
2826         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2827                 PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
2828                                 ETH_64_POOLS - 1);
2829                 return -EINVAL;
2830         }
2831
2832         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
2833              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
2834             (mirror_conf->pool_mask == 0)) {
2835                 PMD_DEBUG_TRACE("Invalid mirror pool, pool mask cannot be 0\n");
2836                 return -EINVAL;
2837         }
2838
2839         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
2840             mirror_conf->vlan.vlan_mask == 0) {
2841                 PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask cannot be 0\n");
2842                 return -EINVAL;
2843         }
2844
2845         dev = &rte_eth_devices[port_id];
2846         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2847
2848         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2849 }
2850
2851 int
2852 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2853 {
2854         struct rte_eth_dev *dev;
2855
2856         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2857
2858         dev = &rte_eth_devices[port_id];
2859         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2860
2861         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2862 }
2863
2864 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2865 uint16_t
2866 rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
2867                  struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2868 {
2869         struct rte_eth_dev *dev;
2870
2871         VALID_PORTID_OR_ERR_RET(port_id, 0);
2872
2873         dev = &rte_eth_devices[port_id];
2874         FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
2875         if (queue_id >= dev->data->nb_rx_queues) {
2876                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
2877                 return 0;
2878         }
2879         return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
2880                                                 rx_pkts, nb_pkts);
2881 }
2882
2883 uint16_t
2884 rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
2885                  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2886 {
2887         struct rte_eth_dev *dev;
2888
2889         VALID_PORTID_OR_ERR_RET(port_id, 0);
2890
2891         dev = &rte_eth_devices[port_id];
2892
2893         FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
2894         if (queue_id >= dev->data->nb_tx_queues) {
2895                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
2896                 return 0;
2897         }
2898         return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
2899                                                 tx_pkts, nb_pkts);
2900 }
2901
2902 uint32_t
2903 rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
2904 {
2905         struct rte_eth_dev *dev;
2906
2907         VALID_PORTID_OR_ERR_RET(port_id, 0);
2908
2909         dev = &rte_eth_devices[port_id];
2910         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, 0);
2911         return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
2912 }
2913
2914 int
2915 rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
2916 {
2917         struct rte_eth_dev *dev;
2918
2919         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2920
2921         dev = &rte_eth_devices[port_id];
2922         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
             if (queue_id >= dev->data->nb_rx_queues) {
                     PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
                     return -EINVAL;
             }
2923         return (*dev->dev_ops->rx_descriptor_done)(dev->data->rx_queues[queue_id],
2924                                                    offset);
2925 }
2926 #endif
2927
2928 int
2929 rte_eth_dev_callback_register(uint8_t port_id,
2930                         enum rte_eth_event_type event,
2931                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2932 {
2933         struct rte_eth_dev *dev;
2934         struct rte_eth_dev_callback *user_cb;
2935
2936         if (!cb_fn)
2937                 return -EINVAL;
2938
2939         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2940
2941         dev = &rte_eth_devices[port_id];
2942         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2943
2944         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
2945                 if (user_cb->cb_fn == cb_fn &&
2946                         user_cb->cb_arg == cb_arg &&
2947                         user_cb->event == event) {
2948                         break;
2949                 }
2950         }
2951
2952         /* create a new callback. */
2953         if (user_cb == NULL &&
2954             (user_cb = rte_zmalloc("INTR_USER_CALLBACK",
2955                                    sizeof(struct rte_eth_dev_callback), 0))) {
2956                 user_cb->cb_fn = cb_fn;
2957                 user_cb->cb_arg = cb_arg;
2958                 user_cb->event = event;
2959                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
2960         }
2961
2962         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2963         return (user_cb == NULL) ? -ENOMEM : 0;
2964 }
2965
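/*
 * Illustrative caller-side sketch (not part of the library): registering
 * a handler for link state change events. Delivery of
 * RTE_ETH_EVENT_INTR_LSC additionally requires intr_conf.lsc to have been
 * set in the device configuration.
 *
 *	static void
 *	lsc_event_cb(uint8_t port, enum rte_eth_event_type type, void *arg)
 *	{
 *		RTE_SET_USED(arg);
 *		if (type == RTE_ETH_EVENT_INTR_LSC)
 *			printf("port %u: link state changed\n", port);
 *	}
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      lsc_event_cb, NULL);
 */
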
int
rte_eth_dev_callback_unregister(uint8_t port_id,
                        enum rte_eth_event_type event,
                        rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
        int ret;
        struct rte_eth_dev *dev;
        struct rte_eth_dev_callback *cb, *next;

        if (!cb_fn)
                return -EINVAL;

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        rte_spinlock_lock(&rte_eth_dev_cb_lock);

        ret = 0;
        for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

                next = TAILQ_NEXT(cb, next);

                /*
                 * The wildcard is the caller's cb_arg, not the registered
                 * one: (void *)-1 from the caller matches any argument.
                 */
                if (cb->cb_fn != cb_fn || cb->event != event ||
                                (cb_arg != (void *)-1 &&
                                cb->cb_arg != cb_arg))
                        continue;

                /*
                 * if this callback is not executing right now,
                 * then remove it.
                 */
                if (cb->active == 0) {
                        TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
                        rte_free(cb);
                } else {
                        ret = -EAGAIN;
                }
        }

        rte_spinlock_unlock(&rte_eth_dev_cb_lock);
        return ret;
}

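/*
 * Run every callback registered for @event on this device. Each entry is
 * copied and marked active before the lock is released, so user handlers
 * execute without holding rte_eth_dev_cb_lock while concurrent
 * unregistration of a running callback is still detected (-EAGAIN above).
 */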
void
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
        enum rte_eth_event_type event)
{
        struct rte_eth_dev_callback *cb_lst;
        struct rte_eth_dev_callback dev_cb;

        rte_spinlock_lock(&rte_eth_dev_cb_lock);
        TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
                if (cb_lst->cb_fn == NULL || cb_lst->event != event)
                        continue;
                dev_cb = *cb_lst;
                cb_lst->active = 1;
                rte_spinlock_unlock(&rte_eth_dev_cb_lock);
                dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
                                                dev_cb.cb_arg);
                rte_spinlock_lock(&rte_eth_dev_cb_lock);
                cb_lst->active = 0;
        }
        rte_spinlock_unlock(&rte_eth_dev_cb_lock);
}

#ifdef RTE_NIC_BYPASS
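/*
 * NIC bypass control. Every wrapper below follows the same pattern:
 * validate the port, require the matching dev_ops hook (returning
 * -ENOTSUP when the PMD lacks it) and forward the request to the driver.
 */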
int
rte_eth_dev_bypass_init(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
        (*dev->dev_ops->bypass_init)(dev);
        return 0;
}

int
rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
        (*dev->dev_ops->bypass_state_show)(dev, state);
        return 0;
}

int
rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
        (*dev->dev_ops->bypass_state_set)(dev, new_state);
        return 0;
}

int
rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        /* Check the hook that is actually invoked: bypass_event_show. */
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_show, -ENOTSUP);
        (*dev->dev_ops->bypass_event_show)(dev, event, state);
        return 0;
}

int
rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
        (*dev->dev_ops->bypass_event_set)(dev, event, state);
        return 0;
}

int
rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
        (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
        return 0;
}

int
rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
        (*dev->dev_ops->bypass_ver_show)(dev, ver);
        return 0;
}

int
rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
        (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
        return 0;
}

int
rte_eth_dev_bypass_wd_reset(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
        (*dev->dev_ops->bypass_wd_reset)(dev);
        return 0;
}
#endif

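/*
 * Filter control API. rte_eth_dev_filter_supported() probes for support
 * by issuing the RTE_ETH_FILTER_NOP operation: a return of zero means
 * @filter_type can be used with rte_eth_dev_filter_ctrl() on this port.
 */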
int
rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
        return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
                                RTE_ETH_FILTER_NOP, NULL);
}

int
rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
                       enum rte_filter_op filter_op, void *arg)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
        return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
}

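/*
 * Hook @fn into the RX path of @queue_id; it runs on each
 * rte_eth_rx_burst() after the PMD has filled the packet array. The
 * callback is linked at the head of the queue's list with no locking, so
 * it takes effect on the data path immediately. A minimal sketch, with
 * illustrative names:
 *
 *        static uint16_t
 *        count_rx(uint8_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *                 uint16_t nb_pkts, uint16_t max_pkts, void *arg)
 *        {
 *                *(uint64_t *)arg += nb_pkts;
 *                return nb_pkts;
 *        }
 *        ...
 *        static uint64_t rx_total;
 *        rte_eth_add_rx_callback(port, 0, count_rx, &rx_total);
 */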
void *
rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
                rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
        rte_errno = ENOTSUP;
        return NULL;
#endif
        /* check input parameters */
        if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
                    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
                rte_errno = EINVAL;
                return NULL;
        }

        struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

        if (cb == NULL) {
                rte_errno = ENOMEM;
                return NULL;
        }

        cb->fn.rx = fn;
        cb->param = user_param;
        cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
        rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
        return cb;
}

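/*
 * TX counterpart of rte_eth_add_rx_callback(): @fn runs on each call to
 * rte_eth_tx_burst() for @queue_id before the packets reach the PMD.
 */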
void *
rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
                rte_tx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
        rte_errno = ENOTSUP;
        return NULL;
#endif
        /* check input parameters */
        if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
                    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
                rte_errno = EINVAL;
                return NULL;
        }

        struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

        if (cb == NULL) {
                rte_errno = ENOMEM;
                return NULL;
        }

        cb->fn.tx = fn;
        cb->param = user_param;
        cb->next = rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
        rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
        return cb;
}

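/*
 * Unlink @user_cb from the RX callback list of @queue_id. The callback
 * memory is deliberately not freed here, since a data-path thread may
 * still be inside it; releasing it safely is left to the caller.
 */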
int
rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
                struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
        return -ENOTSUP;
#endif
        /* Check input parameters. */
        if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
                    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
                return -EINVAL;
        }

        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
        struct rte_eth_rxtx_callback *prev_cb;

        /* Reset head pointer and remove user cb if first in the list. */
        if (cb == user_cb) {
                dev->post_rx_burst_cbs[queue_id] = user_cb->next;
                return 0;
        }

        /* Walk the remainder of the list; also safe when it is empty. */
        while (cb != NULL) {
                prev_cb = cb;
                cb = cb->next;

                if (cb == user_cb) {
                        prev_cb->next = user_cb->next;
                        return 0;
                }
        }

        /* Callback wasn't found. */
        return -EINVAL;
}

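/*
 * TX counterpart of rte_eth_remove_rx_callback(): identical semantics,
 * operating on pre_tx_burst_cbs.
 */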
int
rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
                struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
        return -ENOTSUP;
#endif
        /* Check input parameters. */
        if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
                    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
                return -EINVAL;
        }

        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
        struct rte_eth_rxtx_callback *prev_cb;

        /* Reset head pointer and remove user cb if first in the list. */
        if (cb == user_cb) {
                dev->pre_tx_burst_cbs[queue_id] = user_cb->next;
                return 0;
        }

        /* Walk the remainder of the list; also safe when it is empty. */
        while (cb != NULL) {
                prev_cb = cb;
                cb = cb->next;

                if (cb == user_cb) {
                        prev_cb->next = user_cb->next;
                        return 0;
                }
        }

        /* Callback wasn't found. */
        return -EINVAL;
}

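/*
 * Replace the port's set of filtered multicast MAC addresses with the
 * @nb_mc_addr entries of @mc_addr_set; a zero-length list flushes any
 * previously configured addresses.
 */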
int
rte_eth_dev_set_mc_addr_list(uint8_t port_id,
                             struct ether_addr *mc_addr_set,
                             uint32_t nb_mc_addr)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
        return dev->dev_ops->set_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
}