ethdev: add ieee1588 timestamping
[dpdk.git] / lib / librte_ether / rte_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_ether.h"
#include "rte_ethdev.h"

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
#define PMD_DEBUG_TRACE(fmt, args...) do {                        \
                RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
        } while (0)
#else
#define PMD_DEBUG_TRACE(fmt, args...)
#endif

/* Macros to restrict certain functions to the primary process only */
#define PROC_PRIMARY_OR_ERR_RET(retval) do { \
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
                PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
                return (retval); \
        } \
} while (0)

#define PROC_PRIMARY_OR_RET() do { \
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
                PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
                return; \
        } \
} while (0)

/* Macros to check for invalid function pointers in dev_ops structure */
#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
        if ((func) == NULL) { \
                PMD_DEBUG_TRACE("Function not supported\n"); \
                return (retval); \
        } \
} while (0)

#define FUNC_PTR_OR_RET(func) do { \
        if ((func) == NULL) { \
                PMD_DEBUG_TRACE("Function not supported\n"); \
                return; \
        } \
} while (0)

/* Macros to check for a valid port */
#define VALID_PORTID_OR_ERR_RET(port_id, retval) do {           \
        if (!rte_eth_dev_is_valid_port(port_id)) {              \
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
                return -EINVAL;                                 \
        }                                                       \
} while (0)

#define VALID_PORTID_OR_RET(port_id) do {                       \
        if (!rte_eth_dev_is_valid_port(port_id)) {              \
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
                return;                                         \
        }                                                       \
} while (0)

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static struct rte_eth_dev_data *rte_eth_dev_data;
static uint8_t nb_ports;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
        {"rx_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
        {"rx_crc_errors", offsetof(struct rte_eth_stats, ibadcrc)},
        {"rx_bad_length_errors", offsetof(struct rte_eth_stats, ibadlen)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"alloc_rx_buff_failed", offsetof(struct rte_eth_stats, rx_nombuf)},
        {"fdir_match", offsetof(struct rte_eth_stats, fdirmatch)},
        {"fdir_miss", offsetof(struct rte_eth_stats, fdirmiss)},
        {"tx_flow_control_xon", offsetof(struct rte_eth_stats, tx_pause_xon)},
        {"rx_flow_control_xon", offsetof(struct rte_eth_stats, rx_pause_xon)},
        {"tx_flow_control_xoff", offsetof(struct rte_eth_stats, tx_pause_xoff)},
        {"rx_flow_control_xoff", offsetof(struct rte_eth_stats, rx_pause_xoff)},
};
#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
        {"rx_packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"rx_bytes", offsetof(struct rte_eth_stats, q_ibytes)},
};
#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
                sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
        {"tx_packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"tx_bytes", offsetof(struct rte_eth_stats, q_obytes)},
        {"tx_errors", offsetof(struct rte_eth_stats, q_errors)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
                sizeof(rte_txq_stats_strings[0]))

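/*
 * The name/offset tables above let generic xstats code read each basic
 * counter out of struct rte_eth_stats without per-field accessors. A
 * minimal sketch of how such a table is consumed (illustrative only,
 * not code from this file):
 *
 *   struct rte_eth_stats stats;
 *   unsigned i;
 *
 *   rte_eth_stats_get(port_id, &stats);
 *   for (i = 0; i < RTE_NB_STATS; i++) {
 *           uint64_t val = *(const uint64_t *)(((const char *)&stats) +
 *                   rte_stats_strings[i].offset);
 *           printf("%s: %" PRIu64 "\n", rte_stats_strings[i].name, val);
 *   }
 */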
/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user
 * application, a pointer to the callback parameters, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

enum {
        DEV_DETACHED = 0,
        DEV_ATTACHED
};

static void
rte_eth_dev_data_alloc(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
                                rte_socket_id(), flags);
        } else
                mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
        if (mz == NULL)
                rte_panic("Cannot allocate memzone for ethernet port data\n");

        rte_eth_dev_data = mz->addr;
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                memset(rte_eth_dev_data, 0,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

static uint8_t
rte_eth_dev_find_free_port(void)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].attached == DEV_DETACHED)
                        return i;
        }
        return RTE_MAX_ETHPORTS;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name, enum rte_eth_dev_type type)
{
        uint8_t port_id;
        struct rte_eth_dev *eth_dev;

        port_id = rte_eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
                return NULL;
        }

        if (rte_eth_dev_data == NULL)
                rte_eth_dev_data_alloc();

        if (rte_eth_dev_allocated(name) != NULL) {
                PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
                                name);
                return NULL;
        }

        eth_dev = &rte_eth_devices[port_id];
        eth_dev->data = &rte_eth_dev_data[port_id];
        snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
        eth_dev->data->port_id = port_id;
        eth_dev->attached = DEV_ATTACHED;
        eth_dev->dev_type = type;
        nb_ports++;
        return eth_dev;
}

static int
rte_eth_dev_create_unique_device_name(char *name, size_t size,
                struct rte_pci_device *pci_dev)
{
        int ret;

        if ((name == NULL) || (pci_dev == NULL))
                return -EINVAL;

        ret = snprintf(name, size, "%d:%d.%d",
                        pci_dev->addr.bus, pci_dev->addr.devid,
                        pci_dev->addr.function);
        if (ret < 0)
                return ret;
        return 0;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        eth_dev->attached = DEV_DETACHED;
        nb_ports--;
        return 0;
}

static int
rte_eth_dev_init(struct rte_pci_driver *pci_drv,
                 struct rte_pci_device *pci_dev)
{
        struct eth_driver    *eth_drv;
        struct rte_eth_dev *eth_dev;
        char ethdev_name[RTE_ETH_NAME_MAX_LEN];

        int diag;

        eth_drv = (struct eth_driver *)pci_drv;

        /* Create unique Ethernet device name using PCI address */
        rte_eth_dev_create_unique_device_name(ethdev_name,
                        sizeof(ethdev_name), pci_dev);

        eth_dev = rte_eth_dev_allocate(ethdev_name, RTE_ETH_DEV_PCI);
        if (eth_dev == NULL)
                return -ENOMEM;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
                                  eth_drv->dev_private_size,
                                  RTE_CACHE_LINE_SIZE);
                if (eth_dev->data->dev_private == NULL)
                        rte_panic("Cannot allocate memory for private port data\n");
        }
        eth_dev->pci_dev = pci_dev;
        eth_dev->driver = eth_drv;
        eth_dev->data->rx_mbuf_alloc_failed = 0;

        /* init user callbacks */
        TAILQ_INIT(&(eth_dev->link_intr_cbs));

        /* Set the default MTU. */
        eth_dev->data->mtu = ETHER_MTU;

        /* Invoke PMD device initialization function */
        diag = (*eth_drv->eth_dev_init)(eth_dev);
        if (diag == 0)
                return 0;

        PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%x device_id=0x%x) failed\n",
                        pci_drv->name,
                        (unsigned) pci_dev->id.vendor_id,
                        (unsigned) pci_dev->id.device_id);
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);
        eth_dev->attached = DEV_DETACHED;
        nb_ports--;
        return diag;
}

static int
rte_eth_dev_uninit(struct rte_pci_device *pci_dev)
{
        const struct eth_driver *eth_drv;
        struct rte_eth_dev *eth_dev;
        char ethdev_name[RTE_ETH_NAME_MAX_LEN];
        int ret;

        if (pci_dev == NULL)
                return -EINVAL;

        /* Create unique Ethernet device name using PCI address */
        rte_eth_dev_create_unique_device_name(ethdev_name,
                        sizeof(ethdev_name), pci_dev);

        eth_dev = rte_eth_dev_allocated(ethdev_name);
        if (eth_dev == NULL)
                return -ENODEV;

        eth_drv = (const struct eth_driver *)pci_dev->driver;

        /* Invoke PMD device uninit function */
        if (*eth_drv->eth_dev_uninit) {
                ret = (*eth_drv->eth_dev_uninit)(eth_dev);
                if (ret)
                        return ret;
        }

        /* free ether device */
        rte_eth_dev_release_port(eth_dev);

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);

        eth_dev->pci_dev = NULL;
        eth_dev->driver = NULL;
        eth_dev->data = NULL;

        return 0;
}

/**
 * Register an Ethernet [Poll Mode] driver.
 *
 * Function invoked by the initialization function of an Ethernet driver
 * to simultaneously register itself as a PCI driver and as an Ethernet
 * Poll Mode Driver.
 * Invokes the rte_eal_pci_register() function to register the *pci_drv*
 * structure embedded in the *eth_drv* structure, after having stored the
 * address of the rte_eth_dev_init() function in the *devinit* field of
 * the *pci_drv* structure.
 * During the PCI probing phase, the rte_eth_dev_init() function is
 * invoked for each PCI [Ethernet device] matching the embedded PCI
 * identifiers provided by the driver.
 */
void
rte_eth_driver_register(struct eth_driver *eth_drv)
{
        eth_drv->pci_drv.devinit = rte_eth_dev_init;
        eth_drv->pci_drv.devuninit = rte_eth_dev_uninit;
        rte_eal_pci_register(&eth_drv->pci_drv);
}

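/*
 * Illustrative sketch of the registration described above: a PMD
 * typically calls rte_eth_driver_register() from its init hook. The
 * driver name, id table and callbacks below are hypothetical.
 *
 *   static struct eth_driver rte_foo_pmd = {
 *           .pci_drv = {
 *                   .name = "rte_foo_pmd",
 *                   .id_table = pci_id_foo_map,
 *                   .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
 *           },
 *           .eth_dev_init = eth_foo_dev_init,
 *           .dev_private_size = sizeof(struct foo_adapter),
 *   };
 *
 *   rte_eth_driver_register(&rte_foo_pmd);
 */
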
static int
rte_eth_dev_is_valid_port(uint8_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            rte_eth_devices[port_id].attached != DEV_ATTACHED)
                return 0;
        else
                return 1;
}

int
rte_eth_dev_socket_id(uint8_t port_id)
{
        if (!rte_eth_dev_is_valid_port(port_id))
                return -1;
        return rte_eth_devices[port_id].pci_dev->numa_node;
}

uint8_t
rte_eth_dev_count(void)
{
        return nb_ports;
}

static enum rte_eth_dev_type
rte_eth_dev_get_device_type(uint8_t port_id)
{
        if (!rte_eth_dev_is_valid_port(port_id))
                return RTE_ETH_DEV_UNKNOWN;
        return rte_eth_devices[port_id].dev_type;
}

static int
rte_eth_dev_save(struct rte_eth_dev *devs, size_t size)
{
        if ((devs == NULL) ||
            (size != sizeof(struct rte_eth_dev) * RTE_MAX_ETHPORTS))
                return -EINVAL;

        /* save current rte_eth_devices */
        memcpy(devs, rte_eth_devices, size);
        return 0;
}

static int
rte_eth_dev_get_changed_port(struct rte_eth_dev *devs, uint8_t *port_id)
{
        if ((devs == NULL) || (port_id == NULL))
                return -EINVAL;

        /* check which port was attached or detached */
        for (*port_id = 0; *port_id < RTE_MAX_ETHPORTS; (*port_id)++, devs++) {
                if (rte_eth_devices[*port_id].attached ^ devs->attached)
                        return 0;
        }
        return -ENODEV;
}

static int
rte_eth_dev_get_addr_by_port(uint8_t port_id, struct rte_pci_addr *addr)
{
        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (addr == NULL) {
                PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        *addr = rte_eth_devices[port_id].pci_dev->addr;
        return 0;
}

static int
rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
{
        char *tmp;

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (name == NULL) {
                PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        /* don't check 'rte_eth_devices[i].data' here,
         * because it might be overwritten by a VDEV PMD */
        tmp = rte_eth_dev_data[port_id].name;
        strcpy(name, tmp);
        return 0;
}

static int
rte_eth_dev_is_detachable(uint8_t port_id)
{
        uint32_t drv_flags;

        if (port_id >= RTE_MAX_ETHPORTS) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return -EINVAL;
        }

        if (rte_eth_devices[port_id].dev_type == RTE_ETH_DEV_PCI) {
                switch (rte_eth_devices[port_id].pci_dev->kdrv) {
                case RTE_KDRV_IGB_UIO:
                case RTE_KDRV_UIO_GENERIC:
                case RTE_KDRV_NIC_UIO:
                        break;
                case RTE_KDRV_VFIO:
                default:
                        return -ENOTSUP;
                }
        }

        drv_flags = rte_eth_devices[port_id].driver->pci_drv.drv_flags;
        return !(drv_flags & RTE_PCI_DRV_DETACHABLE);
}

/* attach the new physical device, then store the port_id of the device */
static int
rte_eth_dev_attach_pdev(struct rte_pci_addr *addr, uint8_t *port_id)
{
        uint8_t new_port_id;
        struct rte_eth_dev devs[RTE_MAX_ETHPORTS];

        if ((addr == NULL) || (port_id == NULL))
                goto err;

        /* save current port status */
        if (rte_eth_dev_save(devs, sizeof(devs)))
                goto err;
        /* re-construct pci_device_list */
        if (rte_eal_pci_scan())
                goto err;
        /* invoke the probe function of the driver that can handle
         * the new device.
         * TODO:
         * rte_eal_pci_probe_one() should return port_id.
         * Then rte_eth_dev_save() and rte_eth_dev_get_changed_port()
         * could be removed. */
        if (rte_eal_pci_probe_one(addr))
                goto err;
        /* get the port_id enabled by the above procedures */
        if (rte_eth_dev_get_changed_port(devs, &new_port_id))
                goto err;

        *port_id = new_port_id;
        return 0;
err:
        RTE_LOG(ERR, EAL, "Driver, cannot attach the device\n");
        return -1;
}

/* detach the physical device, then store the pci_addr of the device */
static int
rte_eth_dev_detach_pdev(uint8_t port_id, struct rte_pci_addr *addr)
{
        struct rte_pci_addr freed_addr;
        struct rte_pci_addr vp;

        if (addr == NULL)
                goto err;

        /* check whether the driver supports the detach feature, or not */
        if (rte_eth_dev_is_detachable(port_id))
                goto err;

        /* get pci address by port id */
        if (rte_eth_dev_get_addr_by_port(port_id, &freed_addr))
                goto err;

        /* a zeroed pci addr means the port comes from a virtual device */
        vp.domain = vp.bus = vp.devid = vp.function = 0;
        if (rte_eal_compare_pci_addr(&vp, &freed_addr) == 0)
                goto err;

        /* invoke the close function of the driver,
         * and also remove the device from pci_device_list */
        if (rte_eal_pci_close_one(&freed_addr))
                goto err;

        *addr = freed_addr;
        return 0;
err:
        RTE_LOG(ERR, EAL, "Driver, cannot detach the device\n");
        return -1;
}

/* attach the new virtual device, then store the port_id of the device */
static int
rte_eth_dev_attach_vdev(const char *vdevargs, uint8_t *port_id)
{
        char *name = NULL, *args = NULL;
        uint8_t new_port_id;
        struct rte_eth_dev devs[RTE_MAX_ETHPORTS];
        int ret = -1;

        if ((vdevargs == NULL) || (port_id == NULL))
                goto end;

        /* parse vdevargs, then retrieve the device name and args */
        if (rte_eal_parse_devargs_str(vdevargs, &name, &args))
                goto end;

        /* save current port status */
        if (rte_eth_dev_save(devs, sizeof(devs)))
                goto end;
        /* walk through dev_driver_list to find the driver of the device,
         * then invoke the probe function of the driver.
         * TODO:
         * rte_eal_vdev_init() should return port_id.
         * Then rte_eth_dev_save() and rte_eth_dev_get_changed_port()
         * could be removed. */
        if (rte_eal_vdev_init(name, args))
                goto end;
        /* get the port_id enabled by the above procedures */
        if (rte_eth_dev_get_changed_port(devs, &new_port_id))
                goto end;
        ret = 0;
        *port_id = new_port_id;
end:
        if (name)
                free(name);
        if (args)
                free(args);

        if (ret < 0)
                RTE_LOG(ERR, EAL, "Driver, cannot attach the device\n");
        return ret;
}

/* detach the virtual device, then store the name of the device */
static int
rte_eth_dev_detach_vdev(uint8_t port_id, char *vdevname)
{
        char name[RTE_ETH_NAME_MAX_LEN];

        if (vdevname == NULL)
                goto err;

        /* check whether the driver supports the detach feature, or not */
        if (rte_eth_dev_is_detachable(port_id))
                goto err;

        /* get device name by port id */
        if (rte_eth_dev_get_name_by_port(port_id, name))
                goto err;
        /* walk through dev_driver_list to find the driver of the device,
         * then invoke the close function of the driver */
        if (rte_eal_vdev_uninit(name))
                goto err;

        strncpy(vdevname, name, sizeof(name));
        return 0;
err:
        RTE_LOG(ERR, EAL, "Driver, cannot detach the device\n");
        return -1;
}

/* attach the new device, then store the port_id of the device */
int
rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
{
        struct rte_pci_addr addr;

        if ((devargs == NULL) || (port_id == NULL))
                return -EINVAL;

        if (eal_parse_pci_DomBDF(devargs, &addr) == 0)
                return rte_eth_dev_attach_pdev(&addr, port_id);
        else
                return rte_eth_dev_attach_vdev(devargs, port_id);
}

/* detach the device, then store the name of the device */
int
rte_eth_dev_detach(uint8_t port_id, char *name)
{
        struct rte_pci_addr addr;
        int ret;

        if (name == NULL)
                return -EINVAL;

        if (rte_eth_dev_get_device_type(port_id) == RTE_ETH_DEV_PCI) {
                ret = rte_eth_dev_get_addr_by_port(port_id, &addr);
                if (ret < 0)
                        return ret;

                ret = rte_eth_dev_detach_pdev(port_id, &addr);
                if (ret == 0)
                        snprintf(name, RTE_ETH_NAME_MAX_LEN,
                                "%04x:%02x:%02x.%d",
                                addr.domain, addr.bus,
                                addr.devid, addr.function);

                return ret;
        } else
                return rte_eth_dev_detach_vdev(port_id, name);
}

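/*
 * Illustrative use of the attach/detach pair above (the PCI address is
 * hypothetical; error handling abbreviated):
 *
 *   uint8_t port_id;
 *   char devname[RTE_ETH_NAME_MAX_LEN];
 *
 *   if (rte_eth_dev_attach("0000:02:00.0", &port_id) == 0) {
 *           ... configure and use the port ...
 *           rte_eth_dev_detach(port_id, devname);
 *   }
 */
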
static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -(ENOMEM);
                }
        } else { /* re-configure */
                FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -(ENOMEM);
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

int
rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
}

int
rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
}

int
rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
}

int
rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -(ENOMEM);
                }
        } else { /* re-configure */
                FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

static int
rte_eth_dev_check_vf_rss_rxq_num(uint8_t port_id, uint16_t nb_rx_q)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

        switch (nb_rx_q) {
        case 1:
        case 2:
                RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
                break;
        case 4:
                RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
                break;
        default:
                return -EINVAL;
        }

        RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
        RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
                dev->pci_dev->max_vfs * nb_rx_q;

        return 0;
}

static int
rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                          const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

        if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
                /* check multi-queue mode */
                if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
                    (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) ||
                    (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
                        /* SRIOV only works in VMDq enable mode */
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "wrong VMDQ mq_mode rx %u tx %u\n",
                                        port_id,
                                        dev_conf->rxmode.mq_mode,
                                        dev_conf->txmode.mq_mode);
                        return -EINVAL;
                }

                switch (dev_conf->rxmode.mq_mode) {
                case ETH_MQ_RX_VMDQ_DCB:
                case ETH_MQ_RX_VMDQ_DCB_RSS:
                        /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "unsupported VMDQ mq_mode rx %u\n",
                                        port_id, dev_conf->rxmode.mq_mode);
                        return -EINVAL;
                case ETH_MQ_RX_RSS:
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "Rx mq mode is changed from:"
                                        "mq_mode %u into VMDQ mq_mode %u\n",
                                        port_id,
                                        dev_conf->rxmode.mq_mode,
                                        dev->data->dev_conf.rxmode.mq_mode);
                        /* fallthrough: plain RSS is remapped to VMDQ+RSS */
                case ETH_MQ_RX_VMDQ_RSS:
                        dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
                        if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
                                if (rte_eth_dev_check_vf_rss_rxq_num(port_id, nb_rx_q) != 0) {
                                        PMD_DEBUG_TRACE("ethdev port_id=%d"
                                                        " SRIOV active, invalid queue"
                                                        " number for VMDQ RSS, allowed"
                                                        " values are 1, 2 or 4\n",
                                                        port_id);
                                        return -EINVAL;
                                }
                        break;
                default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
                        /* if no mq mode is configured, use the default scheme */
                        dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
                        if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
                                RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
                        break;
                }

                switch (dev_conf->txmode.mq_mode) {
                case ETH_MQ_TX_VMDQ_DCB:
                        /* DCB VMDQ in SRIOV mode, not implemented yet */
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "unsupported VMDQ mq_mode tx %u\n",
                                        port_id, dev_conf->txmode.mq_mode);
                        return -EINVAL;
                default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
                        /* if no mq mode is configured, use the default scheme */
                        dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
                        break;
                }

                /* check valid queue number */
                if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
                    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
                                        "queue number must be less than or equal to %d\n",
                                        port_id,
                                        RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
                        return -EINVAL;
                }
        } else {
                /* For VMDq+DCB mode check our configuration before we go further */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_conf *conf;

                        if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
                                                "!= %d\n",
                                                port_id, ETH_VMDQ_DCB_NUM_QUEUES);
                                return -EINVAL;
                        }
                        conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
                        if (!(conf->nb_queue_pools == ETH_16_POOLS ||
                              conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
                                                "nb_queue_pools must be %d or %d\n",
                                                port_id, ETH_16_POOLS, ETH_32_POOLS);
                                return -EINVAL;
                        }
                }
                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_tx_conf *conf;

                        if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q "
                                                "!= %d\n",
                                                port_id, ETH_VMDQ_DCB_NUM_QUEUES);
                                return -EINVAL;
                        }
                        conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
                        if (!(conf->nb_queue_pools == ETH_16_POOLS ||
                              conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
                                                "nb_queue_pools must be %d or %d\n",
                                                port_id, ETH_16_POOLS, ETH_32_POOLS);
                                return -EINVAL;
                        }
                }

                /* For DCB mode check our configuration before we go further */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
                        const struct rte_eth_dcb_rx_conf *conf;

                        if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
                                                "!= %d\n",
                                                port_id, ETH_DCB_NUM_QUEUES);
                                return -EINVAL;
                        }
                        conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
                        if (!(conf->nb_tcs == ETH_4_TCS ||
                              conf->nb_tcs == ETH_8_TCS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
                                                "nb_tcs must be %d or %d\n",
                                                port_id, ETH_4_TCS, ETH_8_TCS);
                                return -EINVAL;
                        }
                }

                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
                        const struct rte_eth_dcb_tx_conf *conf;

                        if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
                                                "!= %d\n",
                                                port_id, ETH_DCB_NUM_QUEUES);
                                return -EINVAL;
                        }
                        conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
                        if (!(conf->nb_tcs == ETH_4_TCS ||
                              conf->nb_tcs == ETH_8_TCS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
                                                "nb_tcs must be %d or %d\n",
                                                port_id, ETH_4_TCS, ETH_8_TCS);
                                return -EINVAL;
                        }
                }
        }
        return 0;
}

int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        int diag;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
                PMD_DEBUG_TRACE(
                        "Number of RX queues requested (%u) is greater than max supported (%d)\n",
                        nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
                PMD_DEBUG_TRACE(
                        "Number of TX queues requested (%u) is greater than max supported (%d)\n",
                        nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
        if (nb_rx_q > dev_info.max_rx_queues) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
                                port_id, nb_rx_q, dev_info.max_rx_queues);
                return -EINVAL;
        }
        if (nb_rx_q == 0) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
                return -EINVAL;
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
                                port_id, nb_tx_q, dev_info.max_tx_queues);
                return -EINVAL;
        }
        if (nb_tx_q == 0) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
                return -EINVAL;
        }

        /* Copy the dev_conf parameter into the dev structure */
        memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

        /*
         * If link state interrupt is enabled, check that the
         * device supports it.
         */
        if (dev_conf->intr_conf.lsc == 1) {
                const struct rte_pci_driver *pci_drv = &dev->driver->pci_drv;

                if (!(pci_drv->drv_flags & RTE_PCI_DRV_INTR_LSC)) {
                        PMD_DEBUG_TRACE("driver %s does not support lsc\n",
                                        pci_drv->name);
                        return -EINVAL;
                }
        }

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (dev_conf->rxmode.jumbo_frame == 1) {
                if (dev_conf->rxmode.max_rx_pkt_len >
                    dev_info.max_rx_pktlen) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " > max valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)dev_info.max_rx_pktlen);
                        return -EINVAL;
                } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " < min valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)ETHER_MIN_LEN);
                        return -EINVAL;
                }
        } else {
                if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
                        dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
                        /* Use default value */
                        dev->data->dev_conf.rxmode.max_rx_pkt_len =
                                                        ETHER_MAX_LEN;
        }

        /* multiple queue mode checking */
        diag = rte_eth_dev_check_mq_mode(port_id, nb_rx_q, nb_tx_q, dev_conf);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_check_mq_mode = %d\n",
                                port_id, diag);
                return diag;
        }

        /*
         * Setup new number of RX/TX queues and reconfigure device.
         */
        diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
                                port_id, diag);
                return diag;
        }

        diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                return diag;
        }

        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                return diag;
        }

        return 0;
}

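/*
 * Illustrative bring-up sequence using the APIs in this file (error
 * handling abbreviated; mbuf_pool is assumed to have been created
 * elsewhere):
 *
 *   struct rte_eth_conf port_conf = {
 *           .rxmode = { .mq_mode = ETH_MQ_RX_NONE },
 *   };
 *
 *   rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *   rte_eth_rx_queue_setup(port_id, 0, 128,
 *                          rte_eth_dev_socket_id(port_id), NULL, mbuf_pool);
 *   rte_eth_tx_queue_setup(port_id, 0, 512,
 *                          rte_eth_dev_socket_id(port_id), NULL);
 *   rte_eth_dev_start(port_id);
 */
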
static void
rte_eth_dev_config_restore(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct ether_addr addr;
        uint16_t i;
        uint32_t pool = 0;

        dev = &rte_eth_devices[port_id];

        rte_eth_dev_info_get(port_id, &dev_info);

        if (RTE_ETH_DEV_SRIOV(dev).active)
                pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

        /* replay MAC address configuration */
        for (i = 0; i < dev_info.max_mac_addrs; i++) {
                addr = dev->data->mac_addrs[i];

                /* skip zero address */
                if (is_zero_ether_addr(&addr))
                        continue;

                /* add address to the hardware */
                if (*dev->dev_ops->mac_addr_add &&
                        (dev->data->mac_pool_sel[i] & (1ULL << pool)))
                        (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
                else {
                        PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
                                        port_id);
                        /* exit the loop, but do not return an error */
                        break;
                }
        }

        /* replay promiscuous configuration */
        if (rte_eth_promiscuous_get(port_id) == 1)
                rte_eth_promiscuous_enable(port_id);
        else if (rte_eth_promiscuous_get(port_id) == 0)
                rte_eth_promiscuous_disable(port_id);

        /* replay all multicast configuration */
        if (rte_eth_allmulticast_get(port_id) == 1)
                rte_eth_allmulticast_enable(port_id);
        else if (rte_eth_allmulticast_get(port_id) == 0)
                rte_eth_allmulticast_disable(port_id);
}

int
rte_eth_dev_start(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        int diag;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

        if (dev->data->dev_started != 0) {
                PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
                        " already started\n",
                        port_id);
                return 0;
        }

        diag = (*dev->dev_ops->dev_start)(dev);
        if (diag == 0)
                dev->data->dev_started = 1;
        else
                return diag;

        rte_eth_dev_config_restore(port_id);

        if (dev->data->dev_conf.intr_conf.lsc != 0) {
                FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
                (*dev->dev_ops->link_update)(dev, 0);
        }
        return 0;
}

void
rte_eth_dev_stop(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_RET();

        VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

        if (dev->data->dev_started == 0) {
                PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
                        " already stopped\n",
                        port_id);
                return;
        }

        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_stop)(dev);
}

int
rte_eth_dev_set_link_up(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
        return (*dev->dev_ops->dev_set_link_up)(dev);
}

int
rte_eth_dev_set_link_down(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
        return (*dev->dev_ops->dev_set_link_down)(dev);
}

void
rte_eth_dev_close(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_RET();

        VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_close)(dev);
}

1396 int
1397 rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
1398                        uint16_t nb_rx_desc, unsigned int socket_id,
1399                        const struct rte_eth_rxconf *rx_conf,
1400                        struct rte_mempool *mp)
1401 {
1402         int ret;
1403         uint32_t mbp_buf_size;
1404         struct rte_eth_dev *dev;
1405         struct rte_eth_dev_info dev_info;
1406
1407         /* This function is only safe when called from the primary process
1408          * in a multi-process setup*/
1409         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1410
1411         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1412
1413         dev = &rte_eth_devices[port_id];
1414         if (rx_queue_id >= dev->data->nb_rx_queues) {
1415                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
1416                 return -EINVAL;
1417         }
1418
1419         if (dev->data->dev_started) {
1420                 PMD_DEBUG_TRACE(
1421                     "port %d must be stopped to allow configuration\n", port_id);
1422                 return -EBUSY;
1423         }
1424
1425         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1426         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1427
1428         /*
1429          * Check the size of the mbuf data buffer.
1430          * This value must be provided in the private data of the memory pool.
1431          * First check that the memory pool has a valid private data.
1432          */
1433         rte_eth_dev_info_get(port_id, &dev_info);
1434         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1435                 PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
1436                                 mp->name, (int) mp->private_data_size,
1437                                 (int) sizeof(struct rte_pktmbuf_pool_private));
1438                 return -ENOSPC;
1439         }
1440         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1441
1442         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1443                 PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
1444                                 "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
1445                                 "=%d)\n",
1446                                 mp->name,
1447                                 (int)mbp_buf_size,
1448                                 (int)(RTE_PKTMBUF_HEADROOM +
1449                                       dev_info.min_rx_bufsize),
1450                                 (int)RTE_PKTMBUF_HEADROOM,
1451                                 (int)dev_info.min_rx_bufsize);
1452                 return -EINVAL;
1453         }
1454
1455         if (rx_conf == NULL)
1456                 rx_conf = &dev_info.default_rxconf;
1457
1458         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1459                                               socket_id, rx_conf, mp);
1460         if (!ret) {
1461                 if (!dev->data->min_rx_buf_size ||
1462                     dev->data->min_rx_buf_size > mbp_buf_size)
1463                         dev->data->min_rx_buf_size = mbp_buf_size;
1464         }
1465
1466         return ret;
1467 }
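
/*
 * Illustrative usage sketch, not part of the ethdev API: set up RX queue 0
 * with 128 descriptors on the device's NUMA socket, passing a NULL rx_conf
 * so the driver-provided default_rxconf from dev_info is used, exactly as
 * the fallback above implements. The descriptor count is a hypothetical
 * example value.
 */
static int __attribute__((unused))
example_setup_rx_queue(uint8_t port_id, struct rte_mempool *mb_pool)
{
        return rte_eth_rx_queue_setup(port_id, 0, 128,
                                      rte_eth_dev_socket_id(port_id),
                                      NULL, mb_pool);
}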
1468
1469 int
1470 rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
1471                        uint16_t nb_tx_desc, unsigned int socket_id,
1472                        const struct rte_eth_txconf *tx_conf)
1473 {
1474         struct rte_eth_dev *dev;
1475         struct rte_eth_dev_info dev_info;
1476
1477         /* This function is only safe when called from the primary process
1478          * in a multi-process setup. */
1479         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1480
1481         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1482
1483         dev = &rte_eth_devices[port_id];
1484         if (tx_queue_id >= dev->data->nb_tx_queues) {
1485                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1486                 return -EINVAL;
1487         }
1488
1489         if (dev->data->dev_started) {
1490                 PMD_DEBUG_TRACE(
1491                     "port %d must be stopped to allow configuration\n", port_id);
1492                 return -EBUSY;
1493         }
1494
1495         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1496         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1497
1498         rte_eth_dev_info_get(port_id, &dev_info);
1499
1500         if (tx_conf == NULL)
1501                 tx_conf = &dev_info.default_txconf;
1502
1503         return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
1504                                                socket_id, tx_conf);
1505 }
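
/*
 * Companion sketch for the TX side (illustrative only): a NULL tx_conf
 * again selects the driver's default_txconf. 512 descriptors is a
 * hypothetical example value.
 */
static int __attribute__((unused))
example_setup_tx_queue(uint8_t port_id)
{
        return rte_eth_tx_queue_setup(port_id, 0, 512,
                                      rte_eth_dev_socket_id(port_id), NULL);
}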
1506
1507 void
1508 rte_eth_promiscuous_enable(uint8_t port_id)
1509 {
1510         struct rte_eth_dev *dev;
1511
1512         VALID_PORTID_OR_RET(port_id);
1513         dev = &rte_eth_devices[port_id];
1514
1515         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1516         (*dev->dev_ops->promiscuous_enable)(dev);
1517         dev->data->promiscuous = 1;
1518 }
1519
1520 void
1521 rte_eth_promiscuous_disable(uint8_t port_id)
1522 {
1523         struct rte_eth_dev *dev;
1524
1525         VALID_PORTID_OR_RET(port_id);
1526         dev = &rte_eth_devices[port_id];
1527
1528         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1529         dev->data->promiscuous = 0;
1530         (*dev->dev_ops->promiscuous_disable)(dev);
1531 }
1532
1533 int
1534 rte_eth_promiscuous_get(uint8_t port_id)
1535 {
1536         struct rte_eth_dev *dev;
1537
1538         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1539
1540         dev = &rte_eth_devices[port_id];
1541         return dev->data->promiscuous;
1542 }
1543
1544 void
1545 rte_eth_allmulticast_enable(uint8_t port_id)
1546 {
1547         struct rte_eth_dev *dev;
1548
1549         VALID_PORTID_OR_RET(port_id);
1550         dev = &rte_eth_devices[port_id];
1551
1552         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1553         (*dev->dev_ops->allmulticast_enable)(dev);
1554         dev->data->all_multicast = 1;
1555 }
1556
1557 void
1558 rte_eth_allmulticast_disable(uint8_t port_id)
1559 {
1560         struct rte_eth_dev *dev;
1561
1562         VALID_PORTID_OR_RET(port_id);
1563         dev = &rte_eth_devices[port_id];
1564
1565         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1566         dev->data->all_multicast = 0;
1567         (*dev->dev_ops->allmulticast_disable)(dev);
1568 }
1569
1570 int
1571 rte_eth_allmulticast_get(uint8_t port_id)
1572 {
1573         struct rte_eth_dev *dev;
1574
1575         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1576
1577         dev = &rte_eth_devices[port_id];
1578         return dev->data->all_multicast;
1579 }
1580
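/*
 * Copy the driver-published link state into *link as a single atomic
 * 64-bit compare-and-set, so a caller never sees a torn mix of old and
 * new speed/duplex/status values; returns -1 if the CAS lost a race.
 */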
1581 static inline int
1582 rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
1583                                 struct rte_eth_link *link)
1584 {
1585         struct rte_eth_link *dst = link;
1586         struct rte_eth_link *src = &(dev->data->dev_link);
1587
1588         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1589                                         *(uint64_t *)src) == 0)
1590                 return -1;
1591
1592         return 0;
1593 }
1594
1595 void
1596 rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
1597 {
1598         struct rte_eth_dev *dev;
1599
1600         VALID_PORTID_OR_RET(port_id);
1601         dev = &rte_eth_devices[port_id];
1602
1603         if (dev->data->dev_conf.intr_conf.lsc != 0)
1604                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1605         else {
1606                 FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1607                 (*dev->dev_ops->link_update)(dev, 1);
1608                 *eth_link = dev->data->dev_link;
1609         }
1610 }
1611
1612 void
1613 rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
1614 {
1615         struct rte_eth_dev *dev;
1616
1617         VALID_PORTID_OR_RET(port_id);
1618         dev = &rte_eth_devices[port_id];
1619
1620         if (dev->data->dev_conf.intr_conf.lsc != 0)
1621                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1622         else {
1623                 FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1624                 (*dev->dev_ops->link_update)(dev, 0);
1625                 *eth_link = dev->data->dev_link;
1626         }
1627 }
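
/*
 * Illustrative sketch (not part of the ethdev API): a cheap link probe for
 * polling loops. rte_eth_link_get_nowait() asks the driver not to wait for
 * completion, unlike rte_eth_link_get() above, which passes
 * wait_to_complete = 1.
 */
static int __attribute__((unused))
example_link_is_up(uint8_t port_id)
{
        struct rte_eth_link link;

        memset(&link, 0, sizeof(link));
        rte_eth_link_get_nowait(port_id, &link);
        return link.link_status;
}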
1628
1629 int
1630 rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
1631 {
1632         struct rte_eth_dev *dev;
1633
1634         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1635
1636         dev = &rte_eth_devices[port_id];
1637         memset(stats, 0, sizeof(*stats));
1638
1639         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1640         (*dev->dev_ops->stats_get)(dev, stats);
1641         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1642         return 0;
1643 }
1644
1645 void
1646 rte_eth_stats_reset(uint8_t port_id)
1647 {
1648         struct rte_eth_dev *dev;
1649
1650         VALID_PORTID_OR_RET(port_id);
1651         dev = &rte_eth_devices[port_id];
1652
1653         FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1654         (*dev->dev_ops->stats_reset)(dev);
1655 }
1656
1657 /* retrieve ethdev extended statistics */
1658 int
1659 rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
1660         unsigned n)
1661 {
1662         struct rte_eth_stats eth_stats;
1663         struct rte_eth_dev *dev;
1664         unsigned count, i, q;
1665         uint64_t val, *stats_ptr;
1666
1667         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1668
1669         dev = &rte_eth_devices[port_id];
1670
1671         /* implemented by the driver */
1672         if (dev->dev_ops->xstats_get != NULL)
1673                 return (*dev->dev_ops->xstats_get)(dev, xstats, n);
1674
1675         /* else, return generic statistics */
1676         count = RTE_NB_STATS;
1677         count += dev->data->nb_rx_queues * RTE_NB_RXQ_STATS;
1678         count += dev->data->nb_tx_queues * RTE_NB_TXQ_STATS;
1679         if (n < count)
1680                 return count;
1681
1682         /* now fill the xstats structure */
1683
1684         count = 0;
1685         rte_eth_stats_get(port_id, &eth_stats);
1686
1687         /* global stats */
1688         for (i = 0; i < RTE_NB_STATS; i++) {
1689                 stats_ptr = RTE_PTR_ADD(&eth_stats,
1690                                         rte_stats_strings[i].offset);
1691                 val = *stats_ptr;
1692                 snprintf(xstats[count].name, sizeof(xstats[count].name),
1693                         "%s", rte_stats_strings[i].name);
1694                 xstats[count++].value = val;
1695         }
1696
1697         /* per-rxq stats */
1698         for (q = 0; q < dev->data->nb_rx_queues; q++) {
1699                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
1700                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1701                                         rte_rxq_stats_strings[i].offset +
1702                                         q * sizeof(uint64_t));
1703                         val = *stats_ptr;
1704                         snprintf(xstats[count].name, sizeof(xstats[count].name),
1705                                 "rx_queue_%u_%s", q,
1706                                 rte_rxq_stats_strings[i].name);
1707                         xstats[count++].value = val;
1708                 }
1709         }
1710
1711         /* per-txq stats */
1712         for (q = 0; q < dev->data->nb_tx_queues; q++) {
1713                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
1714                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1715                                         rte_txq_stats_strings[i].offset +
1716                                         q * sizeof(uint64_t));
1717                         val = *stats_ptr;
1718                         snprintf(xstats[count].name, sizeof(xstats[count].name),
1719                                 "tx_queue_%u_%s", q,
1720                                 rte_txq_stats_strings[i].name);
1721                         xstats[count++].value = val;
1722                 }
1723         }
1724
1725         return count;
1726 }
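
/*
 * Illustrative sketch of the two-call pattern the function above implies:
 * probe with n == 0 to learn the required array size, then allocate and
 * fetch. "example_xstats" is an arbitrary rte_malloc tag.
 */
static int __attribute__((unused))
example_dump_xstats(uint8_t port_id)
{
        struct rte_eth_xstats *xstats;
        int cnt, i;

        cnt = rte_eth_xstats_get(port_id, NULL, 0);
        if (cnt < 0)
                return cnt;
        xstats = rte_malloc("example_xstats", sizeof(*xstats) * cnt, 0);
        if (xstats == NULL)
                return -ENOMEM;
        cnt = rte_eth_xstats_get(port_id, xstats, cnt);
        for (i = 0; i < cnt; i++)
                printf("%s: %"PRIu64"\n", xstats[i].name, xstats[i].value);
        rte_free(xstats);
        return 0;
}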
1727
1728 /* reset ethdev extended statistics */
1729 void
1730 rte_eth_xstats_reset(uint8_t port_id)
1731 {
1732         struct rte_eth_dev *dev;
1733
1734         VALID_PORTID_OR_RET(port_id);
1735         dev = &rte_eth_devices[port_id];
1736
1737         /* implemented by the driver */
1738         if (dev->dev_ops->xstats_reset != NULL) {
1739                 (*dev->dev_ops->xstats_reset)(dev);
1740                 return;
1741         }
1742
1743         /* fallback to default */
1744         rte_eth_stats_reset(port_id);
1745 }
1746
1747 static int
1748 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1749                 uint8_t is_rx)
1750 {
1751         struct rte_eth_dev *dev;
1752
1753         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1754
1755         dev = &rte_eth_devices[port_id];
1756
1757         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1758         return (*dev->dev_ops->queue_stats_mapping_set)
1759                         (dev, queue_id, stat_idx, is_rx);
1760 }
1761
1762
1763 int
1764 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1765                 uint8_t stat_idx)
1766 {
1767         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1768                         STAT_QMAP_TX);
1769 }
1770
1771
1772 int
1773 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1774                 uint8_t stat_idx)
1775 {
1776         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1777                         STAT_QMAP_RX);
1778 }
1779
1780
1781 void
1782 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1783 {
1784         struct rte_eth_dev *dev;
1785
1786         VALID_PORTID_OR_RET(port_id);
1787         dev = &rte_eth_devices[port_id];
1788
1789         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
1790
1791         FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1792         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1793         dev_info->pci_dev = dev->pci_dev;
1794         if (dev->driver)
1795                 dev_info->driver_name = dev->driver->pci_drv.name;
1796 }
1797
1798 void
1799 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1800 {
1801         struct rte_eth_dev *dev;
1802
1803         VALID_PORTID_OR_RET(port_id);
1804         dev = &rte_eth_devices[port_id];
1805         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1806 }
1807
1808
1809 int
1810 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1811 {
1812         struct rte_eth_dev *dev;
1813
1814         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1815
1816         dev = &rte_eth_devices[port_id];
1817         *mtu = dev->data->mtu;
1818         return 0;
1819 }
1820
1821 int
1822 rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
1823 {
1824         int ret;
1825         struct rte_eth_dev *dev;
1826
1827         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1828         dev = &rte_eth_devices[port_id];
1829         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
1830
1831         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
1832         if (!ret)
1833                 dev->data->mtu = mtu;
1834
1835         return ret;
1836 }
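
/*
 * Illustrative sketch: change the MTU only when it actually differs,
 * relying on the function above to update dev->data->mtu on success.
 */
static int __attribute__((unused))
example_change_mtu(uint8_t port_id, uint16_t new_mtu)
{
        uint16_t cur_mtu;
        int ret;

        ret = rte_eth_dev_get_mtu(port_id, &cur_mtu);
        if (ret < 0)
                return ret;
        if (cur_mtu == new_mtu)
                return 0;
        return rte_eth_dev_set_mtu(port_id, new_mtu);
}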
1837
1838 int
1839 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1840 {
1841         struct rte_eth_dev *dev;
1842
1843         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1844         dev = &rte_eth_devices[port_id];
1845         if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1846                 PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1847                 return -ENOSYS;
1848         }
1849
1850         if (vlan_id > 4095) {
1851                 PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1852                                 port_id, (unsigned) vlan_id);
1853                 return -EINVAL;
1854         }
1855         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1856
1857         return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1858 }
1859
1860 int
1861 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1862 {
1863         struct rte_eth_dev *dev;
1864
1865         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1866         dev = &rte_eth_devices[port_id];
1867         if (rx_queue_id >= dev->data->nb_rx_queues) {
1868                 PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
1869                 return -EINVAL;
1870         }
1871
1872         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1873         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1874
1875         return 0;
1876 }
1877
1878 int
1879 rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
1880 {
1881         struct rte_eth_dev *dev;
1882
1883         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1884         dev = &rte_eth_devices[port_id];
1885         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1886         (*dev->dev_ops->vlan_tpid_set)(dev, tpid);
1887
1888         return 0;
1889 }
1890
1891 int
1892 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
1893 {
1894         struct rte_eth_dev *dev;
1895         int ret = 0;
1896         int mask = 0;
1897         int cur, org = 0;
1898
1899         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1900         dev = &rte_eth_devices[port_id];
1901
1902         /* check which options were changed by the application */
1903         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
1904         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
1905         if (cur != org) {
1906                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
1907                 mask |= ETH_VLAN_STRIP_MASK;
1908         }
1909
1910         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
1911         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
1912         if (cur != org) {
1913                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
1914                 mask |= ETH_VLAN_FILTER_MASK;
1915         }
1916
1917         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
1918         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
1919         if (cur != org) {
1920                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
1921                 mask |= ETH_VLAN_EXTEND_MASK;
1922         }
1923
1924         /* no change */
1925         if (mask == 0)
1926                 return ret;
1927
1928         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
1929         (*dev->dev_ops->vlan_offload_set)(dev, mask);
1930
1931         return ret;
1932 }
1933
1934 int
1935 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1936 {
1937         struct rte_eth_dev *dev;
1938         int ret = 0;
1939
1940         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1941         dev = &rte_eth_devices[port_id];
1942
1943         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1944                 ret |= ETH_VLAN_STRIP_OFFLOAD;
1945
1946         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1947                 ret |= ETH_VLAN_FILTER_OFFLOAD;
1948
1949         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1950                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
1951
1952         return ret;
1953 }
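
/*
 * Illustrative sketch: enable VLAN stripping with the read-modify-write
 * pattern the mask comparison above expects, so the other offload bits
 * are preserved.
 */
static int __attribute__((unused))
example_enable_vlan_strip(uint8_t port_id)
{
        int mask = rte_eth_dev_get_vlan_offload(port_id);

        if (mask < 0)
                return mask;
        return rte_eth_dev_set_vlan_offload(port_id,
                                            mask | ETH_VLAN_STRIP_OFFLOAD);
}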
1954
1955 int
1956 rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
1957 {
1958         struct rte_eth_dev *dev;
1959
1960         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1961         dev = &rte_eth_devices[port_id];
1962         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
1963         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
1964
1965         return 0;
1966 }
1967
1968 int
1969 rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
1970                                       struct rte_fdir_filter *fdir_filter,
1971                                       uint8_t queue)
1972 {
1973         struct rte_eth_dev *dev;
1974
1975         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1976         dev = &rte_eth_devices[port_id];
1977
1978         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1979                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1980                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1981                 return -ENOSYS;
1982         }
1983
1984         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1985              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1986             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1987                 PMD_DEBUG_TRACE("Ports are meaningless for SCTP and "
1988                                 "NONE l4type; source and destination "
1989                                 "ports must be null\n");
1990                 return -EINVAL;
1991         }
1992
1993         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_signature_filter, -ENOTSUP);
1994         return (*dev->dev_ops->fdir_add_signature_filter)(dev, fdir_filter,
1995                                                                 queue);
1996 }
1997
1998 int
1999 rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
2000                                          struct rte_fdir_filter *fdir_filter,
2001                                          uint8_t queue)
2002 {
2003         struct rte_eth_dev *dev;
2004
2005         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2006         dev = &rte_eth_devices[port_id];
2007
2008         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
2009                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
2010                                 port_id, dev->data->dev_conf.fdir_conf.mode);
2011                 return -ENOSYS;
2012         }
2013
2014         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
2015              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
2016             && (fdir_filter->port_src || fdir_filter->port_dst)) {
2017                 PMD_DEBUG_TRACE("Ports are meaningless for SCTP and "
2018                                 "NONE l4type; source and destination "
2019                                 "ports must be null\n");
2020                 return -EINVAL;
2021         }
2022
2023         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_signature_filter, -ENOTSUP);
2024         return (*dev->dev_ops->fdir_update_signature_filter)(dev, fdir_filter,
2025                                                                 queue);
2026
2027 }
2028
2029 int
2030 rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
2031                                          struct rte_fdir_filter *fdir_filter)
2032 {
2033         struct rte_eth_dev *dev;
2034
2035         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2036         dev = &rte_eth_devices[port_id];
2037
2038         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
2039                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
2040                                 port_id, dev->data->dev_conf.fdir_conf.mode);
2041                 return -ENOSYS;
2042         }
2043
2044         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
2045              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
2046             && (fdir_filter->port_src || fdir_filter->port_dst)) {
2047                 PMD_DEBUG_TRACE("Ports are meaningless for SCTP and "
2048                                 "NONE l4type; source and destination "
2049                                 "ports must be null\n");
2050                 return -EINVAL;
2051         }
2052
2053         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_signature_filter, -ENOTSUP);
2054         return (*dev->dev_ops->fdir_remove_signature_filter)(dev, fdir_filter);
2055 }
2056
2057 int
2058 rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir)
2059 {
2060         struct rte_eth_dev *dev;
2061
2062         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2063         dev = &rte_eth_devices[port_id];
2064         if (!(dev->data->dev_conf.fdir_conf.mode)) {
2065                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
2066                 return -ENOSYS;
2067         }
2068
2069         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_infos_get, -ENOTSUP);
2070
2071         (*dev->dev_ops->fdir_infos_get)(dev, fdir);
2072         return 0;
2073 }
2074
2075 int
2076 rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
2077                                     struct rte_fdir_filter *fdir_filter,
2078                                     uint16_t soft_id, uint8_t queue,
2079                                     uint8_t drop)
2080 {
2081         struct rte_eth_dev *dev;
2082
2083         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2084         dev = &rte_eth_devices[port_id];
2085
2086         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
2087                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
2088                                 port_id, dev->data->dev_conf.fdir_conf.mode);
2089                 return -ENOSYS;
2090         }
2091
2092         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
2093              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
2094             && (fdir_filter->port_src || fdir_filter->port_dst)) {
2095                 PMD_DEBUG_TRACE("Ports are meaningless for SCTP and "
2096                                 "NONE l4type; source and destination "
2097                                 "ports must be null\n");
2098                 return -EINVAL;
2099         }
2100
2101         /* For now IPv6 is not supported with perfect filter */
2102         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
2103                 return -ENOTSUP;
2104
2105         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_perfect_filter, -ENOTSUP);
2106         return (*dev->dev_ops->fdir_add_perfect_filter)(dev, fdir_filter,
2107                                                                 soft_id, queue,
2108                                                                 drop);
2109 }
2110
2111 int
2112 rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
2113                                        struct rte_fdir_filter *fdir_filter,
2114                                        uint16_t soft_id, uint8_t queue,
2115                                        uint8_t drop)
2116 {
2117         struct rte_eth_dev *dev;
2118
2119         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2120         dev = &rte_eth_devices[port_id];
2121
2122         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
2123                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
2124                                 port_id, dev->data->dev_conf.fdir_conf.mode);
2125                 return -ENOSYS;
2126         }
2127
2128         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
2129              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
2130             && (fdir_filter->port_src || fdir_filter->port_dst)) {
2131                 PMD_DEBUG_TRACE("Ports are meaningless for SCTP and "
2132                                 "NONE l4type; source and destination "
2133                                 "ports must be null\n");
2134                 return -EINVAL;
2135         }
2136
2137         /* For now IPv6 is not supported with perfect filter */
2138         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
2139                 return -ENOTSUP;
2140
2141         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_perfect_filter, -ENOTSUP);
2142         return (*dev->dev_ops->fdir_update_perfect_filter)(dev, fdir_filter,
2143                                                         soft_id, queue, drop);
2144 }
2145
2146 int
2147 rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
2148                                        struct rte_fdir_filter *fdir_filter,
2149                                        uint16_t soft_id)
2150 {
2151         struct rte_eth_dev *dev;
2152
2153         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2154         dev = &rte_eth_devices[port_id];
2155
2156         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
2157                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
2158                                 port_id, dev->data->dev_conf.fdir_conf.mode);
2159                 return -ENOSYS;
2160         }
2161
2162         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
2163              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
2164             && (fdir_filter->port_src || fdir_filter->port_dst)) {
2165                 PMD_DEBUG_TRACE("Ports are meaningless for SCTP and "
2166                                 "NONE l4type; source and destination "
2167                                 "ports must be null\n");
2168                 return -EINVAL;
2169         }
2170
2171         /* For now IPv6 is not supported with perfect filter */
2172         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
2173                 return -ENOTSUP;
2174
2175         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_perfect_filter, -ENOTSUP);
2176         return (*dev->dev_ops->fdir_remove_perfect_filter)(dev, fdir_filter,
2177                                                                 soft_id);
2178 }
2179
2180 int
2181 rte_eth_dev_fdir_set_masks(uint8_t port_id, struct rte_fdir_masks *fdir_mask)
2182 {
2183         struct rte_eth_dev *dev;
2184
2185         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2186         dev = &rte_eth_devices[port_id];
2187         if (!(dev->data->dev_conf.fdir_conf.mode)) {
2188                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
2189                 return -ENOSYS;
2190         }
2191
2192         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_set_masks, -ENOTSUP);
2193         return (*dev->dev_ops->fdir_set_masks)(dev, fdir_mask);
2194 }
2195
2196 int
2197 rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
2198 {
2199         struct rte_eth_dev *dev;
2200
2201         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2202         dev = &rte_eth_devices[port_id];
2203         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2204         memset(fc_conf, 0, sizeof(*fc_conf));
2205         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
2206 }
2207
2208 int
2209 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
2210 {
2211         struct rte_eth_dev *dev;
2212
2213         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2214         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2215                 PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
2216                 return -EINVAL;
2217         }
2218
2219         dev = &rte_eth_devices[port_id];
2220         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2221         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
2222 }
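
/*
 * Illustrative sketch: switch a port to full (RX + TX) flow control while
 * keeping the watermarks and pause time the device currently reports,
 * using the get/set pair above.
 */
static int __attribute__((unused))
example_enable_flow_ctrl(uint8_t port_id)
{
        struct rte_eth_fc_conf fc_conf;
        int ret;

        ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
        if (ret < 0)
                return ret;
        fc_conf.mode = RTE_FC_FULL;
        return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}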
2223
2224 int
2225 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
2226 {
2227         struct rte_eth_dev *dev;
2228
2229         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2230         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2231                 PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
2232                 return -EINVAL;
2233         }
2234
2235         dev = &rte_eth_devices[port_id];
2236         /* High-water/low-water validation is device-specific */
2237         if (*dev->dev_ops->priority_flow_ctrl_set)
2238                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
2239         return -ENOTSUP;
2240 }
2241
2242 static int
2243 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2244                         uint16_t reta_size)
2245 {
2246         uint16_t i, num;
2247
2248         if (!reta_conf)
2249                 return -EINVAL;
2250
2251         if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
2252                 PMD_DEBUG_TRACE("Invalid reta size, should be %u aligned\n",
2253                                                         RTE_RETA_GROUP_SIZE);
2254                 return -EINVAL;
2255         }
2256
2257         num = reta_size / RTE_RETA_GROUP_SIZE;
2258         for (i = 0; i < num; i++) {
2259                 if (reta_conf[i].mask)
2260                         return 0;
2261         }
2262
2263         return -EINVAL;
2264 }
2265
2266 static int
2267 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2268                          uint16_t reta_size,
2269                          uint8_t max_rxq)
2270 {
2271         uint16_t i, idx, shift;
2272
2273         if (!reta_conf)
2274                 return -EINVAL;
2275
2276         if (max_rxq == 0) {
2277                 PMD_DEBUG_TRACE("No receive queue is available\n");
2278                 return -EINVAL;
2279         }
2280
2281         for (i = 0; i < reta_size; i++) {
2282                 idx = i / RTE_RETA_GROUP_SIZE;
2283                 shift = i % RTE_RETA_GROUP_SIZE;
2284                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2285                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2286                         PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
2287                                 "the maximum rxq index: %u\n", idx, shift,
2288                                 reta_conf[idx].reta[shift], max_rxq);
2289                         return -EINVAL;
2290                 }
2291         }
2292
2293         return 0;
2294 }
2295
2296 int
2297 rte_eth_dev_rss_reta_update(uint8_t port_id,
2298                             struct rte_eth_rss_reta_entry64 *reta_conf,
2299                             uint16_t reta_size)
2300 {
2301         struct rte_eth_dev *dev;
2302         int ret;
2303
2304         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2305         /* Check mask bits */
2306         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2307         if (ret < 0)
2308                 return ret;
2309
2310         dev = &rte_eth_devices[port_id];
2311
2312         /* Check entry value */
2313         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2314                                 dev->data->nb_rx_queues);
2315         if (ret < 0)
2316                 return ret;
2317
2318         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2319         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
2320 }
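
/*
 * Illustrative sketch: spread a 128-entry redirection table round-robin
 * across nb_queues RX queues. 128 is a hypothetical table size; real code
 * should use the reta_size the device reports.
 */
static int __attribute__((unused))
example_init_reta(uint8_t port_id, uint16_t nb_queues)
{
        struct rte_eth_rss_reta_entry64 reta_conf[128 / RTE_RETA_GROUP_SIZE];
        uint16_t i, idx, shift;

        if (nb_queues == 0)
                return -EINVAL;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < 128; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                reta_conf[idx].mask |= 1ULL << shift;
                reta_conf[idx].reta[shift] = i % nb_queues;
        }
        return rte_eth_dev_rss_reta_update(port_id, reta_conf, 128);
}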
2321
2322 int
2323 rte_eth_dev_rss_reta_query(uint8_t port_id,
2324                            struct rte_eth_rss_reta_entry64 *reta_conf,
2325                            uint16_t reta_size)
2326 {
2327         struct rte_eth_dev *dev;
2328         int ret;
2329
2330         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2334
2335         /* Check mask bits */
2336         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2337         if (ret < 0)
2338                 return ret;
2339
2340         dev = &rte_eth_devices[port_id];
2341         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2342         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
2343 }
2344
2345 int
2346 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
2347 {
2348         struct rte_eth_dev *dev;
2349         uint16_t rss_hash_protos;
2350
2351         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2352         rss_hash_protos = rss_conf->rss_hf;
2353         if ((rss_hash_protos != 0) &&
2354             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
2355                 PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
2356                                 rss_hash_protos);
2357                 return -EINVAL;
2358         }
2359         dev = &rte_eth_devices[port_id];
2360         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2361         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
2362 }
2363
2364 int
2365 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
2366                               struct rte_eth_rss_conf *rss_conf)
2367 {
2368         struct rte_eth_dev *dev;
2369
2370         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2371         dev = &rte_eth_devices[port_id];
2372         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2373         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
2374 }
2375
2376 int
2377 rte_eth_dev_udp_tunnel_add(uint8_t port_id,
2378                            struct rte_eth_udp_tunnel *udp_tunnel)
2379 {
2380         struct rte_eth_dev *dev;
2381
2382         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2383         if (udp_tunnel == NULL) {
2384                 PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2385                 return -EINVAL;
2386         }
2387
2388         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2389                 PMD_DEBUG_TRACE("Invalid tunnel type\n");
2390                 return -EINVAL;
2391         }
2392
2393         dev = &rte_eth_devices[port_id];
2394         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_add, -ENOTSUP);
2395         return (*dev->dev_ops->udp_tunnel_add)(dev, udp_tunnel);
2396 }
2397
2398 int
2399 rte_eth_dev_udp_tunnel_delete(uint8_t port_id,
2400                               struct rte_eth_udp_tunnel *udp_tunnel)
2401 {
2402         struct rte_eth_dev *dev;
2403
2404         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2405         dev = &rte_eth_devices[port_id];
2406
2407         if (udp_tunnel == NULL) {
2408                 PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2409                 return -EINVAL;
2410         }
2411
2412         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2413                 PMD_DEBUG_TRACE("Invalid tunnel type\n");
2414                 return -EINVAL;
2415         }
2416
2417         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_del, -ENOTSUP);
2418         return (*dev->dev_ops->udp_tunnel_del)(dev, udp_tunnel);
2419 }
2420
2421 int
2422 rte_eth_led_on(uint8_t port_id)
2423 {
2424         struct rte_eth_dev *dev;
2425
2426         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2427         dev = &rte_eth_devices[port_id];
2428         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2429         return (*dev->dev_ops->dev_led_on)(dev);
2430 }
2431
2432 int
2433 rte_eth_led_off(uint8_t port_id)
2434 {
2435         struct rte_eth_dev *dev;
2436
2437         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2438         dev = &rte_eth_devices[port_id];
2439         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2440         return (*dev->dev_ops->dev_led_off)(dev);
2441 }
2442
2443 /*
2444  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2445  * an empty spot.
2446  */
2447 static int
2448 get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2449 {
2450         struct rte_eth_dev_info dev_info;
2451         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2452         unsigned i;
2453
2454         rte_eth_dev_info_get(port_id, &dev_info);
2455
2456         for (i = 0; i < dev_info.max_mac_addrs; i++)
2457                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2458                         return i;
2459
2460         return -1;
2461 }
2462
2463 static const struct ether_addr null_mac_addr;
2464
2465 int
2466 rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
2467                         uint32_t pool)
2468 {
2469         struct rte_eth_dev *dev;
2470         int index;
2471         uint64_t pool_mask;
2472
2473         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2474         dev = &rte_eth_devices[port_id];
2475         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2476
2477         if (is_zero_ether_addr(addr)) {
2478                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2479                         port_id);
2480                 return -EINVAL;
2481         }
2482         if (pool >= ETH_64_POOLS) {
2483                 PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2484                 return -EINVAL;
2485         }
2486
2487         index = get_mac_addr_index(port_id, addr);
2488         if (index < 0) {
2489                 index = get_mac_addr_index(port_id, &null_mac_addr);
2490                 if (index < 0) {
2491                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2492                                 port_id);
2493                         return -ENOSPC;
2494                 }
2495         } else {
2496                 pool_mask = dev->data->mac_pool_sel[index];
2497
2498                 /* If both the MAC address and the pool are already set, do nothing */
2499                 if (pool_mask & (1ULL << pool))
2500                         return 0;
2501         }
2502
2503         /* Update NIC */
2504         (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2505
2506         /* Update address in NIC data structure */
2507         ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2508
2509         /* Update pool bitmap in NIC data structure */
2510         dev->data->mac_pool_sel[index] |= (1ULL << pool);
2511
2512         return 0;
2513 }
2514
2515 int
2516 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
2517 {
2518         struct rte_eth_dev *dev;
2519         int index;
2520
2521         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2522         dev = &rte_eth_devices[port_id];
2523         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2524
2525         index = get_mac_addr_index(port_id, addr);
2526         if (index == 0) {
2527                 PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2528                 return -EADDRINUSE;
2529         } else if (index < 0)
2530                 return 0;  /* Do nothing if address wasn't found */
2531
2532         /* Update NIC */
2533         (*dev->dev_ops->mac_addr_remove)(dev, index);
2534
2535         /* Update address in NIC data structure */
2536         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2537
2538         /* reset pool bitmap */
2539         dev->data->mac_pool_sel[index] = 0;
2540
2541         return 0;
2542 }
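
/*
 * Illustrative sketch: install a locally administered secondary MAC on
 * pool 0 and remove it again. The address bytes are arbitrary example
 * values.
 */
static void __attribute__((unused))
example_secondary_mac(uint8_t port_id)
{
        struct ether_addr mac = {
                .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
        };

        if (rte_eth_dev_mac_addr_add(port_id, &mac, 0) == 0)
                rte_eth_dev_mac_addr_remove(port_id, &mac);
}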
2543
2544 int
2545 rte_eth_dev_set_vf_rxmode(uint8_t port_id, uint16_t vf,
2546                                 uint16_t rx_mode, uint8_t on)
2547 {
2548         uint16_t num_vfs;
2549         struct rte_eth_dev *dev;
2550         struct rte_eth_dev_info dev_info;
2551
2552         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2553
2554         dev = &rte_eth_devices[port_id];
2555         rte_eth_dev_info_get(port_id, &dev_info);
2556
2557         num_vfs = dev_info.max_vfs;
2558         if (vf >= num_vfs) {
2559                 PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
2560                 return -EINVAL;
2561         }
2562
2563         if (rx_mode == 0) {
2564                 PMD_DEBUG_TRACE("set VF RX mode:mode mask cannot be zero\n");
2565                 return -EINVAL;
2566         }
2567         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
2568         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
2569 }
2570
2571 /*
2572  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2573  * an empty spot.
2574  */
2575 static int
2576 get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2577 {
2578         struct rte_eth_dev_info dev_info;
2579         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2580         unsigned i;
2581
2582         rte_eth_dev_info_get(port_id, &dev_info);
2583         if (!dev->data->hash_mac_addrs)
2584                 return -1;
2585
2586         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2587                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2588                         ETHER_ADDR_LEN) == 0)
2589                         return i;
2590
2591         return -1;
2592 }
2593
2594 int
2595 rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
2596                                 uint8_t on)
2597 {
2598         int index;
2599         int ret;
2600         struct rte_eth_dev *dev;
2601
2602         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2603
2604         dev = &rte_eth_devices[port_id];
2605         if (is_zero_ether_addr(addr)) {
2606                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2607                         port_id);
2608                 return -EINVAL;
2609         }
2610
2611         index = get_hash_mac_addr_index(port_id, addr);
2612         /* Check if it's already there, and do nothing */
2613         if ((index >= 0) && (on))
2614                 return 0;
2615
2616         if (index < 0) {
2617                 if (!on) {
2618                         PMD_DEBUG_TRACE("port %d: the MAC address was not "
2619                                 "set in UTA\n", port_id);
2620                         return -EINVAL;
2621                 }
2622
2623                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2624                 if (index < 0) {
2625                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2626                                         port_id);
2627                         return -ENOSPC;
2628                 }
2629         }
2630
2631         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2632         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2633         if (ret == 0) {
2634                 /* Update address in NIC data structure */
2635                 if (on)
2636                         ether_addr_copy(addr,
2637                                         &dev->data->hash_mac_addrs[index]);
2638                 else
2639                         ether_addr_copy(&null_mac_addr,
2640                                         &dev->data->hash_mac_addrs[index]);
2641         }
2642
2643         return ret;
2644 }
2645
2646 int
2647 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2648 {
2649         struct rte_eth_dev *dev;
2650
2651         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2652
2653         dev = &rte_eth_devices[port_id];
2654
2655         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2656         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2657 }
2658
2659 int
2660 rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on)
2661 {
2662         uint16_t num_vfs;
2663         struct rte_eth_dev *dev;
2664         struct rte_eth_dev_info dev_info;
2665
2666         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2667
2668         dev = &rte_eth_devices[port_id];
2669         rte_eth_dev_info_get(port_id, &dev_info);
2670
2671         num_vfs = dev_info.max_vfs;
2672         if (vf >= num_vfs) {
2673                 PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2674                 return -EINVAL;
2675         }
2676
2677         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2678         return (*dev->dev_ops->set_vf_rx)(dev, vf, on);
2679 }
2680
2681 int
2682 rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on)
2683 {
2684         uint16_t num_vfs;
2685         struct rte_eth_dev *dev;
2686         struct rte_eth_dev_info dev_info;
2687
2688         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2689
2690         dev = &rte_eth_devices[port_id];
2691         rte_eth_dev_info_get(port_id, &dev_info);
2692
2693         num_vfs = dev_info.max_vfs;
2694         if (vf >= num_vfs) {
2695                 PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
2696                 return -EINVAL;
2697         }
2698
2699         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2700         return (*dev->dev_ops->set_vf_tx)(dev, vf, on);
2701 }
2702
2703 int
2704 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2705                                uint64_t vf_mask, uint8_t vlan_on)
2706 {
2707         struct rte_eth_dev *dev;
2708
2709         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2710
2711         dev = &rte_eth_devices[port_id];
2712
2713         if (vlan_id > ETHER_MAX_VLAN_ID) {
2714                 PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
2715                         vlan_id);
2716                 return -EINVAL;
2717         }
2718
2719         if (vf_mask == 0) {
2720                 PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
2721                 return -EINVAL;
2722         }
2723
2724         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2725         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2726                                                    vf_mask, vlan_on);
2727 }
2728
2729 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
2730                                         uint16_t tx_rate)
2731 {
2732         struct rte_eth_dev *dev;
2733         struct rte_eth_dev_info dev_info;
2734         struct rte_eth_link link;
2735
2736         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2737
2738         dev = &rte_eth_devices[port_id];
2739         rte_eth_dev_info_get(port_id, &dev_info);
2740         link = dev->data->dev_link;
2741
2742         if (queue_idx >= dev_info.max_tx_queues) {
2743                 PMD_DEBUG_TRACE("set queue rate limit:port %d: "
2744                                 "invalid queue id=%d\n", port_id, queue_idx);
2745                 return -EINVAL;
2746         }
2747
2748         if (tx_rate > link.link_speed) {
2749                 PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
2750                                 "greater than link speed=%d\n",
2751                                 tx_rate, link.link_speed);
2752                 return -EINVAL;
2753         }
2754
2755         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2756         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2757 }
2758
2759 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
2760                                 uint64_t q_msk)
2761 {
2762         struct rte_eth_dev *dev;
2763         struct rte_eth_dev_info dev_info;
2764         struct rte_eth_link link;
2765
2766         if (q_msk == 0)
2767                 return 0;
2768
2769         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2770
2771         dev = &rte_eth_devices[port_id];
2772         rte_eth_dev_info_get(port_id, &dev_info);
2773         link = dev->data->dev_link;
2774
2775         if (vf >= dev_info.max_vfs) {
2776                 PMD_DEBUG_TRACE("set VF rate limit:port %d: "
2777                                 "invalid vf id=%d\n", port_id, vf);
2778                 return -EINVAL;
2779         }
2780
2781         if (tx_rate > link.link_speed) {
2782                 PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
2783                                 "greater than link speed=%d\n",
2784                                 tx_rate, link.link_speed);
2785                 return -EINVAL;
2786         }
2787
2788         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
2789         return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
2790 }
2791
2792 int
2793 rte_eth_mirror_rule_set(uint8_t port_id,
2794                         struct rte_eth_mirror_conf *mirror_conf,
2795                         uint8_t rule_id, uint8_t on)
2796 {
2797         struct rte_eth_dev *dev;
2798
2799         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2800         if (mirror_conf->rule_type == 0) {
2801                 PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2802                 return -EINVAL;
2803         }
2804
2805         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2806                 PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
2807                                 ETH_64_POOLS - 1);
2808                 return -EINVAL;
2809         }
2810
2811         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
2812              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
2813             (mirror_conf->pool_mask == 0)) {
2814                 PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
2815                 return -EINVAL;
2816         }
2817
2818         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
2819             mirror_conf->vlan.vlan_mask == 0) {
2820                 PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
2821                 return -EINVAL;
2822         }
2823
2824         dev = &rte_eth_devices[port_id];
2825         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2826
2827         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2828 }
2829
2830 int
2831 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2832 {
2833         struct rte_eth_dev *dev;
2834
2835         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2836
2837         dev = &rte_eth_devices[port_id];
2838         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2839
2840         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2841 }
2842
2843 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2844 uint16_t
2845 rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
2846                  struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2847 {
2848         struct rte_eth_dev *dev;
2849
2850         VALID_PORTID_OR_ERR_RET(port_id, 0);
2851
2852         dev = &rte_eth_devices[port_id];
2853         FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
2854         if (queue_id >= dev->data->nb_rx_queues) {
2855                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
2856                 return 0;
2857         }
2858         return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
2859                                                 rx_pkts, nb_pkts);
2860 }
2861
2862 uint16_t
2863 rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
2864                  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2865 {
2866         struct rte_eth_dev *dev;
2867
2868         VALID_PORTID_OR_ERR_RET(port_id, 0);
2869
2870         dev = &rte_eth_devices[port_id];
2871
2872         FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
2873         if (queue_id >= dev->data->nb_tx_queues) {
2874                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
2875                 return 0;
2876         }
2877         return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
2878                                                 tx_pkts, nb_pkts);
2879 }
2880
2881 uint32_t
2882 rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
2883 {
2884         struct rte_eth_dev *dev;
2885
2886         VALID_PORTID_OR_ERR_RET(port_id, 0);
2887
2888         dev = &rte_eth_devices[port_id];
2889         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, 0);
2890         return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
2891 }
2892
2893 int
2894 rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
2895 {
2896         struct rte_eth_dev *dev;
2897
2898         VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2899
2900         dev = &rte_eth_devices[port_id];
2901         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
2902         return (*dev->dev_ops->rx_descriptor_done)(dev->data->rx_queues[queue_id],
2903                                                    offset);
2904 }
2905 #endif
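
/*
 * Illustrative sketch of the canonical polling loop built on the burst
 * functions: receive up to one burst and retransmit it on the same queue,
 * freeing whatever the TX path refused. The burst size is an arbitrary
 * example value.
 */
static void __attribute__((unused))
example_forward_burst(uint8_t port_id, uint16_t queue_id)
{
        struct rte_mbuf *pkts[32];
        uint16_t nb_rx, nb_tx, i;

        nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
        nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_rx);
        for (i = nb_tx; i < nb_rx; i++)
                rte_pktmbuf_free(pkts[i]);
}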
2906
2907 int
2908 rte_eth_dev_callback_register(uint8_t port_id,
2909                         enum rte_eth_event_type event,
2910                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2911 {
2912         struct rte_eth_dev *dev;
2913         struct rte_eth_dev_callback *user_cb;
2914
2915         if (!cb_fn)
2916                 return -EINVAL;
2917
2918         VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2919
2920         dev = &rte_eth_devices[port_id];
2921         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2922
2923         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
2924                 if (user_cb->cb_fn == cb_fn &&
2925                         user_cb->cb_arg == cb_arg &&
2926                         user_cb->event == event) {
2927                         break;
2928                 }
2929         }
2930
2931         /* create a new callback. */
2932         if (user_cb == NULL &&
2933             (user_cb = rte_zmalloc("INTR_USER_CALLBACK",
2934                                    sizeof(struct rte_eth_dev_callback), 0))) {
2935                 user_cb->cb_fn = cb_fn;
2936                 user_cb->cb_arg = cb_arg;
2937                 user_cb->event = event;
2938                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
2939         }
2940
2941         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2942         return (user_cb == NULL) ? -ENOMEM : 0;
2943 }

int
rte_eth_dev_callback_unregister(uint8_t port_id,
                        enum rte_eth_event_type event,
                        rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
        int ret;
        struct rte_eth_dev *dev;
        struct rte_eth_dev_callback *cb, *next;

        if (!cb_fn)
                return -EINVAL;

        VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        rte_spinlock_lock(&rte_eth_dev_cb_lock);

        ret = 0;
        for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

                next = TAILQ_NEXT(cb, next);

                /*
                 * a cb_arg of (void *)-1 is a wildcard: it matches every
                 * registration of cb_fn for this event, whatever argument
                 * it was registered with
                 */
                if (cb->cb_fn != cb_fn || cb->event != event ||
                                (cb_arg != (void *)-1 &&
                                cb->cb_arg != cb_arg))
                        continue;

                /*
                 * if this callback is not executing right now,
                 * then remove it.
                 */
                if (cb->active == 0) {
                        TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
                        rte_free(cb);
                } else {
                        ret = -EAGAIN;
                }
        }

        rte_spinlock_unlock(&rte_eth_dev_cb_lock);
        return ret;
}
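
/*
 * Example: unregistering the callback registered above. A minimal
 * sketch; passing (void *)-1 as cb_arg removes every registration of
 * on_link_change() for the event, regardless of its argument.
 *
 *      rte_eth_dev_callback_unregister(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                                      on_link_change, (void *)-1);
 */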

void
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
        enum rte_eth_event_type event)
{
        struct rte_eth_dev_callback *cb_lst;
        struct rte_eth_dev_callback dev_cb;

        rte_spinlock_lock(&rte_eth_dev_cb_lock);
        TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
                if (cb_lst->cb_fn == NULL || cb_lst->event != event)
                        continue;
                /*
                 * copy the entry and mark it active so the lock can be
                 * dropped while user code runs; unregistration of an
                 * active callback is deferred with -EAGAIN
                 */
                dev_cb = *cb_lst;
                cb_lst->active = 1;
                rte_spinlock_unlock(&rte_eth_dev_cb_lock);
                dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
                                                dev_cb.cb_arg);
                rte_spinlock_lock(&rte_eth_dev_cb_lock);
                cb_lst->active = 0;
        }
        rte_spinlock_unlock(&rte_eth_dev_cb_lock);
}
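
/*
 * Example: how a PMD hands a link-state change to the application. A
 * sketch only; in practice this call sits inside the driver's interrupt
 * handler, which is driver specific.
 *
 *      _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
 */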

#ifdef RTE_NIC_BYPASS
int
rte_eth_dev_bypass_init(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
        (*dev->dev_ops->bypass_init)(dev);
        return 0;
}

int
rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
        (*dev->dev_ops->bypass_state_show)(dev, state);
        return 0;
}

int
rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
        (*dev->dev_ops->bypass_state_set)(dev, new_state);
        return 0;
}

int
rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        /* check the op that is actually invoked below, not state_show */
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_show, -ENOTSUP);
        (*dev->dev_ops->bypass_event_show)(dev, event, state);
        return 0;
}

int
rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
        (*dev->dev_ops->bypass_event_set)(dev, event, state);
        return 0;
}

int
rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
        (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
        return 0;
}

int
rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
        (*dev->dev_ops->bypass_ver_show)(dev, ver);
        return 0;
}

int
rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
        (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
        return 0;
}

int
rte_eth_dev_bypass_wd_reset(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
        (*dev->dev_ops->bypass_wd_reset)(dev);
        return 0;
}
#endif
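
/*
 * Example: arming the bypass watchdog and servicing it from the main
 * loop. A minimal sketch; do_work() stands in for application code,
 * the timeout constant is assumed from the bypass section of
 * rte_ethdev.h, and these calls are only compiled in when
 * RTE_NIC_BYPASS is defined.
 *
 *      rte_eth_dev_bypass_init(port_id);
 *      rte_eth_dev_wd_timeout_store(port_id, RTE_BYPASS_TMT_2_SEC);
 *      for (;;) {
 *              do_work();
 *              rte_eth_dev_bypass_wd_reset(port_id);
 *      }
 */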

int
rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
        return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
                                RTE_ETH_FILTER_NOP, NULL);
}
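
/*
 * Example: probing for flow director support before configuring it. A
 * minimal sketch; a negative return means the filter type (or the
 * port) cannot be used.
 *
 *      if (rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR) < 0)
 *              printf("port %u: no flow director\n", port_id);
 */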

int
rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
                        enum rte_filter_op filter_op, void *arg)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
        return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
}
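
/*
 * Example: adding a filter through the generic control path. A sketch
 * only; the argument layout depends on filter_type (here an ntuple
 * filter, see rte_eth_ctrl.h) and the field setup is elided.
 *
 *      struct rte_eth_ntuple_filter ntuple;
 *
 *      memset(&ntuple, 0, sizeof(ntuple));
 *      ... fill in flags, addresses, ports and destination queue ...
 *      rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
 *                              RTE_ETH_FILTER_ADD, &ntuple);
 */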

void *
rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
                rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
        rte_errno = ENOTSUP;
        return NULL;
#endif
        /* check input parameters */
        if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
                    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
                rte_errno = EINVAL;
                return NULL;
        }

        struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

        if (cb == NULL) {
                rte_errno = ENOMEM;
                return NULL;
        }

        cb->fn.rx = fn;
        cb->param = user_param;
        /* insert at the head of the per-queue list: newest runs first */
        cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
        rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
        return cb;
}
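
/*
 * Example: counting received packets with a post-RX callback. A
 * minimal sketch; rx_count() and the counter are application code, and
 * the returned handle is what rte_eth_remove_rx_callback() later
 * expects.
 *
 *      static uint16_t
 *      rx_count(uint8_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *               uint16_t nb_pkts, uint16_t max_pkts, void *arg)
 *      {
 *              *(uint64_t *)arg += nb_pkts;
 *              return nb_pkts;
 *      }
 *
 *      static uint64_t rx_total;
 *      void *cb = rte_eth_add_rx_callback(port_id, 0, rx_count, &rx_total);
 */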

void *
rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
                rte_tx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
        rte_errno = ENOTSUP;
        return NULL;
#endif
        /* check input parameters */
        if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
                    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
                rte_errno = EINVAL;
                return NULL;
        }

        struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

        if (cb == NULL) {
                rte_errno = ENOMEM;
                return NULL;
        }

        cb->fn.tx = fn;
        cb->param = user_param;
        /* insert at the head of the per-queue list: newest runs first */
        cb->next = rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
        rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
        return cb;
}
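
/*
 * Example: counting bytes about to be transmitted with a pre-TX
 * callback. A minimal sketch; tx_bytes() and tx_total are application
 * code, and the callback runs on the data path for every burst, so it
 * must stay cheap.
 *
 *      static uint16_t
 *      tx_bytes(uint8_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *               uint16_t nb_pkts, void *arg)
 *      {
 *              uint16_t i;
 *
 *              for (i = 0; i < nb_pkts; i++)
 *                      *(uint64_t *)arg += pkts[i]->pkt_len;
 *              return nb_pkts;
 *      }
 *
 *      rte_eth_add_tx_callback(port_id, 0, tx_bytes, &tx_total);
 */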

int
rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
                struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
        return -ENOTSUP;
#endif
        /* Check input parameters. */
        if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
                    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
                return -EINVAL;
        }

        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
        struct rte_eth_rxtx_callback *prev_cb;

        /*
         * Note: the unlinked callback is not freed here; the caller must
         * not release its memory while an rx_burst thread may still be
         * walking the list.
         */

        /* Reset head pointer and remove user cb if first in the list. */
        if (cb == user_cb) {
                dev->post_rx_burst_cbs[queue_id] = user_cb->next;
                return 0;
        }

        /* Remove the user cb from the callback list. */
        do {
                prev_cb = cb;
                cb = cb->next;

                if (cb == user_cb) {
                        prev_cb->next = user_cb->next;
                        return 0;
                }

        } while (cb != NULL);

        /* Callback wasn't found. */
        return -EINVAL;
}

int
rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
                struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
        return -ENOTSUP;
#endif
        /* Check input parameters. */
        if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
                    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
                return -EINVAL;
        }

        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
        struct rte_eth_rxtx_callback *prev_cb;

        /*
         * Note: as on the RX side, the unlinked callback is not freed
         * here; the caller must not release its memory while a tx_burst
         * thread may still be walking the list.
         */

        /* Reset head pointer and remove user cb if first in the list. */
        if (cb == user_cb) {
                dev->pre_tx_burst_cbs[queue_id] = user_cb->next;
                return 0;
        }

        /* Remove the user cb from the callback list. */
        do {
                prev_cb = cb;
                cb = cb->next;

                if (cb == user_cb) {
                        prev_cb->next = user_cb->next;
                        return 0;
                }

        } while (cb != NULL);

        /* Callback wasn't found. */
        return -EINVAL;
}

int
rte_eth_dev_set_mc_addr_list(uint8_t port_id,
                             struct ether_addr *mc_addr_set,
                             uint32_t nb_mc_addr)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
        return (*dev->dev_ops->set_mc_addr_list)(dev, mc_addr_set, nb_mc_addr);
}
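
/*
 * Example: filtering two IPv4 multicast groups. A minimal sketch; each
 * call replaces the previously installed list, and nb_mc_addr == 0
 * flushes it.
 *
 *      struct ether_addr mc[] = {
 *              {{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }},
 *              {{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb }},
 *      };
 *
 *      rte_eth_dev_set_mc_addr_list(port_id, mc, 2);
 */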

int
rte_eth_timesync_enable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
        return (*dev->dev_ops->timesync_enable)(dev);
}

int
rte_eth_timesync_disable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
        return (*dev->dev_ops->timesync_disable)(dev);
}
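
/*
 * Example: switching IEEE 1588 timestamping on for a port. A minimal
 * sketch; the device decides which PTP packets it timestamps, so the
 * port must also be configured to actually receive them.
 *
 *      if (rte_eth_timesync_enable(port_id) < 0)
 *              printf("port %u: no IEEE 1588 support\n", port_id);
 */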

int
rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp,
                                   uint32_t flags)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
        return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
}

int
rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp)
{
        struct rte_eth_dev *dev;

        VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
        return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
}
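
/*
 * Example: collecting the RX and TX timestamps of PTP frames. A sketch
 * only; mb is a received mbuf, the flags argument is device specific
 * (0 here; some PMDs use it to select a timestamp register), and the
 * TX loop polls until the NIC has latched the timestamp of a packet
 * sent with PKT_TX_IEEE1588_TMST set.
 *
 *      struct timespec ts;
 *
 *      if ((mb->ol_flags & PKT_RX_IEEE1588_TMST) &&
 *          rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
 *              printf("rx at %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
 *
 *      while (rte_eth_timesync_read_tx_timestamp(port_id, &ts) < 0)
 *              ;
 */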